// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 library functions
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2014 Red Hat Inc.
 * Copyright 2025 Google LLC
 */

#include <crypto/hmac.h>
#include <crypto/sha2.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/unaligned.h>
#include <linux/wordpart.h>

#include "fips.h"

static const struct sha256_block_state sha224_iv = {
	.h = {
		SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
		SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
	},
};

static const struct sha256_ctx initial_sha256_ctx = {
	.ctx = {
		.state = {
			.h = {
				SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
				SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
			},
		},
		.bytecount = 0,
	},
};
#define sha256_iv (initial_sha256_ctx.ctx.state)

static const u32 sha256_K[64] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
	0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
	0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
	0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
	0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
	0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
};

#define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y))))
#define e0(x) (ror32((x), 2) ^ ror32((x), 13) ^ ror32((x), 22))
#define e1(x) (ror32((x), 6) ^ ror32((x), 11) ^ ror32((x), 25))
#define s0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3))
#define s1(x) (ror32((x), 17) ^ ror32((x), 19) ^ ((x) >> 10))

static inline void LOAD_OP(int I, u32 *W, const u8 *input)
{
	W[I] = get_unaligned_be32((__u32 *)input + I);
}

static inline void BLEND_OP(int I, u32 *W)
{
	W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16];
}

#define SHA256_ROUND(i, a, b, c, d, e, f, g, h)				\
	do {								\
		u32 t1, t2;						\
		t1 = h + e1(e) + Ch(e, f, g) + sha256_K[i] + W[i];	\
		t2 = e0(a) + Maj(a, b, c);				\
		d += t1;						\
		h = t1 + t2;						\
	} while (0)

static void sha256_block_generic(struct sha256_block_state *state,
				 const u8 *input, u32 W[64])
{
	u32 a, b, c, d, e, f, g, h;
	int i;

	/* load the input */
	for (i = 0; i < 16; i += 8) {
		LOAD_OP(i + 0, W, input);
		LOAD_OP(i + 1, W, input);
		LOAD_OP(i + 2, W, input);
		LOAD_OP(i + 3, W, input);
		LOAD_OP(i + 4, W, input);
		LOAD_OP(i + 5, W, input);
		LOAD_OP(i + 6, W, input);
		LOAD_OP(i + 7, W, input);
	}

	/* now blend */
	for (i = 16; i < 64; i += 8) {
		BLEND_OP(i + 0, W);
		BLEND_OP(i + 1, W);
		BLEND_OP(i + 2, W);
		BLEND_OP(i + 3, W);
		BLEND_OP(i + 4, W);
		BLEND_OP(i + 5, W);
		BLEND_OP(i + 6, W);
		BLEND_OP(i + 7, W);
	}

	/* load the state into our registers */
	a = state->h[0];
	b = state->h[1];
	c = state->h[2];
	d = state->h[3];
	e = state->h[4];
	f = state->h[5];
	g = state->h[6];
	h = state->h[7];

	/* now iterate */
	for (i = 0; i < 64; i += 8) {
		SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
		SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
		SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
		SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
		SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
		SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
		SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
		SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
	}

	state->h[0] += a;
	state->h[1] += b;
	state->h[2] += c;
	state->h[3] += d;
	state->h[4] += e;
	state->h[5] += f;
	state->h[6] += g;
	state->h[7] += h;
}

static void __maybe_unused
sha256_blocks_generic(struct sha256_block_state *state,
		      const u8 *data, size_t nblocks)
{
	u32 W[64];

	do {
		sha256_block_generic(state, data, W);
		data += SHA256_BLOCK_SIZE;
	} while (--nblocks);

	memzero_explicit(W, sizeof(W));
}

#if defined(CONFIG_CRYPTO_LIB_SHA256_ARCH) && !defined(__DISABLE_EXPORTS)
#include "sha256.h" /* $(SRCARCH)/sha256.h */
#else
#define sha256_blocks sha256_blocks_generic
#endif

static void __sha256_init(struct __sha256_ctx *ctx,
			  const struct sha256_block_state *iv,
			  u64 initial_bytecount)
{
	ctx->state = *iv;
	ctx->bytecount = initial_bytecount;
}

void sha224_init(struct sha224_ctx *ctx)
{
	__sha256_init(&ctx->ctx, &sha224_iv, 0);
}
EXPORT_SYMBOL_GPL(sha224_init);

void sha256_init(struct sha256_ctx *ctx)
{
	__sha256_init(&ctx->ctx, &sha256_iv, 0);
}
EXPORT_SYMBOL_GPL(sha256_init);

void __sha256_update(struct __sha256_ctx *ctx, const u8 *data, size_t len)
{
	size_t partial = ctx->bytecount % SHA256_BLOCK_SIZE;

	ctx->bytecount += len;

	if (partial + len >= SHA256_BLOCK_SIZE) {
		size_t nblocks;

		if (partial) {
			size_t l = SHA256_BLOCK_SIZE - partial;

			memcpy(&ctx->buf[partial], data, l);
			data += l;
			len -= l;

			sha256_blocks(&ctx->state, ctx->buf, 1);
		}

		nblocks = len / SHA256_BLOCK_SIZE;
		len %= SHA256_BLOCK_SIZE;

		if (nblocks) {
			sha256_blocks(&ctx->state, data, nblocks);
			data += nblocks * SHA256_BLOCK_SIZE;
		}
		partial = 0;
	}
	if (len)
		memcpy(&ctx->buf[partial], data, len);
}
EXPORT_SYMBOL(__sha256_update);

static void __sha256_final(struct __sha256_ctx *ctx,
			   u8 *out, size_t digest_size)
{
	u64 bitcount = ctx->bytecount << 3;
	size_t partial = ctx->bytecount % SHA256_BLOCK_SIZE;

	ctx->buf[partial++] = 0x80;
	if (partial > SHA256_BLOCK_SIZE - 8) {
		memset(&ctx->buf[partial], 0, SHA256_BLOCK_SIZE - partial);
		sha256_blocks(&ctx->state, ctx->buf, 1);
		partial = 0;
	}
	memset(&ctx->buf[partial], 0, SHA256_BLOCK_SIZE - 8 - partial);
	*(__be64 *)&ctx->buf[SHA256_BLOCK_SIZE - 8] = cpu_to_be64(bitcount);
	sha256_blocks(&ctx->state, ctx->buf, 1);

	for (size_t i = 0; i < digest_size; i += 4)
		put_unaligned_be32(ctx->state.h[i / 4], out + i);
}

void sha224_final(struct sha224_ctx *ctx, u8 out[SHA224_DIGEST_SIZE])
{
	__sha256_final(&ctx->ctx, out, SHA224_DIGEST_SIZE);
	memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL(sha224_final);

void sha256_final(struct sha256_ctx *ctx, u8 out[SHA256_DIGEST_SIZE])
{
	__sha256_final(&ctx->ctx, out, SHA256_DIGEST_SIZE);
	memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL(sha256_final);

void sha224(const u8 *data, size_t len, u8 out[SHA224_DIGEST_SIZE])
{
	struct sha224_ctx ctx;

	sha224_init(&ctx);
	sha224_update(&ctx, data, len);
	sha224_final(&ctx, out);
}
EXPORT_SYMBOL(sha224);

void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE])
{
	struct sha256_ctx ctx;

	sha256_init(&ctx);
	sha256_update(&ctx, data, len);
	sha256_final(&ctx, out);
}
EXPORT_SYMBOL(sha256);

/*
 * Pre-boot environments (as indicated by __DISABLE_EXPORTS being defined) just
 * need the generic SHA-256 code. Omit all other features from them.
 */
#ifndef __DISABLE_EXPORTS

#ifndef sha256_finup_2x_arch
static bool sha256_finup_2x_arch(const struct __sha256_ctx *ctx,
				 const u8 *data1, const u8 *data2, size_t len,
				 u8 out1[SHA256_DIGEST_SIZE],
				 u8 out2[SHA256_DIGEST_SIZE])
{
	return false;
}
static bool sha256_finup_2x_is_optimized_arch(void)
{
	return false;
}
#endif

/* Sequential fallback implementation of sha256_finup_2x() */
static noinline_for_stack void sha256_finup_2x_sequential(
	const struct __sha256_ctx *ctx, const u8 *data1, const u8 *data2,
	size_t len, u8 out1[SHA256_DIGEST_SIZE], u8 out2[SHA256_DIGEST_SIZE])
{
	struct __sha256_ctx mut_ctx;

	mut_ctx = *ctx;
	__sha256_update(&mut_ctx, data1, len);
	__sha256_final(&mut_ctx, out1, SHA256_DIGEST_SIZE);

	mut_ctx = *ctx;
	__sha256_update(&mut_ctx, data2, len);
	__sha256_final(&mut_ctx, out2, SHA256_DIGEST_SIZE);
}

void sha256_finup_2x(const struct sha256_ctx *ctx, const u8 *data1,
		     const u8 *data2, size_t len, u8 out1[SHA256_DIGEST_SIZE],
		     u8 out2[SHA256_DIGEST_SIZE])
{
	if (ctx == NULL)
		ctx = &initial_sha256_ctx;

	if (likely(sha256_finup_2x_arch(&ctx->ctx, data1, data2, len,
					out1, out2)))
		return;
	sha256_finup_2x_sequential(&ctx->ctx, data1, data2, len, out1, out2);
}
EXPORT_SYMBOL_GPL(sha256_finup_2x);

bool sha256_finup_2x_is_optimized(void)
{
	return sha256_finup_2x_is_optimized_arch();
}
EXPORT_SYMBOL_GPL(sha256_finup_2x_is_optimized);

static void __hmac_sha256_preparekey(struct sha256_block_state *istate,
				     struct sha256_block_state *ostate,
				     const u8 *raw_key, size_t raw_key_len,
				     const struct sha256_block_state *iv)
{
	union {
		u8 b[SHA256_BLOCK_SIZE];
		unsigned long w[SHA256_BLOCK_SIZE / sizeof(unsigned long)];
	} derived_key = { 0 };

	if (unlikely(raw_key_len > SHA256_BLOCK_SIZE)) {
		if (iv == &sha224_iv)
			sha224(raw_key, raw_key_len, derived_key.b);
		else
			sha256(raw_key, raw_key_len, derived_key.b);
	} else {
		memcpy(derived_key.b, raw_key, raw_key_len);
	}

	for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)
		derived_key.w[i] ^= REPEAT_BYTE(HMAC_IPAD_VALUE);
	*istate = *iv;
	sha256_blocks(istate, derived_key.b, 1);

	for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)
		derived_key.w[i] ^= REPEAT_BYTE(HMAC_OPAD_VALUE ^
						HMAC_IPAD_VALUE);
	*ostate = *iv;
	sha256_blocks(ostate, derived_key.b, 1);

	memzero_explicit(&derived_key, sizeof(derived_key));
}

void hmac_sha224_preparekey(struct hmac_sha224_key *key,
			    const u8 *raw_key, size_t raw_key_len)
{
	__hmac_sha256_preparekey(&key->key.istate, &key->key.ostate,
				 raw_key, raw_key_len, &sha224_iv);
}
EXPORT_SYMBOL_GPL(hmac_sha224_preparekey);

void hmac_sha256_preparekey(struct hmac_sha256_key *key,
			    const u8 *raw_key, size_t raw_key_len)
{
	__hmac_sha256_preparekey(&key->key.istate, &key->key.ostate,
				 raw_key, raw_key_len, &sha256_iv);
}
EXPORT_SYMBOL_GPL(hmac_sha256_preparekey);

void __hmac_sha256_init(struct __hmac_sha256_ctx *ctx,
			const struct __hmac_sha256_key *key)
{
	__sha256_init(&ctx->sha_ctx, &key->istate, SHA256_BLOCK_SIZE);
	ctx->ostate = key->ostate;
}
EXPORT_SYMBOL_GPL(__hmac_sha256_init);

void hmac_sha224_init_usingrawkey(struct hmac_sha224_ctx *ctx,
				  const u8 *raw_key, size_t raw_key_len)
{
	__hmac_sha256_preparekey(&ctx->ctx.sha_ctx.state, &ctx->ctx.ostate,
				 raw_key, raw_key_len, &sha224_iv);
	ctx->ctx.sha_ctx.bytecount = SHA256_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(hmac_sha224_init_usingrawkey);

void hmac_sha256_init_usingrawkey(struct hmac_sha256_ctx *ctx,
				  const u8 *raw_key, size_t raw_key_len)
{
	__hmac_sha256_preparekey(&ctx->ctx.sha_ctx.state, &ctx->ctx.ostate,
				 raw_key, raw_key_len, &sha256_iv);
	ctx->ctx.sha_ctx.bytecount = SHA256_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(hmac_sha256_init_usingrawkey);

static void __hmac_sha256_final(struct __hmac_sha256_ctx *ctx,
				u8 *out, size_t digest_size)
{
	/* Generate the padded input for the outer hash in ctx->sha_ctx.buf. */
	__sha256_final(&ctx->sha_ctx, ctx->sha_ctx.buf, digest_size);
	memset(&ctx->sha_ctx.buf[digest_size], 0,
	       SHA256_BLOCK_SIZE - digest_size);
	ctx->sha_ctx.buf[digest_size] = 0x80;
	*(__be32 *)&ctx->sha_ctx.buf[SHA256_BLOCK_SIZE - 4] =
		cpu_to_be32(8 * (SHA256_BLOCK_SIZE + digest_size));

	/* Compute the outer hash, which gives the HMAC value. */
	sha256_blocks(&ctx->ostate, ctx->sha_ctx.buf, 1);
	for (size_t i = 0; i < digest_size; i += 4)
		put_unaligned_be32(ctx->ostate.h[i / 4], out + i);

	memzero_explicit(ctx, sizeof(*ctx));
}

void hmac_sha224_final(struct hmac_sha224_ctx *ctx,
		       u8 out[SHA224_DIGEST_SIZE])
{
	__hmac_sha256_final(&ctx->ctx, out, SHA224_DIGEST_SIZE);
}
EXPORT_SYMBOL_GPL(hmac_sha224_final);

void hmac_sha256_final(struct hmac_sha256_ctx *ctx,
		       u8 out[SHA256_DIGEST_SIZE])
{
	__hmac_sha256_final(&ctx->ctx, out, SHA256_DIGEST_SIZE);
}
EXPORT_SYMBOL_GPL(hmac_sha256_final);

void hmac_sha224(const struct hmac_sha224_key *key,
		 const u8 *data, size_t data_len, u8 out[SHA224_DIGEST_SIZE])
{
	struct hmac_sha224_ctx ctx;

	hmac_sha224_init(&ctx, key);
	hmac_sha224_update(&ctx, data, data_len);
	hmac_sha224_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_sha224);

void hmac_sha256(const struct hmac_sha256_key *key,
		 const u8 *data, size_t data_len, u8 out[SHA256_DIGEST_SIZE])
{
	struct hmac_sha256_ctx ctx;

	hmac_sha256_init(&ctx, key);
	hmac_sha256_update(&ctx, data, data_len);
	hmac_sha256_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_sha256);

void hmac_sha224_usingrawkey(const u8 *raw_key, size_t raw_key_len,
			     const u8 *data, size_t data_len,
			     u8 out[SHA224_DIGEST_SIZE])
{
	struct hmac_sha224_ctx ctx;

	hmac_sha224_init_usingrawkey(&ctx, raw_key, raw_key_len);
	hmac_sha224_update(&ctx, data, data_len);
	hmac_sha224_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_sha224_usingrawkey);

void hmac_sha256_usingrawkey(const u8 *raw_key, size_t raw_key_len,
			     const u8 *data, size_t data_len,
			     u8 out[SHA256_DIGEST_SIZE])
{
	struct hmac_sha256_ctx ctx;

	hmac_sha256_init_usingrawkey(&ctx, raw_key, raw_key_len);
	hmac_sha256_update(&ctx, data, data_len);
	hmac_sha256_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_sha256_usingrawkey);

#if defined(sha256_mod_init_arch) || defined(CONFIG_CRYPTO_FIPS)
static int __init sha256_mod_init(void)
{
#ifdef sha256_mod_init_arch
	sha256_mod_init_arch();
#endif
	if (fips_enabled) {
		/*
		 * FIPS cryptographic algorithm self-test. As per the FIPS
		 * Implementation Guidance, testing HMAC-SHA256 satisfies the
		 * test requirement for SHA-224, SHA-256, and HMAC-SHA224 too.
		 */
		u8 mac[SHA256_DIGEST_SIZE];

		hmac_sha256_usingrawkey(fips_test_key, sizeof(fips_test_key),
					fips_test_data, sizeof(fips_test_data),
					mac);
		if (memcmp(fips_test_hmac_sha256_value, mac, sizeof(mac)) != 0)
			panic("sha256: FIPS self-test failed\n");
	}
	return 0;
}
subsys_initcall(sha256_mod_init);

static void __exit sha256_mod_exit(void)
{
}
module_exit(sha256_mod_exit);
#endif

#endif /* !__DISABLE_EXPORTS */

MODULE_DESCRIPTION("SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 library functions");
MODULE_LICENSE("GPL");
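/*
 * Illustrative usage sketch (not part of the file above): how a caller
 * might use the one-shot helpers defined here. demo_digests() and its
 * key/message are hypothetical; sha256() and hmac_sha256_usingrawkey()
 * are the functions exported above, declared via <crypto/sha2.h>.
 */
#include <crypto/sha2.h>

static void demo_digests(const u8 *msg, size_t len)
{
	static const u8 key[] = "illustrative raw key";	/* hypothetical key */
	u8 digest[SHA256_DIGEST_SIZE];
	u8 mac[SHA256_DIGEST_SIZE];

	/* One-shot hash of msg. */
	sha256(msg, len, digest);

	/* One-shot HMAC-SHA256 keyed with a raw key. */
	hmac_sha256_usingrawkey(key, sizeof(key) - 1, msg, len, mac);
}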
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/mmdebug.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/vmalloc.h>

#include "physaddr.h"

#ifdef CONFIG_X86_64

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
	}

	return x;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		if (y >= KERNEL_IMAGE_SIZE)
			return false;
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		if ((x > y) || !phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	unsigned long phys_addr = x - PAGE_OFFSET;

	/* VMALLOC_* aren't constants */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));

	/* max_low_pfn is set early, but not _that_ early */
	if (max_low_pfn) {
		VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn);
		BUG_ON(slow_virt_to_phys((void *)x) != phys_addr);
	}

	return phys_addr;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	if (x >= FIXADDR_START)
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif /* CONFIG_X86_64 */
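/*
 * Illustrative sketch (not part of the file above): the "carry flag"
 * comparisons in __phys_addr()/__virt_addr_valid() rely on unsigned
 * wraparound. For a nonzero constant C, y = x - C wraps to a huge value
 * when x < C, so (x > y) is true exactly when x >= C. A standalone
 * userspace demonstration of the same trick:
 */
#include <stdio.h>

int main(void)
{
	const unsigned long C = 0x1000;	/* stands in for __START_KERNEL_map */
	unsigned long x;

	for (x = 0xff8; x <= 0x1008; x += 8) {
		unsigned long y = x - C;	/* wraps when x < C */

		printf("x=%#lx: x >= C is %d\n", x, x > y);
	}
	return 0;
}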
// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/mount.c - kernfs mount implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/seq_file.h>
#include <linux/exportfs.h>
#include <linux/uuid.h>
#include <linux/statfs.h>

#include "kernfs-internal.h"

struct kmem_cache *kernfs_node_cache __ro_after_init;
struct kmem_cache *kernfs_iattrs_cache __ro_after_init;
struct kernfs_global_locks *kernfs_locks __ro_after_init;

static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
{
	struct kernfs_root *root = kernfs_root(kernfs_dentry_node(dentry));
	struct kernfs_syscall_ops *scops = root->syscall_ops;

	if (scops && scops->show_options)
		return scops->show_options(sf, root);
	return 0;
}

static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
{
	struct kernfs_node *node = kernfs_dentry_node(dentry);
	struct kernfs_root *root = kernfs_root(node);
	struct kernfs_syscall_ops *scops = root->syscall_ops;

	if (scops && scops->show_path)
		return scops->show_path(sf, node, root);

	seq_dentry(sf, dentry, " \t\n\\");
	return 0;
}

static int kernfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	simple_statfs(dentry, buf);
	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
	return 0;
}

const struct super_operations kernfs_sops = {
	.statfs		= kernfs_statfs,
	.drop_inode	= inode_just_drop,
	.evict_inode	= kernfs_evict_inode,

	.show_options	= kernfs_sop_show_options,
	.show_path	= kernfs_sop_show_path,

	/*
	 * sysfs is built on top of kernfs and sysfs provides the power
	 * management infrastructure to support suspend/hibernate by
	 * writing to various files in /sys/power/. As filesystems may
	 * be automatically frozen during suspend/hibernate implementing
	 * freeze/thaw support for kernfs generically will cause
	 * deadlocks as the suspending/hibernation initiating task will
	 * hold a VFS lock that it will then wait upon to be released.
	 * If freeze/thaw for kernfs is needed talk to the VFS.
	 */
	.freeze_fs	= NULL,
	.unfreeze_fs	= NULL,
	.freeze_super	= NULL,
	.thaw_super	= NULL,
};

static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
			    struct inode *parent)
{
	struct kernfs_node *kn = inode->i_private;

	if (*max_len < 2) {
		*max_len = 2;
		return FILEID_INVALID;
	}

	*max_len = 2;
	*(u64 *)fh = kn->id;
	return FILEID_KERNFS;
}

static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb,
					    struct fid *fid, int fh_len,
					    int fh_type, bool get_parent)
{
	struct kernfs_super_info *info = kernfs_info(sb);
	struct kernfs_node *kn;
	struct inode *inode;
	u64 id;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_KERNFS:
		id = *(u64 *)fid;
		break;
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		/*
		 * blk_log_action() exposes "LOW32,HIGH32" pair without
		 * type and userland can call us with generic fid
		 * constructed from them. Combine it back to ID. See
		 * blk_log_action().
		 */
		id = ((u64)fid->i32.gen << 32) | fid->i32.ino;
		break;
	default:
		return NULL;
	}

	kn = kernfs_find_and_get_node_by_id(info->root, id);
	if (!kn)
		return ERR_PTR(-ESTALE);

	if (get_parent) {
		struct kernfs_node *parent;

		parent = kernfs_get_parent(kn);
		kernfs_put(kn);
		kn = parent;
		if (!kn)
			return ERR_PTR(-ESTALE);
	}

	inode = kernfs_get_inode(sb, kn);
	kernfs_put(kn);
	return d_obtain_alias(inode);
}

static struct dentry *kernfs_fh_to_dentry(struct super_block *sb,
					  struct fid *fid, int fh_len,
					  int fh_type)
{
	return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, false);
}

static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
					  struct fid *fid, int fh_len,
					  int fh_type)
{
	return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, true);
}

static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
{
	struct kernfs_node *kn = kernfs_dentry_node(child);
	struct kernfs_root *root = kernfs_root(kn);

	guard(rwsem_read)(&root->kernfs_rwsem);
	return d_obtain_alias(kernfs_get_inode(child->d_sb, kernfs_parent(kn)));
}

static const struct export_operations kernfs_export_ops = {
	.encode_fh	= kernfs_encode_fh,
	.fh_to_dentry	= kernfs_fh_to_dentry,
	.fh_to_parent	= kernfs_fh_to_parent,
	.get_parent	= kernfs_get_parent_dentry,
};

/**
 * kernfs_root_from_sb - determine kernfs_root associated with a super_block
 * @sb: the super_block in question
 *
 * Return: the kernfs_root associated with @sb. If @sb is not a kernfs one,
 * %NULL is returned.
 */
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
{
	if (sb->s_op == &kernfs_sops)
		return kernfs_info(sb)->root;
	return NULL;
}

/*
 * find the next ancestor in the path down to @child, where @parent was the
 * ancestor whose descendant we want to find.
 *
 * Say the path is /a/b/c/d. @child is d, @parent is %NULL. We return the root
 * node. If @parent is b, then we return the node for c.
 * Passing in d as @parent is not ok.
 */
static struct kernfs_node *find_next_ancestor(struct kernfs_node *child,
					      struct kernfs_node *parent)
{
	if (child == parent) {
		pr_crit_once("BUG in find_next_ancestor: called with parent == child");
		return NULL;
	}

	while (kernfs_parent(child) != parent) {
		child = kernfs_parent(child);
		if (!child)
			return NULL;
	}

	return child;
}

/**
 * kernfs_node_dentry - get a dentry for the given kernfs_node
 * @kn: kernfs_node for which a dentry is needed
 * @sb: the kernfs super_block
 *
 * Return: the dentry pointer
 */
struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
				  struct super_block *sb)
{
	struct dentry *dentry;
	struct kernfs_node *knparent;
	struct kernfs_root *root;

	BUG_ON(sb->s_op != &kernfs_sops);

	dentry = dget(sb->s_root);

	/* Check if this is the root kernfs_node */
	if (!rcu_access_pointer(kn->__parent))
		return dentry;

	root = kernfs_root(kn);
	/*
	 * As long as kn is valid, its parent can not vanish. This is cgroup's
	 * kn so it can't have its parent replaced. Therefore it is safe to use
	 * the ancestor node outside of the RCU or locked section.
	 */
	if (WARN_ON_ONCE(!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)))
		return ERR_PTR(-EINVAL);
	scoped_guard(rcu) {
		knparent = find_next_ancestor(kn, NULL);
	}
	if (WARN_ON(!knparent)) {
		dput(dentry);
		return ERR_PTR(-EINVAL);
	}

	do {
		struct dentry *dtmp;
		struct kernfs_node *kntmp;
		const char *name;

		if (kn == knparent)
			return dentry;

		scoped_guard(rwsem_read, &root->kernfs_rwsem) {
			kntmp = find_next_ancestor(kn, knparent);
			if (WARN_ON(!kntmp)) {
				dput(dentry);
				return ERR_PTR(-EINVAL);
			}
			name = kstrdup(kernfs_rcu_name(kntmp), GFP_KERNEL);
		}
		if (!name) {
			dput(dentry);
			return ERR_PTR(-ENOMEM);
		}
		dtmp = lookup_noperm_positive_unlocked(&QSTR(name), dentry);
		dput(dentry);
		kfree(name);
		if (IS_ERR(dtmp))
			return dtmp;
		knparent = kntmp;
		dentry = dtmp;
	} while (true);
}

static int kernfs_fill_super(struct super_block *sb,
			     struct kernfs_fs_context *kfc)
{
	struct kernfs_super_info *info = kernfs_info(sb);
	struct kernfs_root *kf_root = kfc->root;
	struct inode *inode;
	struct dentry *root;

	info->sb = sb;
	/* Userspace would break if executables or devices appear on sysfs */
	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = kfc->magic;
	sb->s_op = &kernfs_sops;
	sb->s_xattr = kernfs_xattr_handlers;
	if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP)
		sb->s_export_op = &kernfs_export_ops;
	sb->s_time_gran = 1;

	/* sysfs dentries and inodes don't require IO to create */
	sb->s_shrink->seeks = 0;

	/* get root inode, initialize and unlock it */
	down_read(&kf_root->kernfs_rwsem);
	inode = kernfs_get_inode(sb, info->root->kn);
	up_read(&kf_root->kernfs_rwsem);
	if (!inode) {
		pr_debug("kernfs: could not get root inode\n");
		return -ENOMEM;
	}

	/* instantiate and link root dentry */
	root = d_make_root(inode);
	if (!root) {
		pr_debug("%s: could not get root dentry!\n", __func__);
		return -ENOMEM;
	}
	sb->s_root = root;
	set_default_d_op(sb, &kernfs_dops);
	return 0;
}

static int kernfs_test_super(struct super_block *sb, struct fs_context *fc)
{
	struct kernfs_super_info *sb_info = kernfs_info(sb);
	struct kernfs_super_info *info = fc->s_fs_info;

	return sb_info->root == info->root && sb_info->ns == info->ns;
}

static int kernfs_set_super(struct super_block *sb, struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	kfc->ns_tag = NULL;
	return set_anon_super_fc(sb, fc);
}

/**
 * kernfs_super_ns - determine the namespace tag of a kernfs super_block
 * @sb: super_block of interest
 *
 * Return: the namespace tag associated with kernfs super_block @sb.
 */
const void *kernfs_super_ns(struct super_block *sb)
{
	struct kernfs_super_info *info = kernfs_info(sb);

	return info->ns;
}

/**
 * kernfs_get_tree - kernfs filesystem access/retrieval helper
 * @fc: The filesystem context.
 *
 * This is to be called from each kernfs user's fs_context->ops->get_tree()
 * implementation, which should set the specified ->@fs_type and ->@flags, and
 * specify the hierarchy and namespace tag to mount via ->@root and ->@ns,
 * respectively.
 *
 * Return: %0 on success, -errno on failure.
 */
int kernfs_get_tree(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;
	struct super_block *sb;
	struct kernfs_super_info *info;
	int error;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->root = kfc->root;
	info->ns = kfc->ns_tag;
	INIT_LIST_HEAD(&info->node);

	fc->s_fs_info = info;
	sb = sget_fc(fc, kernfs_test_super, kernfs_set_super);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		struct kernfs_super_info *info = kernfs_info(sb);
		struct kernfs_root *root = kfc->root;

		kfc->new_sb_created = true;

		error = kernfs_fill_super(sb, kfc);
		if (error) {
			deactivate_locked_super(sb);
			return error;
		}
		sb->s_flags |= SB_ACTIVE;

		uuid_t uuid;

		uuid_gen(&uuid);
		super_set_uuid(sb, uuid.b, sizeof(uuid));

		down_write(&root->kernfs_supers_rwsem);
		list_add(&info->node, &info->root->supers);
		up_write(&root->kernfs_supers_rwsem);
	}

	fc->root = dget(sb->s_root);
	return 0;
}

void kernfs_free_fs_context(struct fs_context *fc)
{
	/* Note that we don't deal with kfc->ns_tag here. */
	kfree(fc->s_fs_info);
	fc->s_fs_info = NULL;
}

/**
 * kernfs_kill_sb - kill_sb for kernfs
 * @sb: super_block being killed
 *
 * This can be used directly for file_system_type->kill_sb(). If a kernfs
 * user needs extra cleanup, it can implement its own kill_sb() and call
 * this function at the end.
 */
void kernfs_kill_sb(struct super_block *sb)
{
	struct kernfs_super_info *info = kernfs_info(sb);
	struct kernfs_root *root = info->root;

	down_write(&root->kernfs_supers_rwsem);
	list_del(&info->node);
	up_write(&root->kernfs_supers_rwsem);

	/*
	 * Remove the superblock from fs_supers/s_instances
	 * so we can't find it, before freeing kernfs_super_info.
	 */
	kill_anon_super(sb);
	kfree(info);
}

static void __init kernfs_mutex_init(void)
{
	int count;

	for (count = 0; count < NR_KERNFS_LOCKS; count++)
		mutex_init(&kernfs_locks->open_file_mutex[count]);
}

static void __init kernfs_lock_init(void)
{
	kernfs_locks = kmalloc(sizeof(struct kernfs_global_locks), GFP_KERNEL);
	WARN_ON(!kernfs_locks);

	kernfs_mutex_init();
}

void __init kernfs_init(void)
{
	kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
					      sizeof(struct kernfs_node),
					      0, SLAB_PANIC, NULL);

	/* Creates slab cache for kernfs inode attributes */
	kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache",
						sizeof(struct kernfs_iattrs),
						0, SLAB_PANIC, NULL);

	kernfs_lock_init();
}
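/*
 * Illustrative sketch (not part of the file above): the shape of a kernfs
 * user's fs_context->ops->get_tree(), wired up the way the kernfs_get_tree()
 * comment describes. example_root and EXAMPLE_SUPER_MAGIC are hypothetical;
 * kfc->root, kfc->ns_tag and kfc->magic are the struct kernfs_fs_context
 * fields consumed by the code above.
 */
static int example_get_tree(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	kfc->root = example_root;		/* hierarchy to mount */
	kfc->ns_tag = NULL;			/* no namespace tag */
	kfc->magic = EXAMPLE_SUPER_MAGIC;	/* becomes sb->s_magic */
	return kernfs_get_tree(fc);
}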
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GUE_H
#define __NET_GUE_H

/* Definitions for the GUE header, standard and private flags, lengths
 * of optional fields are below.
 *
 * Diagram of GUE header:
 *
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |Ver|C|   Hlen  | Proto/ctype   |        Standard flags       |P|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                                                               |
 * ~                      Fields (optional)                        ~
 * |                                                               |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |            Private flags (optional, P bit is set)             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                                                               |
 * ~                   Private fields (optional)                   ~
 * |                                                               |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * C bit indicates control message when set, data message when unset.
 * For a control message, proto/ctype is interpreted as a type of
 * control message. For data messages, proto/ctype is the IP protocol
 * of the next header.
 *
 * P bit indicates private flags field is present. The private flags
 * may refer to options placed after this field.
 */

#include <asm/byteorder.h>
#include <linux/types.h>

struct guehdr {
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			__u8	hlen:5,
				control:1,
				version:2;
#elif defined (__BIG_ENDIAN_BITFIELD)
			__u8	version:2,
				control:1,
				hlen:5;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
			__u8	proto_ctype;
			__be16	flags;
		};
		__be32	word;
	};
};

/* Standard flags in GUE header */

#define GUE_FLAG_PRIV	htons(1<<0)	/* Private flags are in options */
#define GUE_LEN_PRIV	4

#define GUE_FLAGS_ALL	(GUE_FLAG_PRIV)

/* Private flags in the private option extension */

#define GUE_PFLAG_REMCSUM	htonl(1U << 31)
#define GUE_PLEN_REMCSUM	4

#define GUE_PFLAGS_ALL	(GUE_PFLAG_REMCSUM)

/* Functions to compute options length corresponding to flags.
 * If we ever have a lot of flags this can potentially be converted to
 * a more optimized algorithm (table lookup for instance).
 */
static inline size_t guehdr_flags_len(__be16 flags)
{
	return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0);
}

static inline size_t guehdr_priv_flags_len(__be32 flags)
{
	return 0;
}

/* Validate standard and private flags. Returns non-zero (meaning invalid)
 * if there is an unknown standard or private flag, or the options length
 * for the flags exceeds the options length specified in hlen of the GUE
 * header.
 */
static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen)
{
	__be16 flags = guehdr->flags;
	size_t len;

	if (flags & ~GUE_FLAGS_ALL)
		return 1;

	len = guehdr_flags_len(flags);
	if (len > optlen)
		return 1;

	if (flags & GUE_FLAG_PRIV) {
		/* Private flags are last four bytes accounted in
		 * guehdr_flags_len
		 */
		__be32 pflags = *(__be32 *)((void *)&guehdr[1] +
					    len - GUE_LEN_PRIV);

		if (pflags & ~GUE_PFLAGS_ALL)
			return 1;

		len += guehdr_priv_flags_len(pflags);
		if (len > optlen)
			return 1;
	}

	return 0;
}

#endif
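/*
 * Illustrative sketch (not part of the header above): how a receive path
 * might call validate_gue_flags(). guehdr->hlen counts 32-bit words of
 * optional fields, so the options length in bytes is hlen << 2; the
 * surrounding packet handling is elided and example_gue_check() is
 * hypothetical.
 */
static inline int example_gue_check(struct guehdr *guehdr)
{
	size_t optlen = guehdr->hlen << 2;	/* hlen is in 32-bit words */

	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;	/* unknown flag or options overrun hlen */
	return 0;
}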
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM skb

#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SKB_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tracepoint.h>

#undef FN
#define FN(reason)	TRACE_DEFINE_ENUM(SKB_DROP_REASON_##reason);
DEFINE_DROP_REASON(FN, FN)

#undef FN
#undef FNe
#define FN(reason)	{ SKB_DROP_REASON_##reason, #reason },
#define FNe(reason)	{ SKB_DROP_REASON_##reason, #reason }

/*
 * Tracepoint for freeing an sk_buff:
 */
TRACE_EVENT(kfree_skb,

	TP_PROTO(struct sk_buff *skb, void *location,
		 enum skb_drop_reason reason, struct sock *rx_sk),

	TP_ARGS(skb, location, reason, rx_sk),

	TP_STRUCT__entry(
		__field(void *,			skbaddr)
		__field(void *,			location)
		__field(void *,			rx_sk)
		__field(unsigned short,		protocol)
		__field(enum skb_drop_reason,	reason)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->location = location;
		__entry->rx_sk = rx_sk;
		__entry->protocol = ntohs(skb->protocol);
		__entry->reason = reason;
	),

	TP_printk("skbaddr=%p rx_sk=%p protocol=%u location=%pS reason: %s",
		  __entry->skbaddr, __entry->rx_sk, __entry->protocol,
		  __entry->location,
		  __print_symbolic(__entry->reason,
				   DEFINE_DROP_REASON(FN, FNe)))
);

#undef FN
#undef FNe

TRACE_EVENT(consume_skb,

	TP_PROTO(struct sk_buff *skb, void *location),

	TP_ARGS(skb, location),

	TP_STRUCT__entry(
		__field(void *,	skbaddr)
		__field(void *,	location)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->location = location;
	),

	TP_printk("skbaddr=%p location=%pS",
		  __entry->skbaddr, __entry->location)
);

TRACE_EVENT(skb_copy_datagram_iovec,

	TP_PROTO(const struct sk_buff *skb, int len),

	TP_ARGS(skb, len),

	TP_STRUCT__entry(
		__field(const void *,	skbaddr)
		__field(int,		len)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = len;
	),

	TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
);

#endif /* _TRACE_SKB_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
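/*
 * Illustrative sketch (not part of the header above): TRACE_EVENT(kfree_skb,
 * ...) generates a trace_kfree_skb() inline that drop sites call. The wrapper
 * below is hypothetical, but the call matches the TP_PROTO() declared above.
 */
static void example_report_drop(struct sk_buff *skb, struct sock *rx_sk,
				enum skb_drop_reason reason)
{
	/* Record the caller's address as the drop location. */
	trace_kfree_skb(skb, __builtin_return_address(0), reason, rx_sk);
}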
// SPDX-License-Identifier: GPL-2.0-only
/*
 * lib/parser.c - simple parser for mount, etc. options.
 */

#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/kstrtox.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * max size needed by different bases to express U64
 * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
 * DEC: "18446744073709551615" --> 20
 * OCT: "01777777777777777777777" --> 23
 * pick the max one to define NUMBER_BUF_LEN
 */
#define NUMBER_BUF_LEN 24

/**
 * match_one - Determines if a string matches a simple pattern
 * @s: the string to examine for presence of the pattern
 * @p: the string containing the pattern
 * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
 * locations.
 *
 * Description: Determines if the pattern @p is present in string @s. Can only
 * match extremely simple token=arg style patterns. If the pattern is found,
 * the location(s) of the arguments will be returned in the @args array.
 */
static int match_one(char *s, const char *p, substring_t args[])
{
	char *meta;
	int argc = 0;

	if (!p)
		return 1;

	while(1) {
		int len = -1;

		meta = strchr(p, '%');
		if (!meta)
			return strcmp(p, s) == 0;

		if (strncmp(p, s, meta-p))
			return 0;

		s += meta - p;
		p = meta + 1;

		if (isdigit(*p))
			len = simple_strtoul(p, (char **) &p, 10);
		else if (*p == '%') {
			if (*s++ != '%')
				return 0;
			p++;
			continue;
		}

		if (argc >= MAX_OPT_ARGS)
			return 0;

		args[argc].from = s;
		switch (*p++) {
		case 's': {
			size_t str_len = strlen(s);

			if (str_len == 0)
				return 0;
			if (len == -1 || len > str_len)
				len = str_len;
			args[argc].to = s + len;
			break;
		}
		case 'd':
			simple_strtol(s, &args[argc].to, 0);
			goto num;
		case 'u':
			simple_strtoul(s, &args[argc].to, 0);
			goto num;
		case 'o':
			simple_strtoul(s, &args[argc].to, 8);
			goto num;
		case 'x':
			simple_strtoul(s, &args[argc].to, 16);
		num:
			if (args[argc].to == args[argc].from)
				return 0;
			break;
		default:
			return 0;
		}
		s = args[argc].to;
		argc++;
	}
}

/**
 * match_token - Find a token (and optional args) in a string
 * @s: the string to examine for token/argument pairs
 * @table: match_table_t describing the set of allowed option tokens and the
 * arguments that may be associated with them. Must be terminated with a
 * &struct match_token whose pattern is set to the NULL pointer.
 * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
 * locations.
 *
 * Description: Detects which if any of a set of token strings has been passed
 * to it. Tokens can include up to %MAX_OPT_ARGS instances of basic c-style
 * format identifiers which will be taken into account when matching the
 * tokens, and whose locations will be returned in the @args array.
 */
int match_token(char *s, const match_table_t table, substring_t args[])
{
	const struct match_token *p;

	for (p = table; !match_one(s, p->pattern, args); p++)
		;

	return p->token;
}
EXPORT_SYMBOL(match_token);

/**
 * match_number - scan a number in the given base from a substring_t
 * @s: substring to be scanned
 * @result: resulting integer on success
 * @base: base to use when converting string
 *
 * Description: Given a &substring_t and a base, attempts to parse the
 * substring as a number in that base.
 *
 * Return: On success, sets @result to the integer represented by the
 * string and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
static int match_number(substring_t *s, int *result, int base)
{
	char *endp;
	char buf[NUMBER_BUF_LEN];
	int ret;
	long val;

	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
		return -ERANGE;
	ret = 0;
	val = simple_strtol(buf, &endp, base);
	if (endp == buf)
		ret = -EINVAL;
	else if (val < (long)INT_MIN || val > (long)INT_MAX)
		ret = -ERANGE;
	else
		*result = (int) val;
	return ret;
}

/**
 * match_u64int - scan a number in the given base from a substring_t
 * @s: substring to be scanned
 * @result: resulting u64 on success
 * @base: base to use when converting string
 *
 * Description: Given a &substring_t and a base, attempts to parse the
 * substring as a number in that base.
 *
 * Return: On success, sets @result to the integer represented by the
 * string and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
static int match_u64int(substring_t *s, u64 *result, int base)
{
	char buf[NUMBER_BUF_LEN];
	int ret;
	u64 val;

	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
		return -ERANGE;
	ret = kstrtoull(buf, base, &val);
	if (!ret)
		*result = val;
	return ret;
}

/**
 * match_int - scan a decimal representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as a decimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_int(substring_t *s, int *result)
{
	return match_number(s, result, 0);
}
EXPORT_SYMBOL(match_int);

/**
 * match_uint - scan a decimal representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as a decimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_uint(substring_t *s, unsigned int *result)
{
	char buf[NUMBER_BUF_LEN];

	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
		return -ERANGE;
	return kstrtouint(buf, 10, result);
}
EXPORT_SYMBOL(match_uint);

/**
 * match_u64 - scan a decimal representation of a u64 from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting unsigned long long on success
 *
 * Description: Attempts to parse the &substring_t @s as a long decimal
 * integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_u64(substring_t *s, u64 *result)
{
	return match_u64int(s, result, 0);
}
EXPORT_SYMBOL(match_u64);

/**
 * match_octal - scan an octal representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as an octal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_octal(substring_t *s, int *result)
{
	return match_number(s, result, 8);
}
EXPORT_SYMBOL(match_octal);

/**
 * match_hex - scan a hex representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_hex(substring_t *s, int *result)
{
	return match_number(s, result, 16);
}
EXPORT_SYMBOL(match_hex);

/**
 * match_wildcard - parse if a string matches given wildcard pattern
 * @pattern: wildcard pattern
 * @str: the string to be parsed
 *
 * Description: Parse the string @str to check if it matches wildcard
 * pattern @pattern. The pattern may contain two types of wildcards:
 *
 * * '*' - matches zero or more characters
 * * '?' - matches one character
 *
 * Return: If the @str matches the @pattern, return true, else return false.
 */
bool match_wildcard(const char *pattern, const char *str)
{
	const char *s = str;
	const char *p = pattern;
	bool star = false;

	while (*s) {
		switch (*p) {
		case '?':
			s++;
			p++;
			break;
		case '*':
			star = true;
			str = s;
			if (!*++p)
				return true;
			pattern = p;
			break;
		default:
			if (*s == *p) {
				s++;
				p++;
			} else {
				if (!star)
					return false;
				str++;
				s = str;
				p = pattern;
			}
			break;
		}
	}

	if (*p == '*')
		++p;
	return !*p;
}
EXPORT_SYMBOL(match_wildcard);

/**
 * match_strlcpy - Copy the characters from a substring_t to a sized buffer
 * @dest: where to copy to
 * @src: &substring_t to copy
 * @size: size of destination buffer
 *
 * Description: Copy the characters in &substring_t @src to the
 * c-style string @dest. Copy no more than @size - 1 characters, plus
 * the terminating NUL.
 *
 * Return: length of @src.
 */
size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
	size_t ret = src->to - src->from;

	if (size) {
		size_t len = ret >= size ? size - 1 : ret;

		memcpy(dest, src->from, len);
		dest[len] = '\0';
	}
	return ret;
}
EXPORT_SYMBOL(match_strlcpy);

/**
 * match_strdup - allocate a new string with the contents of a substring_t
 * @s: &substring_t to copy
 *
 * Description: Allocates and returns a string filled with the contents of
 * the &substring_t @s. The caller is responsible for freeing the returned
 * string with kfree().
 *
 * Return: the address of the newly allocated NUL-terminated string or
 * %NULL on error.
 */
char *match_strdup(const substring_t *s)
{
	return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
}
EXPORT_SYMBOL(match_strdup);
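/*
 * Illustrative sketch (not part of the file above): a typical
 * match_token()/match_octal() caller in the style of mount-option parsers.
 * The token table, option names, and example_parse() are hypothetical.
 */
enum { Opt_mode, Opt_ro, Opt_err };

static const match_table_t example_tokens = {
	{ Opt_mode,	"mode=%o" },	/* octal argument captured in args[0] */
	{ Opt_ro,	"ro" },		/* bare token, no argument */
	{ Opt_err,	NULL }		/* terminator: pattern must be NULL */
};

static int example_parse(char *opt, int *mode, bool *ro)
{
	substring_t args[MAX_OPT_ARGS];

	switch (match_token(opt, example_tokens, args)) {
	case Opt_mode:
		return match_octal(&args[0], mode);
	case Opt_ro:
		*ro = true;
		return 0;
	default:
		return -EINVAL;
	}
}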
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 *   calling socket(2)) and those created by incoming connection request
 *   packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 *   specified an address that they are responsible for) and one for connected
 *   sockets (sockets that have established a connection with another socket).
 *   These tables are "global" in that all sockets on the system are placed
 *   within them.
 *
 * - Note, though, that the bound table contains an extra entry for a list of
 *   unbound sockets, and SOCK_DGRAM sockets will always remain in that list.
 *   The bound table is used solely for lookup of sockets when packets are
 *   received, and that's not necessary for SOCK_DGRAM sockets since we create
 *   a datagram handle for each and need not perform a lookup. Keeping
 *   SOCK_DGRAM sockets out of the bound hash buckets will reduce the chance
 *   of collisions when looking for SOCK_STREAM sockets and prevents us from
 *   having to check the socket type in the hash table lookups.
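 *
 *   As a sketch of that layout (matching the table definitions further
 *   below in this file):
 *
 *     struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
 *         buckets [0 .. VSOCK_HASH_SIZE - 1]: bound sockets
 *         bucket  [VSOCK_HASH_SIZE]:          unbound sockets
 *     struct list_head vsock_connected_table[VSOCK_HASH_SIZE];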
* * - Sockets created by user action will either be "client" sockets that * initiate a connection or "server" sockets that listen for connections; we do * not support simultaneous connects (two "client" sockets connecting). * * - "Server" sockets are referred to as listener sockets throughout this * implementation because they are in the TCP_LISTEN state. When a * connection request is received (the second kind of socket mentioned above), * we create a new socket and refer to it as a pending socket. These pending * sockets are placed on the pending connection list of the listener socket. * When future packets are received for the address the listener socket is * bound to, we check if the source of the packet is from one that has an * existing pending connection. If it does, we process the packet for the * pending socket. When that socket reaches the connected state, it is removed * from the listener socket's pending list and enqueued in the listener * socket's accept queue. Callers of accept(2) will accept connected sockets * from the listener socket's accept queue. If the socket cannot be accepted * for some reason then it is marked rejected. Once the connection is * accepted, it is owned by the user process and the responsibility for cleanup * falls with that user process. * * - It is possible that these pending sockets will never reach the connected * state; in fact, we may never receive another packet after the connection * request. Because of this, we must schedule a cleanup function to run in the * future, after some amount of time passes where a connection should have been * established. This function ensures that the socket is off all lists so it * cannot be retrieved, then drops all references to the socket so it is cleaned * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this * function will also cleanup rejected sockets, those that reach the connected * state but leave it before they have been accepted. * * - Lock ordering for pending or accept queue sockets is: * * lock_sock(listener); * lock_sock_nested(pending, SINGLE_DEPTH_NESTING); * * Using explicit nested locking keeps lockdep happy since normally only one * lock of a given class may be taken at a time. * * - Sockets created by user action will be cleaned up when the user process * calls close(2), causing our release implementation to be called. Our release * implementation will perform some cleanup then drop the last reference so our * sk_destruct implementation is invoked. Our sk_destruct implementation will * perform additional cleanup that's common for both types of sockets. * * - A socket's reference count is what ensures that the structure won't be * freed. Each entry in a list (such as the "global" bound and connected tables * and the listener socket's pending list and connected queue) ensures a * reference. When we defer work until process context and pass a socket as our * argument, we must ensure the reference count is increased to ensure the * socket isn't freed before the function is run; the deferred function will * then drop the reference. 
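 *
 *   A concrete instance of this pattern appears later in this file:
 *   vsock_connect() takes a reference before arming its timeout,
 *
 *     sock_hold(sk);
 *     mod_delayed_work(system_percpu_wq, &vsk->connect_work, timeout);
 *
 *   and vsock_connect_timeout() drops it with sock_put(sk) once it has
 *   run.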
* * - sk->sk_state uses the TCP state constants because they are widely used by * other address families and exposed to userspace tools like ss(8): * * TCP_CLOSE - unconnected * TCP_SYN_SENT - connecting * TCP_ESTABLISHED - connected * TCP_CLOSING - disconnecting * TCP_LISTEN - listening */ #include <linux/compat.h> #include <linux/types.h> #include <linux/bitops.h> #include <linux/cred.h> #include <linux/errqueue.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/net.h> #include <linux/poll.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/smp.h> #include <linux/socket.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <net/sock.h> #include <net/af_vsock.h> #include <uapi/linux/vm_sockets.h> #include <uapi/asm-generic/ioctls.h> static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr); static void vsock_sk_destruct(struct sock *sk); static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); static void vsock_close(struct sock *sk, long timeout); /* Protocol family. */ struct proto vsock_proto = { .name = "AF_VSOCK", .owner = THIS_MODULE, .obj_size = sizeof(struct vsock_sock), .close = vsock_close, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = vsock_bpf_update_proto, #endif }; /* The default peer timeout indicates how long we will wait for a peer response * to a control message. */ #define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ) #define VSOCK_DEFAULT_BUFFER_SIZE (1024 * 256) #define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256) #define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128 /* Transport used for host->guest communication */ static const struct vsock_transport *transport_h2g; /* Transport used for guest->host communication */ static const struct vsock_transport *transport_g2h; /* Transport used for DGRAM communication */ static const struct vsock_transport *transport_dgram; /* Transport used for local communication */ static const struct vsock_transport *transport_local; static DEFINE_MUTEX(vsock_register_mutex); /**** UTILS ****/ /* Each bound VSocket is stored in the bind hash table and each connected * VSocket is stored in the connected hash table. * * Unbound sockets are all put on the same list attached to the end of the hash * table (vsock_unbound_sockets). Bound sockets are added to the hash table in * the bucket that their local address hashes to (vsock_bound_sockets(addr) * represents the list that addr hashes to). * * Specifically, we initialize the vsock_bind_table array to a size of * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function * mods with VSOCK_HASH_SIZE to ensure this. */ #define MAX_PORT_RETRIES 24 #define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE) #define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)]) #define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE]) /* XXX This can probably be implemented in a better way. 
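 * For example, with the hash below, a connection whose peer is at CID 3
 * and whose local port is 1024 lands in bucket (3 ^ 1024) % VSOCK_HASH_SIZE.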
*/ #define VSOCK_CONN_HASH(src, dst) \ (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE) #define vsock_connected_sockets(src, dst) \ (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)]) #define vsock_connected_sockets_vsk(vsk) \ vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr) struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1]; EXPORT_SYMBOL_GPL(vsock_bind_table); struct list_head vsock_connected_table[VSOCK_HASH_SIZE]; EXPORT_SYMBOL_GPL(vsock_connected_table); DEFINE_SPINLOCK(vsock_table_lock); EXPORT_SYMBOL_GPL(vsock_table_lock); /* Autobind this socket to the local address if necessary. */ static int vsock_auto_bind(struct vsock_sock *vsk) { struct sock *sk = sk_vsock(vsk); struct sockaddr_vm local_addr; if (vsock_addr_bound(&vsk->local_addr)) return 0; vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); return __vsock_bind(sk, &local_addr); } static void vsock_init_tables(void) { int i; for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++) INIT_LIST_HEAD(&vsock_bind_table[i]); for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) INIT_LIST_HEAD(&vsock_connected_table[i]); } static void __vsock_insert_bound(struct list_head *list, struct vsock_sock *vsk) { sock_hold(&vsk->sk); list_add(&vsk->bound_table, list); } static void __vsock_insert_connected(struct list_head *list, struct vsock_sock *vsk) { sock_hold(&vsk->sk); list_add(&vsk->connected_table, list); } static void __vsock_remove_bound(struct vsock_sock *vsk) { list_del_init(&vsk->bound_table); sock_put(&vsk->sk); } static void __vsock_remove_connected(struct vsock_sock *vsk) { list_del_init(&vsk->connected_table); sock_put(&vsk->sk); } static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) { struct vsock_sock *vsk; list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) { if (vsock_addr_equals_addr(addr, &vsk->local_addr)) return sk_vsock(vsk); if (addr->svm_port == vsk->local_addr.svm_port && (vsk->local_addr.svm_cid == VMADDR_CID_ANY || addr->svm_cid == VMADDR_CID_ANY)) return sk_vsock(vsk); } return NULL; } static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst) { struct vsock_sock *vsk; list_for_each_entry(vsk, vsock_connected_sockets(src, dst), connected_table) { if (vsock_addr_equals_addr(src, &vsk->remote_addr) && dst->svm_port == vsk->local_addr.svm_port) { return sk_vsock(vsk); } } return NULL; } static void vsock_insert_unbound(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); __vsock_insert_bound(vsock_unbound_sockets, vsk); spin_unlock_bh(&vsock_table_lock); } void vsock_insert_connected(struct vsock_sock *vsk) { struct list_head *list = vsock_connected_sockets( &vsk->remote_addr, &vsk->local_addr); spin_lock_bh(&vsock_table_lock); __vsock_insert_connected(list, vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_insert_connected); void vsock_remove_bound(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); if (__vsock_in_bound_table(vsk)) __vsock_remove_bound(vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_remove_bound); void vsock_remove_connected(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); if (__vsock_in_connected_table(vsk)) __vsock_remove_connected(vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_remove_connected); struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr) { struct sock *sk; spin_lock_bh(&vsock_table_lock); sk = __vsock_find_bound_socket(addr); if (sk) sock_hold(sk); spin_unlock_bh(&vsock_table_lock); return 
sk; } EXPORT_SYMBOL_GPL(vsock_find_bound_socket); struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst) { struct sock *sk; spin_lock_bh(&vsock_table_lock); sk = __vsock_find_connected_socket(src, dst); if (sk) sock_hold(sk); spin_unlock_bh(&vsock_table_lock); return sk; } EXPORT_SYMBOL_GPL(vsock_find_connected_socket); void vsock_remove_sock(struct vsock_sock *vsk) { /* Transport reassignment must not remove the binding. */ if (sock_flag(sk_vsock(vsk), SOCK_DEAD)) vsock_remove_bound(vsk); vsock_remove_connected(vsk); } EXPORT_SYMBOL_GPL(vsock_remove_sock); void vsock_for_each_connected_socket(struct vsock_transport *transport, void (*fn)(struct sock *sk)) { int i; spin_lock_bh(&vsock_table_lock); for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { struct vsock_sock *vsk; list_for_each_entry(vsk, &vsock_connected_table[i], connected_table) { if (vsk->transport != transport) continue; fn(sk_vsock(vsk)); } } spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket); void vsock_add_pending(struct sock *listener, struct sock *pending) { struct vsock_sock *vlistener; struct vsock_sock *vpending; vlistener = vsock_sk(listener); vpending = vsock_sk(pending); sock_hold(pending); sock_hold(listener); list_add_tail(&vpending->pending_links, &vlistener->pending_links); } EXPORT_SYMBOL_GPL(vsock_add_pending); void vsock_remove_pending(struct sock *listener, struct sock *pending) { struct vsock_sock *vpending = vsock_sk(pending); list_del_init(&vpending->pending_links); sock_put(listener); sock_put(pending); } EXPORT_SYMBOL_GPL(vsock_remove_pending); void vsock_enqueue_accept(struct sock *listener, struct sock *connected) { struct vsock_sock *vlistener; struct vsock_sock *vconnected; vlistener = vsock_sk(listener); vconnected = vsock_sk(connected); sock_hold(connected); sock_hold(listener); list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue); } EXPORT_SYMBOL_GPL(vsock_enqueue_accept); static bool vsock_use_local_transport(unsigned int remote_cid) { lockdep_assert_held(&vsock_register_mutex); if (!transport_local) return false; if (remote_cid == VMADDR_CID_LOCAL) return true; if (transport_g2h) { return remote_cid == transport_g2h->get_local_cid(); } else { return remote_cid == VMADDR_CID_HOST; } } static void vsock_deassign_transport(struct vsock_sock *vsk) { if (!vsk->transport) return; vsk->transport->destruct(vsk); module_put(vsk->transport->module); vsk->transport = NULL; } /* Assign a transport to a socket and call the .init transport callback. * * Note: for connection oriented socket this must be called when vsk->remote_addr * is set (e.g. during the connect() or when a connection request on a listener * socket is received). 
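 *
 * For example (assuming both g2h and h2g transports are registered and
 * VMADDR_FLAG_TO_HOST is not set), a guest socket connecting to
 * VMADDR_CID_HOST is assigned transport_g2h, while a socket connecting
 * to a guest CID above VMADDR_CID_HOST is assigned transport_h2g; the
 * exact rules are spelled out below.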
* The vsk->remote_addr is used to decide which transport to use: * - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if * g2h is not loaded, will use local transport; * - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field * includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport; * - remote CID > VMADDR_CID_HOST will use host->guest transport; */ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) { const struct vsock_transport *new_transport; struct sock *sk = sk_vsock(vsk); unsigned int remote_cid = vsk->remote_addr.svm_cid; __u8 remote_flags; int ret; /* If the packet is coming with the source and destination CIDs higher * than VMADDR_CID_HOST, then a vsock channel where all the packets are * forwarded to the host should be established. Then the host will * need to forward the packets to the guest. * * The flag is set on the (listen) receive path (psk is not NULL). On * the connect path the flag can be set by the user space application. */ if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST && vsk->remote_addr.svm_cid > VMADDR_CID_HOST) vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST; remote_flags = vsk->remote_addr.svm_flags; mutex_lock(&vsock_register_mutex); switch (sk->sk_type) { case SOCK_DGRAM: new_transport = transport_dgram; break; case SOCK_STREAM: case SOCK_SEQPACKET: if (vsock_use_local_transport(remote_cid)) new_transport = transport_local; else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g || (remote_flags & VMADDR_FLAG_TO_HOST)) new_transport = transport_g2h; else new_transport = transport_h2g; break; default: ret = -ESOCKTNOSUPPORT; goto err; } if (vsk->transport && vsk->transport == new_transport) { ret = 0; goto err; } /* We increase the module refcnt to prevent the transport unloading * while there are open sockets assigned to it. */ if (!new_transport || !try_module_get(new_transport->module)) { ret = -ENODEV; goto err; } /* It's safe to release the mutex after a successful try_module_get(). * Whichever transport `new_transport` points at, it won't go away until * the last module_put() below or in vsock_deassign_transport(). */ mutex_unlock(&vsock_register_mutex); if (vsk->transport) { /* transport->release() must be called with sock lock acquired. * This path can only be taken during vsock_connect(), where we * have already held the sock lock. In the other cases, this * function is called on a new socket which is not assigned to * any transport. */ vsk->transport->release(vsk); vsock_deassign_transport(vsk); /* transport's release() and destruct() can touch some socket * state, since we are reassigning the socket to a new transport * during vsock_connect(), let's reset these fields to have a * clean state. */ sock_reset_flag(sk, SOCK_DONE); sk->sk_state = TCP_CLOSE; vsk->peer_shutdown = 0; } if (sk->sk_type == SOCK_SEQPACKET) { if (!new_transport->seqpacket_allow || !new_transport->seqpacket_allow(remote_cid)) { module_put(new_transport->module); return -ESOCKTNOSUPPORT; } } ret = new_transport->init(vsk, psk); if (ret) { module_put(new_transport->module); return ret; } vsk->transport = new_transport; return 0; err: mutex_unlock(&vsock_register_mutex); return ret; } EXPORT_SYMBOL_GPL(vsock_assign_transport); /* * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks. * Otherwise we may race with module removal. Do not use on `vsk->transport`. 
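 *
 * A typical safe use, mirroring vsock_find_cid() below:
 *
 *     u32 cid = vsock_registered_transport_cid(&transport_g2h);
 *     if (cid != VMADDR_CID_ANY)
 *         ...a g2h transport was registered when the pointer was sampled...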
*/ static u32 vsock_registered_transport_cid(const struct vsock_transport **transport) { u32 cid = VMADDR_CID_ANY; mutex_lock(&vsock_register_mutex); if (*transport) cid = (*transport)->get_local_cid(); mutex_unlock(&vsock_register_mutex); return cid; } bool vsock_find_cid(unsigned int cid) { if (cid == vsock_registered_transport_cid(&transport_g2h)) return true; if (transport_h2g && cid == VMADDR_CID_HOST) return true; if (transport_local && cid == VMADDR_CID_LOCAL) return true; return false; } EXPORT_SYMBOL_GPL(vsock_find_cid); static struct sock *vsock_dequeue_accept(struct sock *listener) { struct vsock_sock *vlistener; struct vsock_sock *vconnected; vlistener = vsock_sk(listener); if (list_empty(&vlistener->accept_queue)) return NULL; vconnected = list_entry(vlistener->accept_queue.next, struct vsock_sock, accept_queue); list_del_init(&vconnected->accept_queue); sock_put(listener); /* The caller will need a reference on the connected socket so we let * it call sock_put(). */ return sk_vsock(vconnected); } static bool vsock_is_accept_queue_empty(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); return list_empty(&vsk->accept_queue); } static bool vsock_is_pending(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); return !list_empty(&vsk->pending_links); } static int vsock_send_shutdown(struct sock *sk, int mode) { struct vsock_sock *vsk = vsock_sk(sk); if (!vsk->transport) return -ENODEV; return vsk->transport->shutdown(vsk, mode); } static void vsock_pending_work(struct work_struct *work) { struct sock *sk; struct sock *listener; struct vsock_sock *vsk; bool cleanup; vsk = container_of(work, struct vsock_sock, pending_work.work); sk = sk_vsock(vsk); listener = vsk->listener; cleanup = true; lock_sock(listener); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (vsock_is_pending(sk)) { vsock_remove_pending(listener, sk); sk_acceptq_removed(listener); } else if (!vsk->rejected) { /* We are not on the pending list and accept() did not reject * us, so we must have been accepted by our user process. We * just need to drop our references to the sockets and be on * our way. */ cleanup = false; goto out; } /* We need to remove ourself from the global connected sockets list so * incoming packets can't find this socket, and to reduce the reference * count. */ vsock_remove_connected(vsk); sk->sk_state = TCP_CLOSE; out: release_sock(sk); release_sock(listener); if (cleanup) sock_put(sk); sock_put(sk); sock_put(listener); } /**** SOCKET OPERATIONS ****/ static int __vsock_bind_connectible(struct vsock_sock *vsk, struct sockaddr_vm *addr) { static u32 port; struct sockaddr_vm new_addr; if (!port) port = get_random_u32_above(LAST_RESERVED_PORT); vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port); if (addr->svm_port == VMADDR_PORT_ANY) { bool found = false; unsigned int i; for (i = 0; i < MAX_PORT_RETRIES; i++) { if (port == VMADDR_PORT_ANY || port <= LAST_RESERVED_PORT) port = LAST_RESERVED_PORT + 1; new_addr.svm_port = port++; if (!__vsock_find_bound_socket(&new_addr)) { found = true; break; } } if (!found) return -EADDRNOTAVAIL; } else { /* If port is in reserved range, ensure caller * has necessary privileges. */ if (addr->svm_port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE)) { return -EACCES; } if (__vsock_find_bound_socket(&new_addr)) return -EADDRINUSE; } vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port); /* Remove connection oriented sockets from the unbound list and add them * to the hash table for easy lookup by its address. 
The unbound list * is simply an extra entry at the end of the hash table, a trick used * by AF_UNIX. */ __vsock_remove_bound(vsk); __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk); return 0; } static int __vsock_bind_dgram(struct vsock_sock *vsk, struct sockaddr_vm *addr) { return vsk->transport->dgram_bind(vsk, addr); } static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) { struct vsock_sock *vsk = vsock_sk(sk); int retval; /* First ensure this socket isn't already bound. */ if (vsock_addr_bound(&vsk->local_addr)) return -EINVAL; /* Now bind to the provided address or select appropriate values if * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that * like AF_INET prevents binding to a non-local IP address (in most * cases), we only allow binding to a local CID. */ if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid)) return -EADDRNOTAVAIL; switch (sk->sk_socket->type) { case SOCK_STREAM: case SOCK_SEQPACKET: spin_lock_bh(&vsock_table_lock); retval = __vsock_bind_connectible(vsk, addr); spin_unlock_bh(&vsock_table_lock); break; case SOCK_DGRAM: retval = __vsock_bind_dgram(vsk, addr); break; default: retval = -EINVAL; break; } return retval; } static void vsock_connect_timeout(struct work_struct *work); static struct sock *__vsock_create(struct net *net, struct socket *sock, struct sock *parent, gfp_t priority, unsigned short type, int kern) { struct sock *sk; struct vsock_sock *psk; struct vsock_sock *vsk; sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern); if (!sk) return NULL; sock_init_data(sock, sk); /* sk->sk_type is normally set in sock_init_data, but only if sock is * non-NULL. We make sure that our sockets always have a type by * setting it here if needed. */ if (!sock) sk->sk_type = type; vsk = vsock_sk(sk); vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); sk->sk_destruct = vsock_sk_destruct; sk->sk_backlog_rcv = vsock_queue_rcv_skb; sock_reset_flag(sk, SOCK_DONE); INIT_LIST_HEAD(&vsk->bound_table); INIT_LIST_HEAD(&vsk->connected_table); vsk->listener = NULL; INIT_LIST_HEAD(&vsk->pending_links); INIT_LIST_HEAD(&vsk->accept_queue); vsk->rejected = false; vsk->sent_request = false; vsk->ignore_connecting_rst = false; vsk->peer_shutdown = 0; INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout); INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work); psk = parent ? vsock_sk(parent) : NULL; if (parent) { vsk->trusted = psk->trusted; vsk->owner = get_cred(psk->owner); vsk->connect_timeout = psk->connect_timeout; vsk->buffer_size = psk->buffer_size; vsk->buffer_min_size = psk->buffer_min_size; vsk->buffer_max_size = psk->buffer_max_size; security_sk_clone(parent, sk); } else { vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN); vsk->owner = get_current_cred(); vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE; vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE; vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE; } return sk; } static bool sock_type_connectible(u16 type) { return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET); } static void __vsock_release(struct sock *sk, int level) { struct vsock_sock *vsk; struct sock *pending; vsk = vsock_sk(sk); pending = NULL; /* Compiler warning. */ /* When "level" is SINGLE_DEPTH_NESTING, use the nested * version to avoid the warning "possible recursive locking * detected". 
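 * (That nested case is the recursive call below that releases a
 * listener's not-yet-accepted children:
 * __vsock_release(pending, SINGLE_DEPTH_NESTING).)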
When "level" is 0, lock_sock_nested(sk, level) * is the same as lock_sock(sk). */ lock_sock_nested(sk, level); /* Indicate to vsock_remove_sock() that the socket is being released and * can be removed from the bound_table. Unlike transport reassignment * case, where the socket must remain bound despite vsock_remove_sock() * being called from the transport release() callback. */ sock_set_flag(sk, SOCK_DEAD); if (vsk->transport) vsk->transport->release(vsk); else if (sock_type_connectible(sk->sk_type)) vsock_remove_sock(vsk); sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; skb_queue_purge(&sk->sk_receive_queue); /* Clean up any sockets that never were accepted. */ while ((pending = vsock_dequeue_accept(sk)) != NULL) { __vsock_release(pending, SINGLE_DEPTH_NESTING); sock_put(pending); } release_sock(sk); sock_put(sk); } static void vsock_sk_destruct(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); /* Flush MSG_ZEROCOPY leftovers. */ __skb_queue_purge(&sk->sk_error_queue); vsock_deassign_transport(vsk); /* When clearing these addresses, there's no need to set the family and * possibly register the address family with the kernel. */ vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); put_cred(vsk->owner); } static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; err = sock_queue_rcv_skb(sk, skb); if (err) kfree_skb(skb); return err; } struct sock *vsock_create_connected(struct sock *parent) { return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL, parent->sk_type, 0); } EXPORT_SYMBOL_GPL(vsock_create_connected); s64 vsock_stream_has_data(struct vsock_sock *vsk) { if (WARN_ON(!vsk->transport)) return 0; return vsk->transport->stream_has_data(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_data); s64 vsock_connectible_has_data(struct vsock_sock *vsk) { struct sock *sk = sk_vsock(vsk); if (WARN_ON(!vsk->transport)) return 0; if (sk->sk_type == SOCK_SEQPACKET) return vsk->transport->seqpacket_has_data(vsk); else return vsock_stream_has_data(vsk); } EXPORT_SYMBOL_GPL(vsock_connectible_has_data); s64 vsock_stream_has_space(struct vsock_sock *vsk) { if (WARN_ON(!vsk->transport)) return 0; return vsk->transport->stream_has_space(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_space); void vsock_data_ready(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); if (vsock_stream_has_data(vsk) >= sk->sk_rcvlowat || sock_flag(sk, SOCK_DONE)) sk->sk_data_ready(sk); } EXPORT_SYMBOL_GPL(vsock_data_ready); /* Dummy callback required by sockmap. * See unconditional call of saved_close() in sock_map_close(). 
*/ static void vsock_close(struct sock *sk, long timeout) { } static int vsock_release(struct socket *sock) { struct sock *sk = sock->sk; if (!sk) return 0; sk->sk_prot->close(sk, 0); __vsock_release(sk, 0); sock->sk = NULL; sock->state = SS_FREE; return 0; } static int vsock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { int err; struct sock *sk; struct sockaddr_vm *vm_addr; sk = sock->sk; if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0) return -EINVAL; lock_sock(sk); err = __vsock_bind(sk, vm_addr); release_sock(sk); return err; } static int vsock_getname(struct socket *sock, struct sockaddr *addr, int peer) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *vm_addr; sk = sock->sk; vsk = vsock_sk(sk); err = 0; lock_sock(sk); if (peer) { if (sock->state != SS_CONNECTED) { err = -ENOTCONN; goto out; } vm_addr = &vsk->remote_addr; } else { vm_addr = &vsk->local_addr; } BUILD_BUG_ON(sizeof(*vm_addr) > sizeof(struct sockaddr_storage)); memcpy(addr, vm_addr, sizeof(*vm_addr)); err = sizeof(*vm_addr); out: release_sock(sk); return err; } void vsock_linger(struct sock *sk) { DEFINE_WAIT_FUNC(wait, woken_wake_function); ssize_t (*unsent)(struct vsock_sock *vsk); struct vsock_sock *vsk = vsock_sk(sk); long timeout; if (!sock_flag(sk, SOCK_LINGER)) return; timeout = sk->sk_lingertime; if (!timeout) return; /* Transports must implement `unsent_bytes` if they want to support * SOCK_LINGER through `vsock_linger()` since we use it to check when * the socket can be closed. */ unsent = vsk->transport->unsent_bytes; if (!unsent) return; add_wait_queue(sk_sleep(sk), &wait); do { if (sk_wait_event(sk, &timeout, unsent(vsk) == 0, &wait)) break; } while (!signal_pending(current) && timeout); remove_wait_queue(sk_sleep(sk), &wait); } EXPORT_SYMBOL_GPL(vsock_linger); static int vsock_shutdown(struct socket *sock, int mode) { int err; struct sock *sk; /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode * here like the other address families do. Note also that the * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3), * which is what we want. */ mode++; if ((mode & ~SHUTDOWN_MASK) || !mode) return -EINVAL; /* If this is a connection oriented socket and it is not connected then * bail out immediately. If it is a DGRAM socket then we must first * kick the socket so that it wakes up from any sleeping calls, for * example recv(), and then afterwards return the error. */ sk = sock->sk; lock_sock(sk); if (sock->state == SS_UNCONNECTED) { err = -ENOTCONN; if (sock_type_connectible(sk->sk_type)) goto out; } else { sock->state = SS_DISCONNECTING; err = 0; } /* Receive and send shutdowns are treated alike. */ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); if (mode) { sk->sk_shutdown |= mode; sk->sk_state_change(sk); if (sock_type_connectible(sk->sk_type)) { sock_reset_flag(sk, SOCK_DONE); vsock_send_shutdown(sk, mode); } } out: release_sock(sk); return err; } static __poll_t vsock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk; __poll_t mask; struct vsock_sock *vsk; sk = sock->sk; vsk = vsock_sk(sk); poll_wait(file, sk_sleep(sk), wait); mask = 0; if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) /* Signify that there has been an error on this socket. */ mask |= EPOLLERR; /* INET sockets treat local write shutdown and peer write shutdown as a * case of EPOLLHUP set. 
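 * That is, with the check below, EPOLLHUP is raised either when both
 * local directions are shut down (sk_shutdown == SHUTDOWN_MASK) or when
 * the write side is closed on both ends (local SEND_SHUTDOWN and peer
 * SEND_SHUTDOWN).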
*/ if ((sk->sk_shutdown == SHUTDOWN_MASK) || ((sk->sk_shutdown & SEND_SHUTDOWN) && (vsk->peer_shutdown & SEND_SHUTDOWN))) { mask |= EPOLLHUP; } if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { mask |= EPOLLRDHUP; } if (sk_is_readable(sk)) mask |= EPOLLIN | EPOLLRDNORM; if (sock->type == SOCK_DGRAM) { /* For datagram sockets we can read if there is something in * the queue and write as long as the socket isn't shutdown for * sending. */ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) { mask |= EPOLLIN | EPOLLRDNORM; } if (!(sk->sk_shutdown & SEND_SHUTDOWN)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; } else if (sock_type_connectible(sk->sk_type)) { const struct vsock_transport *transport; lock_sock(sk); transport = vsk->transport; /* Listening sockets that have connections in their accept * queue can be read. */ if (sk->sk_state == TCP_LISTEN && !vsock_is_accept_queue_empty(sk)) mask |= EPOLLIN | EPOLLRDNORM; /* If there is something in the queue then we can read. */ if (transport && transport->stream_is_active(vsk) && !(sk->sk_shutdown & RCV_SHUTDOWN)) { bool data_ready_now = false; int target = sock_rcvlowat(sk, 0, INT_MAX); int ret = transport->notify_poll_in( vsk, target, &data_ready_now); if (ret < 0) { mask |= EPOLLERR; } else { if (data_ready_now) mask |= EPOLLIN | EPOLLRDNORM; } } /* Sockets whose connections have been closed, reset, or * terminated should also be considered read, and we check the * shutdown flag for that. */ if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { mask |= EPOLLIN | EPOLLRDNORM; } /* Connected sockets that can produce data can be written. */ if (transport && sk->sk_state == TCP_ESTABLISHED) { if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { bool space_avail_now = false; int ret = transport->notify_poll_out( vsk, 1, &space_avail_now); if (ret < 0) { mask |= EPOLLERR; } else { if (space_avail_now) /* Remove EPOLLWRBAND since INET * sockets are not setting it. */ mask |= EPOLLOUT | EPOLLWRNORM; } } } /* Simulate INET socket poll behaviors, which sets * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read, * but local send is not shutdown. */ if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) { if (!(sk->sk_shutdown & SEND_SHUTDOWN)) mask |= EPOLLOUT | EPOLLWRNORM; } release_sock(sk); } return mask; } static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor) { struct vsock_sock *vsk = vsock_sk(sk); if (WARN_ON_ONCE(!vsk->transport)) return -ENODEV; return vsk->transport->read_skb(vsk, read_actor); } static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *remote_addr; const struct vsock_transport *transport; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* For now, MSG_DONTWAIT is always assumed... */ err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); transport = vsk->transport; err = vsock_auto_bind(vsk); if (err) goto out; /* If the provided message contains an address, use that. Otherwise * fall back on the socket's remote handle (if it has been connected). */ if (msg->msg_name && vsock_addr_cast(msg->msg_name, msg->msg_namelen, &remote_addr) == 0) { /* Ensure this address is of the right type and is a valid * destination. 
	 */
	if (remote_addr->svm_cid == VMADDR_CID_ANY)
		remote_addr->svm_cid = transport->get_local_cid();

	if (!vsock_addr_bound(remote_addr)) {
		err = -EINVAL;
		goto out;
	}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr_unsized *addr,
			       int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
					 remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

	/* sock map disallows redirection of non-TCP sockets with sk_state !=
	 * TCP_ESTABLISHED (see sock_map_redirect_allowed()), so we set
	 * TCP_ESTABLISHED here to allow redirection of connected vsock dgrams.
	 *
	 * This doesn't seem to be an abnormal state for datagram sockets, as
	 * the same approach can be seen in other datagram socket types as
	 * well (such as unix sockets).
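	 *
	 * A minimal (hypothetical) userspace sequence that reaches this point:
	 *
	 *     int fd = socket(AF_VSOCK, SOCK_DGRAM, 0);
	 *     struct sockaddr_vm addr = {
	 *         .svm_family = AF_VSOCK,
	 *         .svm_cid    = VMADDR_CID_HOST,
	 *         .svm_port   = 1234,            <- illustrative port only
	 *     };
	 *     connect(fd, (struct sockaddr *)&addr, sizeof(addr));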
*/ sk->sk_state = TCP_ESTABLISHED; out: release_sock(sk); return err; } int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct vsock_sock *vsk = vsock_sk(sk); return vsk->transport->dgram_dequeue(vsk, msg, len, flags); } int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { #ifdef CONFIG_BPF_SYSCALL struct sock *sk = sock->sk; const struct proto *prot; prot = READ_ONCE(sk->sk_prot); if (prot != &vsock_proto) return prot->recvmsg(sk, msg, len, flags, NULL); #endif return __vsock_dgram_recvmsg(sock, msg, len, flags); } EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg); static int vsock_do_ioctl(struct socket *sock, unsigned int cmd, int __user *arg) { struct sock *sk = sock->sk; struct vsock_sock *vsk; int ret; vsk = vsock_sk(sk); switch (cmd) { case SIOCINQ: { ssize_t n_bytes; if (!vsk->transport) { ret = -EOPNOTSUPP; break; } if (sock_type_connectible(sk->sk_type) && sk->sk_state == TCP_LISTEN) { ret = -EINVAL; break; } n_bytes = vsock_stream_has_data(vsk); if (n_bytes < 0) { ret = n_bytes; break; } ret = put_user(n_bytes, arg); break; } case SIOCOUTQ: { ssize_t n_bytes; if (!vsk->transport || !vsk->transport->unsent_bytes) { ret = -EOPNOTSUPP; break; } if (sock_type_connectible(sk->sk_type) && sk->sk_state == TCP_LISTEN) { ret = -EINVAL; break; } n_bytes = vsk->transport->unsent_bytes(vsk); if (n_bytes < 0) { ret = n_bytes; break; } ret = put_user(n_bytes, arg); break; } default: ret = -ENOIOCTLCMD; } return ret; } static int vsock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int ret; lock_sock(sock->sk); ret = vsock_do_ioctl(sock, cmd, (int __user *)arg); release_sock(sock->sk); return ret; } static const struct proto_ops vsock_dgram_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = vsock_ioctl, .listen = sock_no_listen, .shutdown = vsock_shutdown, .sendmsg = vsock_dgram_sendmsg, .recvmsg = vsock_dgram_recvmsg, .mmap = sock_no_mmap, .read_skb = vsock_read_skb, }; static int vsock_transport_cancel_pkt(struct vsock_sock *vsk) { const struct vsock_transport *transport = vsk->transport; if (!transport || !transport->cancel_pkt) return -EOPNOTSUPP; return transport->cancel_pkt(vsk); } static void vsock_connect_timeout(struct work_struct *work) { struct sock *sk; struct vsock_sock *vsk; vsk = container_of(work, struct vsock_sock, connect_work.work); sk = sk_vsock(vsk); lock_sock(sk); if (sk->sk_state == TCP_SYN_SENT && (sk->sk_shutdown != SHUTDOWN_MASK)) { sk->sk_state = TCP_CLOSE; sk->sk_socket->state = SS_UNCONNECTED; sk->sk_err = ETIMEDOUT; sk_error_report(sk); vsock_transport_cancel_pkt(vsk); } release_sock(sk); sock_put(sk); } static int vsock_connect(struct socket *sock, struct sockaddr_unsized *addr, int addr_len, int flags) { int err; struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; struct sockaddr_vm *remote_addr; long timeout; DEFINE_WAIT(wait); err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); /* XXX AF_UNSPEC should make us disconnect like AF_INET. */ switch (sock->state) { case SS_CONNECTED: err = -EISCONN; goto out; case SS_DISCONNECTING: err = -EINVAL; goto out; case SS_CONNECTING: /* This continues on so we can move sock into the SS_CONNECTED * state once the connection has completed (at which point err * will be set to zero also). 
Otherwise, we will either wait * for the connection or return -EALREADY should this be a * non-blocking call. */ err = -EALREADY; if (flags & O_NONBLOCK) goto out; break; default: if ((sk->sk_state == TCP_LISTEN) || vsock_addr_cast(addr, addr_len, &remote_addr) != 0) { err = -EINVAL; goto out; } /* Set the remote address that we are connecting to. */ memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr)); err = vsock_assign_transport(vsk, NULL); if (err) goto out; transport = vsk->transport; /* The hypervisor and well-known contexts do not have socket * endpoints. */ if (!transport || !transport->stream_allow(remote_addr->svm_cid, remote_addr->svm_port)) { err = -ENETUNREACH; goto out; } if (vsock_msgzerocopy_allow(transport)) { set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); } else if (sock_flag(sk, SOCK_ZEROCOPY)) { /* If this option was set before 'connect()', * when transport was unknown, check that this * feature is supported here. */ err = -EOPNOTSUPP; goto out; } err = vsock_auto_bind(vsk); if (err) goto out; sk->sk_state = TCP_SYN_SENT; err = transport->connect(vsk); if (err < 0) goto out; /* sk_err might have been set as a result of an earlier * (failed) connect attempt. */ sk->sk_err = 0; /* Mark sock as connecting and set the error code to in * progress in case this is a non-blocking connect. */ sock->state = SS_CONNECTING; err = -EINPROGRESS; } /* The receive path will handle all communication until we are able to * enter the connected state. Here we wait for the connection to be * completed or a notification of an error. */ timeout = vsk->connect_timeout; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* If the socket is already closing or it is in an error state, there * is no point in waiting. */ while (sk->sk_state != TCP_ESTABLISHED && sk->sk_state != TCP_CLOSING && sk->sk_err == 0) { if (flags & O_NONBLOCK) { /* If we're not going to block, we schedule a timeout * function to generate a timeout on the connection * attempt, in case the peer doesn't respond in a * timely manner. We hold on to the socket until the * timeout fires. */ sock_hold(sk); /* If the timeout function is already scheduled, * reschedule it, then ungrab the socket refcount to * keep it balanced. */ if (mod_delayed_work(system_percpu_wq, &vsk->connect_work, timeout)) sock_put(sk); /* Skip ahead to preserve error code set above. */ goto out_wait; } release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? 
TCP_CLOSING : TCP_CLOSE; sock->state = SS_UNCONNECTED; vsock_transport_cancel_pkt(vsk); vsock_remove_connected(vsk); goto out_wait; } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) { err = -ETIMEDOUT; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; vsock_transport_cancel_pkt(vsk); goto out_wait; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } if (sk->sk_err) { err = -sk->sk_err; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; } else { err = 0; } out_wait: finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; } static int vsock_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct sock *listener; int err; struct sock *connected; struct vsock_sock *vconnected; long timeout; DEFINE_WAIT(wait); err = 0; listener = sock->sk; lock_sock(listener); if (!sock_type_connectible(sock->type)) { err = -EOPNOTSUPP; goto out; } if (listener->sk_state != TCP_LISTEN) { err = -EINVAL; goto out; } /* Wait for children sockets to appear; these are the new sockets * created upon connection establishment. */ timeout = sock_rcvtimeo(listener, arg->flags & O_NONBLOCK); prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); while ((connected = vsock_dequeue_accept(listener)) == NULL && listener->sk_err == 0) { release_sock(listener); timeout = schedule_timeout(timeout); finish_wait(sk_sleep(listener), &wait); lock_sock(listener); if (signal_pending(current)) { err = sock_intr_errno(timeout); goto out; } else if (timeout == 0) { err = -EAGAIN; goto out; } prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(listener), &wait); if (listener->sk_err) err = -listener->sk_err; if (connected) { sk_acceptq_removed(listener); lock_sock_nested(connected, SINGLE_DEPTH_NESTING); vconnected = vsock_sk(connected); /* If the listener socket has received an error, then we should * reject this socket and return. Note that we simply mark the * socket rejected, drop our reference, and let the cleanup * function handle the cleanup; the fact that we found it in * the listener's accept queue guarantees that the cleanup * function hasn't run yet. 
*/ if (err) { vconnected->rejected = true; } else { newsock->state = SS_CONNECTED; sock_graft(connected, newsock); if (vsock_msgzerocopy_allow(vconnected->transport)) set_bit(SOCK_SUPPORT_ZC, &connected->sk_socket->flags); } release_sock(connected); sock_put(connected); } out: release_sock(listener); return err; } static int vsock_listen(struct socket *sock, int backlog) { int err; struct sock *sk; struct vsock_sock *vsk; sk = sock->sk; lock_sock(sk); if (!sock_type_connectible(sk->sk_type)) { err = -EOPNOTSUPP; goto out; } if (sock->state != SS_UNCONNECTED) { err = -EINVAL; goto out; } vsk = vsock_sk(sk); if (!vsock_addr_bound(&vsk->local_addr)) { err = -EINVAL; goto out; } sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; err = 0; out: release_sock(sk); return err; } static void vsock_update_buffer_size(struct vsock_sock *vsk, const struct vsock_transport *transport, u64 val) { if (val > vsk->buffer_max_size) val = vsk->buffer_max_size; if (val < vsk->buffer_min_size) val = vsk->buffer_min_size; if (val != vsk->buffer_size && transport && transport->notify_buffer_size) transport->notify_buffer_size(vsk, &val); vsk->buffer_size = val; } static int vsock_connectible_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { int err; struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; u64 val; if (level != AF_VSOCK && level != SOL_SOCKET) return -ENOPROTOOPT; #define COPY_IN(_v) \ do { \ if (optlen < sizeof(_v)) { \ err = -EINVAL; \ goto exit; \ } \ if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \ err = -EFAULT; \ goto exit; \ } \ } while (0) err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); transport = vsk->transport; if (level == SOL_SOCKET) { int zerocopy; if (optname != SO_ZEROCOPY) { release_sock(sk); return sock_setsockopt(sock, level, optname, optval, optlen); } /* Use 'int' type here, because variable to * set this option usually has this type. 
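	 * (A hypothetical userspace counterpart: int one = 1;
	 *  setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	 *  after which sendmsg() may be called with MSG_ZEROCOPY.)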
*/ COPY_IN(zerocopy); if (zerocopy < 0 || zerocopy > 1) { err = -EINVAL; goto exit; } if (transport && !vsock_msgzerocopy_allow(transport)) { err = -EOPNOTSUPP; goto exit; } sock_valbool_flag(sk, SOCK_ZEROCOPY, zerocopy); goto exit; } switch (optname) { case SO_VM_SOCKETS_BUFFER_SIZE: COPY_IN(val); vsock_update_buffer_size(vsk, transport, val); break; case SO_VM_SOCKETS_BUFFER_MAX_SIZE: COPY_IN(val); vsk->buffer_max_size = val; vsock_update_buffer_size(vsk, transport, vsk->buffer_size); break; case SO_VM_SOCKETS_BUFFER_MIN_SIZE: COPY_IN(val); vsk->buffer_min_size = val; vsock_update_buffer_size(vsk, transport, vsk->buffer_size); break; case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: { struct __kernel_sock_timeval tv; err = sock_copy_user_timeval(&tv, optval, optlen, optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); if (err) break; if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC && tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) { vsk->connect_timeout = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ)); if (vsk->connect_timeout == 0) vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; } else { err = -ERANGE; } break; } default: err = -ENOPROTOOPT; break; } #undef COPY_IN exit: release_sock(sk); return err; } static int vsock_connectible_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct vsock_sock *vsk = vsock_sk(sk); union { u64 val64; struct old_timeval32 tm32; struct __kernel_old_timeval tm; struct __kernel_sock_timeval stm; } v; int lv = sizeof(v.val64); int len; if (level != AF_VSOCK) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; memset(&v, 0, sizeof(v)); switch (optname) { case SO_VM_SOCKETS_BUFFER_SIZE: v.val64 = vsk->buffer_size; break; case SO_VM_SOCKETS_BUFFER_MAX_SIZE: v.val64 = vsk->buffer_max_size; break; case SO_VM_SOCKETS_BUFFER_MIN_SIZE: v.val64 = vsk->buffer_min_size; break; case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: lv = sock_get_timeout(vsk->connect_timeout, &v, optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); break; default: return -ENOPROTOOPT; } if (len < lv) return -EINVAL; if (len > lv) len = lv; if (copy_to_user(optval, &v, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; ssize_t total_written; long timeout; int err; struct vsock_transport_send_notify_data send_data; DEFINE_WAIT_FUNC(wait, woken_wake_function); sk = sock->sk; vsk = vsock_sk(sk); total_written = 0; err = 0; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); transport = vsk->transport; /* Callers should not provide a destination with connection oriented * sockets. */ if (msg->msg_namelen) { err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; goto out; } /* Send data only if both sides are not shutdown in the direction. */ if (sk->sk_shutdown & SEND_SHUTDOWN || vsk->peer_shutdown & RCV_SHUTDOWN) { err = -EPIPE; goto out; } if (!transport || sk->sk_state != TCP_ESTABLISHED || !vsock_addr_bound(&vsk->local_addr)) { err = -ENOTCONN; goto out; } if (!vsock_addr_bound(&vsk->remote_addr)) { err = -EDESTADDRREQ; goto out; } if (msg->msg_flags & MSG_ZEROCOPY && !vsock_msgzerocopy_allow(transport)) { err = -EOPNOTSUPP; goto out; } /* Wait for room in the produce queue to enqueue our user's data. 
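	 * Roughly, the loop below drives the transport notification protocol:
	 *
	 *     notify_send_init()
	 *     while (total_written < len):
	 *         wait until vsock_stream_has_space() != 0
	 *             (notify_send_pre_block() before each sleep)
	 *         notify_send_pre_enqueue()
	 *         written = stream_enqueue() / seqpacket_enqueue()
	 *         notify_send_post_enqueue(written)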
*/ timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); err = transport->notify_send_init(vsk, &send_data); if (err < 0) goto out; while (total_written < len) { ssize_t written; add_wait_queue(sk_sleep(sk), &wait); while (vsock_stream_has_space(vsk) == 0 && sk->sk_err == 0 && !(sk->sk_shutdown & SEND_SHUTDOWN) && !(vsk->peer_shutdown & RCV_SHUTDOWN)) { /* Don't wait for non-blocking sockets. */ if (timeout == 0) { err = -EAGAIN; remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } err = transport->notify_send_pre_block(vsk, &send_data); if (err < 0) { remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } release_sock(sk); timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } else if (timeout == 0) { err = -EAGAIN; remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } } remove_wait_queue(sk_sleep(sk), &wait); /* These checks occur both as part of and after the loop * conditional since we need to check before and after * sleeping. */ if (sk->sk_err) { err = -sk->sk_err; goto out_err; } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || (vsk->peer_shutdown & RCV_SHUTDOWN)) { err = -EPIPE; goto out_err; } err = transport->notify_send_pre_enqueue(vsk, &send_data); if (err < 0) goto out_err; /* Note that enqueue will only write as many bytes as are free * in the produce queue, so we don't need to ensure len is * smaller than the queue size. It is the caller's * responsibility to check how many bytes we were able to send. */ if (sk->sk_type == SOCK_SEQPACKET) { written = transport->seqpacket_enqueue(vsk, msg, len - total_written); } else { written = transport->stream_enqueue(vsk, msg, len - total_written); } if (written < 0) { err = written; goto out_err; } total_written += written; err = transport->notify_send_post_enqueue( vsk, written, &send_data); if (err < 0) goto out_err; } out_err: if (total_written > 0) { /* Return number of written bytes only if: * 1) SOCK_STREAM socket. * 2) SOCK_SEQPACKET socket when whole buffer is sent. */ if (sk->sk_type == SOCK_STREAM || total_written == len) err = total_written; } out: if (sk->sk_type == SOCK_STREAM) err = sk_stream_error(sk, msg->msg_flags, err); release_sock(sk); return err; } static int vsock_connectible_wait_data(struct sock *sk, struct wait_queue_entry *wait, long timeout, struct vsock_transport_recv_notify_data *recv_data, size_t target) { const struct vsock_transport *transport; struct vsock_sock *vsk; s64 data; int err; vsk = vsock_sk(sk); err = 0; transport = vsk->transport; while (1) { prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE); data = vsock_connectible_has_data(vsk); if (data != 0) break; if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN) || (vsk->peer_shutdown & SEND_SHUTDOWN)) { break; } /* Don't wait for non-blocking sockets. */ if (timeout == 0) { err = -EAGAIN; break; } if (recv_data) { err = transport->notify_recv_pre_block(vsk, target, recv_data); if (err < 0) break; } release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); break; } else if (timeout == 0) { err = -EAGAIN; break; } } finish_wait(sk_sleep(sk), wait); if (err) return err; /* Internal transport error when checking for available * data. XXX This should be changed to a connection * reset in a later change. 
*/ if (data < 0) return -ENOMEM; return data; } static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags) { struct vsock_transport_recv_notify_data recv_data; const struct vsock_transport *transport; struct vsock_sock *vsk; ssize_t copied; size_t target; long timeout; int err; DEFINE_WAIT(wait); vsk = vsock_sk(sk); transport = vsk->transport; /* We must not copy less than target bytes into the user's buffer * before returning successfully, so we wait for the consume queue to * have that much data to consume before dequeueing. Note that this * makes it impossible to handle cases where target is greater than the * queue size. */ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (target >= transport->stream_rcvhiwat(vsk)) { err = -ENOMEM; goto out; } timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); copied = 0; err = transport->notify_recv_init(vsk, target, &recv_data); if (err < 0) goto out; while (1) { ssize_t read; err = vsock_connectible_wait_data(sk, &wait, timeout, &recv_data, target); if (err <= 0) break; err = transport->notify_recv_pre_dequeue(vsk, target, &recv_data); if (err < 0) break; read = transport->stream_dequeue(vsk, msg, len - copied, flags); if (read < 0) { err = read; break; } copied += read; err = transport->notify_recv_post_dequeue(vsk, target, read, !(flags & MSG_PEEK), &recv_data); if (err < 0) goto out; if (read >= target || flags & MSG_PEEK) break; target -= read; } if (sk->sk_err) err = -sk->sk_err; else if (sk->sk_shutdown & RCV_SHUTDOWN) err = 0; if (copied > 0) err = copied; out: return err; } static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags) { const struct vsock_transport *transport; struct vsock_sock *vsk; ssize_t msg_len; long timeout; int err = 0; DEFINE_WAIT(wait); vsk = vsock_sk(sk); transport = vsk->transport; timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0); if (err <= 0) goto out; msg_len = transport->seqpacket_dequeue(vsk, msg, flags); if (msg_len < 0) { err = msg_len; goto out; } if (sk->sk_err) { err = -sk->sk_err; } else if (sk->sk_shutdown & RCV_SHUTDOWN) { err = 0; } else { /* User sets MSG_TRUNC, so return real length of * packet. */ if (flags & MSG_TRUNC) err = msg_len; else err = len - msg_data_left(msg); /* Always set MSG_TRUNC if real length of packet is * bigger than user's buffer. */ if (msg_len > len) msg->msg_flags |= MSG_TRUNC; } out: return err; } int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; int err; sk = sock->sk; if (unlikely(flags & MSG_ERRQUEUE)) return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR); vsk = vsock_sk(sk); err = 0; lock_sock(sk); transport = vsk->transport; if (!transport || sk->sk_state != TCP_ESTABLISHED) { /* Recvmsg is supposed to return 0 if a peer performs an * orderly shutdown. Differentiate between that case and when a * peer has not connected or a local shutdown occurred with the * SOCK_DONE flag. */ if (sock_flag(sk, SOCK_DONE)) err = 0; else err = -ENOTCONN; goto out; } if (flags & MSG_OOB) { err = -EOPNOTSUPP; goto out; } /* We don't check peer_shutdown flag here since peer may actually shut * down, but there can be data in the queue that a local socket can * receive. */ if (sk->sk_shutdown & RCV_SHUTDOWN) { err = 0; goto out; } /* It is valid on Linux to pass in a zero-length receive buffer. 
This * is not an error. We may as well bail out now. */ if (!len) { err = 0; goto out; } if (sk->sk_type == SOCK_STREAM) err = __vsock_stream_recvmsg(sk, msg, len, flags); else err = __vsock_seqpacket_recvmsg(sk, msg, len, flags); out: release_sock(sk); return err; } int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { #ifdef CONFIG_BPF_SYSCALL struct sock *sk = sock->sk; const struct proto *prot; prot = READ_ONCE(sk->sk_prot); if (prot != &vsock_proto) return prot->recvmsg(sk, msg, len, flags, NULL); #endif return __vsock_connectible_recvmsg(sock, msg, len, flags); } EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg); static int vsock_set_rcvlowat(struct sock *sk, int val) { const struct vsock_transport *transport; struct vsock_sock *vsk; vsk = vsock_sk(sk); if (val > vsk->buffer_size) return -EINVAL; transport = vsk->transport; if (transport && transport->notify_set_rcvlowat) { int err; err = transport->notify_set_rcvlowat(vsk, val); if (err) return err; } WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); return 0; } static const struct proto_ops vsock_stream_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_connect, .socketpair = sock_no_socketpair, .accept = vsock_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = vsock_ioctl, .listen = vsock_listen, .shutdown = vsock_shutdown, .setsockopt = vsock_connectible_setsockopt, .getsockopt = vsock_connectible_getsockopt, .sendmsg = vsock_connectible_sendmsg, .recvmsg = vsock_connectible_recvmsg, .mmap = sock_no_mmap, .set_rcvlowat = vsock_set_rcvlowat, .read_skb = vsock_read_skb, }; static const struct proto_ops vsock_seqpacket_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_connect, .socketpair = sock_no_socketpair, .accept = vsock_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = vsock_ioctl, .listen = vsock_listen, .shutdown = vsock_shutdown, .setsockopt = vsock_connectible_setsockopt, .getsockopt = vsock_connectible_getsockopt, .sendmsg = vsock_connectible_sendmsg, .recvmsg = vsock_connectible_recvmsg, .mmap = sock_no_mmap, .read_skb = vsock_read_skb, }; static int vsock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct vsock_sock *vsk; struct sock *sk; int ret; if (!sock) return -EINVAL; if (protocol && protocol != PF_VSOCK) return -EPROTONOSUPPORT; switch (sock->type) { case SOCK_DGRAM: sock->ops = &vsock_dgram_ops; break; case SOCK_STREAM: sock->ops = &vsock_stream_ops; break; case SOCK_SEQPACKET: sock->ops = &vsock_seqpacket_ops; break; default: return -ESOCKTNOSUPPORT; } sock->state = SS_UNCONNECTED; sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern); if (!sk) return -ENOMEM; vsk = vsock_sk(sk); if (sock->type == SOCK_DGRAM) { ret = vsock_assign_transport(vsk, NULL); if (ret < 0) { sock->sk = NULL; sock_put(sk); return ret; } } /* SOCK_DGRAM doesn't have 'setsockopt' callback set in its * proto_ops, so there is no handler for custom logic. 
*/ if (sock_type_connectible(sock->type)) set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); vsock_insert_unbound(vsk); return 0; } static const struct net_proto_family vsock_family_ops = { .family = AF_VSOCK, .create = vsock_create, .owner = THIS_MODULE, }; static long vsock_dev_do_ioctl(struct file *filp, unsigned int cmd, void __user *ptr) { u32 __user *p = ptr; int retval = 0; u32 cid; switch (cmd) { case IOCTL_VM_SOCKETS_GET_LOCAL_CID: /* To be compatible with the VMCI behavior, we prioritize the * guest CID instead of the well-known host CID (VMADDR_CID_HOST). */ cid = vsock_registered_transport_cid(&transport_g2h); if (cid == VMADDR_CID_ANY) cid = vsock_registered_transport_cid(&transport_h2g); if (cid == VMADDR_CID_ANY) cid = vsock_registered_transport_cid(&transport_local); if (put_user(cid, p) != 0) retval = -EFAULT; break; default: retval = -ENOIOCTLCMD; } return retval; } static long vsock_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT static long vsock_dev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg)); } #endif static const struct file_operations vsock_device_ops = { .owner = THIS_MODULE, .unlocked_ioctl = vsock_dev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vsock_dev_compat_ioctl, #endif .open = nonseekable_open, }; static struct miscdevice vsock_device = { .name = "vsock", .fops = &vsock_device_ops, }; static int __init vsock_init(void) { int err = 0; vsock_init_tables(); vsock_proto.owner = THIS_MODULE; vsock_device.minor = MISC_DYNAMIC_MINOR; err = misc_register(&vsock_device); if (err) { pr_err("Failed to register misc device\n"); goto err_reset_transport; } err = proto_register(&vsock_proto, 1); /* we want our slab */ if (err) { pr_err("Cannot register vsock protocol\n"); goto err_deregister_misc; } err = sock_register(&vsock_family_ops); if (err) { pr_err("could not register af_vsock (%d) address family: %d\n", AF_VSOCK, err); goto err_unregister_proto; } vsock_bpf_build_proto(); return 0; err_unregister_proto: proto_unregister(&vsock_proto); err_deregister_misc: misc_deregister(&vsock_device); err_reset_transport: return err; } static void __exit vsock_exit(void) { misc_deregister(&vsock_device); sock_unregister(AF_VSOCK); proto_unregister(&vsock_proto); } const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk) { return vsk->transport; } EXPORT_SYMBOL_GPL(vsock_core_get_transport); int vsock_core_register(const struct vsock_transport *t, int features) { const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local; int err = mutex_lock_interruptible(&vsock_register_mutex); if (err) return err; t_h2g = transport_h2g; t_g2h = transport_g2h; t_dgram = transport_dgram; t_local = transport_local; if (features & VSOCK_TRANSPORT_F_H2G) { if (t_h2g) { err = -EBUSY; goto err_busy; } t_h2g = t; } if (features & VSOCK_TRANSPORT_F_G2H) { if (t_g2h) { err = -EBUSY; goto err_busy; } t_g2h = t; } if (features & VSOCK_TRANSPORT_F_DGRAM) { if (t_dgram) { err = -EBUSY; goto err_busy; } t_dgram = t; } if (features & VSOCK_TRANSPORT_F_LOCAL) { if (t_local) { err = -EBUSY; goto err_busy; } t_local = t; } transport_h2g = t_h2g; transport_g2h = t_g2h; transport_dgram = t_dgram; transport_local = t_local; err_busy: mutex_unlock(&vsock_register_mutex); return err; } EXPORT_SYMBOL_GPL(vsock_core_register); void vsock_core_unregister(const struct vsock_transport *t) {
mutex_lock(&vsock_register_mutex); if (transport_h2g == t) transport_h2g = NULL; if (transport_g2h == t) transport_g2h = NULL; if (transport_dgram == t) transport_dgram = NULL; if (transport_local == t) transport_local = NULL; mutex_unlock(&vsock_register_mutex); } EXPORT_SYMBOL_GPL(vsock_core_unregister); module_init(vsock_init); module_exit(vsock_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Virtual Socket Family"); MODULE_VERSION("1.0.2.0-k"); MODULE_LICENSE("GPL v2"); |
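For context, a minimal userspace sketch (not part of the kernel sources above) of how this connectible path is typically driven: send() and recv() on an AF_VSOCK stream socket reach vsock_connectible_sendmsg() and vsock_connectible_recvmsg() through the proto_ops tables above. The peer CID and port below are illustrative placeholders.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = VMADDR_CID_HOST,	/* placeholder peer CID */
		.svm_port = 1234,		/* placeholder port */
	};
	char buf[64];
	ssize_t n;
	int fd;

	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (fd < 0)
		return 1;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* Enters vsock_connectible_sendmsg() via vsock_stream_ops. */
	if (send(fd, "ping", 4, 0) < 0)
		return 1;

	/* Enters vsock_connectible_recvmsg(); blocks per the rcv timeout. */
	n = recv(fd, buf, sizeof(buf), 0);
	if (n > 0)
		printf("received %zd bytes\n", n);

	close(fd);
	return 0;
}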
// SPDX-License-Identifier: GPL-2.0-or-later /* Public-key operation keyctls * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/slab.h> #include <linux/err.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/parser.h> #include <linux/uaccess.h> #include <keys/user-type.h> #include "internal.h" static void keyctl_pkey_params_free(struct kernel_pkey_params *params) { kfree(params->info); key_put(params->key); } enum { Opt_err, Opt_enc, /* "enc=<encoding>" e.g. "enc=oaep" */ Opt_hash, /* "hash=<digest-name>" e.g. "hash=sha1" */ }; static const match_table_t param_keys = { { Opt_enc, "enc=%s" }, { Opt_hash, "hash=%s" }, { Opt_err, NULL } }; /* * Parse the information string, which consists of key=val pairs. */ static int keyctl_pkey_params_parse(struct kernel_pkey_params *params) { unsigned long token_mask = 0; substring_t args[MAX_OPT_ARGS]; char *c = params->info, *p, *q; int token; while ((p = strsep(&c, " \t"))) { if (*p == '\0' || *p == ' ' || *p == '\t') continue; token = match_token(p, param_keys, args); if (token == Opt_err) return -EINVAL; if (__test_and_set_bit(token, &token_mask)) return -EINVAL; q = args[0].from; if (!q[0]) return -EINVAL; switch (token) { case Opt_enc: params->encoding = q; break; case Opt_hash: params->hash_algo = q; break; default: return -EINVAL; } } return 0; } /* * Interpret parameters. Callers must always call the free function * on params, even if an error is returned. */ static int keyctl_pkey_params_get(key_serial_t id, const char __user *_info, struct kernel_pkey_params *params) { key_ref_t key_ref; void *p; int ret; memset(params, 0, sizeof(*params)); params->encoding = "raw"; p = strndup_user(_info, PAGE_SIZE); if (IS_ERR(p)) return PTR_ERR(p); params->info = p; ret = keyctl_pkey_params_parse(params); if (ret < 0) return ret; key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); params->key = key_ref_to_ptr(key_ref); if (!params->key->type->asym_query) return -EOPNOTSUPP; return 0; } /* * Get parameters from userspace. Callers must always call the free function * on params, even if an error is returned.
*/ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_params, const char __user *_info, int op, struct kernel_pkey_params *params) { struct keyctl_pkey_params uparams; struct kernel_pkey_query info; int ret; memset(params, 0, sizeof(*params)); params->encoding = "raw"; if (copy_from_user(&uparams, _params, sizeof(uparams)) != 0) return -EFAULT; ret = keyctl_pkey_params_get(uparams.key_id, _info, params); if (ret < 0) return ret; ret = params->key->type->asym_query(params, &info); if (ret < 0) return ret; switch (op) { case KEYCTL_PKEY_ENCRYPT: if (uparams.in_len > info.max_dec_size || uparams.out_len > info.max_enc_size) return -EINVAL; break; case KEYCTL_PKEY_DECRYPT: if (uparams.in_len > info.max_enc_size || uparams.out_len > info.max_dec_size) return -EINVAL; break; case KEYCTL_PKEY_SIGN: if (uparams.in_len > info.max_data_size || uparams.out_len > info.max_sig_size) return -EINVAL; break; case KEYCTL_PKEY_VERIFY: if (uparams.in_len > info.max_data_size || uparams.in2_len > info.max_sig_size) return -EINVAL; break; default: BUG(); } params->in_len = uparams.in_len; params->out_len = uparams.out_len; /* Note: same as in2_len */ return 0; } /* * Query information about an asymmetric key. */ long keyctl_pkey_query(key_serial_t id, const char __user *_info, struct keyctl_pkey_query __user *_res) { struct kernel_pkey_params params; struct kernel_pkey_query res; long ret; ret = keyctl_pkey_params_get(id, _info, &params); if (ret < 0) goto error; ret = params.key->type->asym_query(&params, &res); if (ret < 0) goto error; ret = -EFAULT; if (copy_to_user(_res, &res, sizeof(res)) == 0 && clear_user(_res->__spare, sizeof(_res->__spare)) == 0) ret = 0; error: keyctl_pkey_params_free(&params); return ret; } /* * Encrypt/decrypt/sign * * Encrypt data, decrypt data or sign data using a public key. * * _info is a string of supplementary information in key=val format. For * instance, it might contain: * * "enc=pkcs1 hash=sha256" * * where enc= specifies the encoding and hash= selects the OID to go in that * particular encoding if required. If enc= isn't supplied, it's assumed that * the caller is supplying raw values. * * If successful, the amount of data written into the output buffer is * returned. */ long keyctl_pkey_e_d_s(int op, const struct keyctl_pkey_params __user *_params, const char __user *_info, const void __user *_in, void __user *_out) { struct kernel_pkey_params params; void *in, *out; long ret; ret = keyctl_pkey_params_get_2(_params, _info, op, &params); if (ret < 0) goto error_params; ret = -EOPNOTSUPP; if (!params.key->type->asym_eds_op) goto error_params; switch (op) { case KEYCTL_PKEY_ENCRYPT: params.op = kernel_pkey_encrypt; break; case KEYCTL_PKEY_DECRYPT: params.op = kernel_pkey_decrypt; break; case KEYCTL_PKEY_SIGN: params.op = kernel_pkey_sign; break; default: BUG(); } in = memdup_user(_in, params.in_len); if (IS_ERR(in)) { ret = PTR_ERR(in); goto error_params; } ret = -ENOMEM; out = kmalloc(params.out_len, GFP_KERNEL); if (!out) goto error_in; ret = params.key->type->asym_eds_op(&params, in, out); if (ret < 0) goto error_out; if (copy_to_user(_out, out, ret) != 0) ret = -EFAULT; error_out: kfree(out); error_in: kfree(in); error_params: keyctl_pkey_params_free(&params); return ret; } /* * Verify a signature. * * Verify a public key signature using the given key, or if not given, search * for a matching key. * * _info is a string of supplementary information in key=val format.
For * instance, it might contain: * * "enc=pkcs1 hash=sha256" * * where enc= specifies the signature blob encoding and hash= selects the OID * to go in that particular encoding. If enc= isn't supplied, it's assumed * that the caller is supplying raw values. * * If successful, 0 is returned. */ long keyctl_pkey_verify(const struct keyctl_pkey_params __user *_params, const char __user *_info, const void __user *_in, const void __user *_in2) { struct kernel_pkey_params params; void *in, *in2; long ret; ret = keyctl_pkey_params_get_2(_params, _info, KEYCTL_PKEY_VERIFY, &params); if (ret < 0) goto error_params; ret = -EOPNOTSUPP; if (!params.key->type->asym_verify_signature) goto error_params; in = memdup_user(_in, params.in_len); if (IS_ERR(in)) { ret = PTR_ERR(in); goto error_params; } in2 = memdup_user(_in2, params.in2_len); if (IS_ERR(in2)) { ret = PTR_ERR(in2); goto error_in; } params.op = kernel_pkey_verify; ret = params.key->type->asym_verify_signature(&params, in, in2); kfree(in2); error_in: kfree(in); error_params: keyctl_pkey_params_free(&params); return ret; } |
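To make the calling convention concrete, here is a hedged userspace sketch (not part of the file above) driving these operations through the raw keyctl(2) syscall. The key serial is a placeholder for an asymmetric key already on a keyring, and the argument order assumed here follows how keyctl_pkey_query() and keyctl_pkey_e_d_s() are dispatched.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(void)
{
	struct keyctl_pkey_query q;
	struct keyctl_pkey_params p;
	unsigned char data[32] = { 0 };	/* e.g. a SHA-256 digest to sign */
	unsigned char sig[1024];
	int32_t key = 0x123456;		/* placeholder key serial */
	long n;

	/* KEYCTL_PKEY_QUERY: the unused third argument must be 0. */
	if (syscall(__NR_keyctl, KEYCTL_PKEY_QUERY, key, 0,
		    "enc=pkcs1 hash=sha256", &q) < 0)
		return 1;

	memset(&p, 0, sizeof(p));
	p.key_id = key;
	p.in_len = sizeof(data);
	p.out_len = q.max_sig_size < sizeof(sig) ? q.max_sig_size
						 : sizeof(sig);

	/* KEYCTL_PKEY_SIGN returns the number of signature bytes written. */
	n = syscall(__NR_keyctl, KEYCTL_PKEY_SIGN, &p,
		    "enc=pkcs1 hash=sha256", data, sig);
	if (n < 0)
		return 1;
	printf("signature is %ld bytes\n", n);
	return 0;
}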
/* SPDX-License-Identifier: GPL-2.0 */ /* * fscrypt.h: declarations for per-file encryption * * Filesystems that implement per-file encryption must include this header * file. * * Copyright (C) 2015, Google, Inc. * * Written by Michael Halcrow, 2015. * Modified by Jaegeuk Kim, 2015. */ #ifndef _LINUX_FSCRYPT_H #define _LINUX_FSCRYPT_H #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> #include <uapi/linux/fscrypt.h> /* * The lengths of all file contents blocks must be divisible by this value. * This is needed to ensure that all contents encryption modes will work, as * some of the supported modes don't support arbitrarily byte-aligned messages. * * Since the needed alignment is 16 bytes, most filesystems will meet this * requirement naturally, as typical block sizes are powers of 2. However, if a * filesystem can generate arbitrarily byte-aligned block lengths (e.g., via * compression), then it will need to pad to this alignment before encryption. */ #define FSCRYPT_CONTENTS_ALIGNMENT 16 union fscrypt_policy; struct fscrypt_inode_info; struct fs_parameter; struct seq_file; struct fscrypt_str { unsigned char *name; u32 len; }; struct fscrypt_name { const struct qstr *usr_fname; struct fscrypt_str disk_name; u32 hash; u32 minor_hash; struct fscrypt_str crypto_buf; bool is_nokey_name; }; #define FSTR_INIT(n, l) { .name = n, .len = l } #define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len) #define fname_name(p) ((p)->disk_name.name) #define fname_len(p) ((p)->disk_name.len) /* Maximum value for the third parameter of fscrypt_operations.set_context(). */ #define FSCRYPT_SET_CONTEXT_MAX_SIZE 40 #ifdef CONFIG_FS_ENCRYPTION /* Crypto operations for filesystems */ struct fscrypt_operations { /* * The offset of the pointer to struct fscrypt_inode_info in the * filesystem-specific part of the inode, relative to the beginning of * the common part of the inode (the 'struct inode'). */ ptrdiff_t inode_info_offs; /* * If set, then fs/crypto/ will allocate a global bounce page pool the * first time an encryption key is set up for a file. The bounce page * pool is required by the following functions: * * - fscrypt_encrypt_pagecache_blocks() * - fscrypt_zeroout_range() for files not using inline crypto * * If the filesystem doesn't use those, it doesn't need to set this.
*/ unsigned int needs_bounce_pages : 1; /* * If set, then fs/crypto/ will allow the use of encryption settings * that assume inode numbers fit in 32 bits (i.e. * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other * prerequisites for these settings are also met. This is only useful * if the filesystem wants to support inline encryption hardware that is * limited to 32-bit or 64-bit data unit numbers and where programming * keyslots is very slow. */ unsigned int has_32bit_inodes : 1; /* * If set, then fs/crypto/ will allow users to select a crypto data unit * size that is less than the filesystem block size. This is done via * the log2_data_unit_size field of the fscrypt policy. This flag is * not compatible with filesystems that encrypt variable-length blocks * (i.e. blocks that aren't all equal to filesystem's block size), for * example as a result of compression. It's also not compatible with * the fscrypt_encrypt_block_inplace() and * fscrypt_decrypt_block_inplace() functions. */ unsigned int supports_subblock_data_units : 1; /* * This field exists only for backwards compatibility reasons and should * only be set by the filesystems that are setting it already. It * contains the filesystem-specific key description prefix that is * accepted for "logon" keys for v1 fscrypt policies. This * functionality is deprecated in favor of the generic prefix * "fscrypt:", which itself is deprecated in favor of the filesystem * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that * are newly adding fscrypt support should not set this field. */ const char *legacy_key_prefix; /* * Get the fscrypt context of the given inode. * * @inode: the inode whose context to get * @ctx: the buffer into which to get the context * @len: length of the @ctx buffer in bytes * * Return: On success, returns the length of the context in bytes; this * may be less than @len. On failure, returns -ENODATA if the * inode doesn't have a context, -ERANGE if the context is * longer than @len, or another -errno code. */ int (*get_context)(struct inode *inode, void *ctx, size_t len); /* * Set an fscrypt context on the given inode. * * @inode: the inode whose context to set. The inode won't already have * an fscrypt context. * @ctx: the context to set * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE) * @fs_data: If called from fscrypt_set_context(), this will be the * value the filesystem passed to fscrypt_set_context(). * Otherwise (i.e. when called from * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL. * * i_rwsem will be held for write. * * Return: 0 on success, -errno on failure. */ int (*set_context)(struct inode *inode, const void *ctx, size_t len, void *fs_data); /* * Get the dummy fscrypt policy in use on the filesystem (if any). * * Filesystems only need to implement this function if they support the * test_dummy_encryption mount option. * * Return: A pointer to the dummy fscrypt policy, if the filesystem is * mounted with test_dummy_encryption; otherwise NULL. */ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb); /* * Check whether a directory is empty. i_rwsem will be held for write. */ bool (*empty_dir)(struct inode *inode); /* * Check whether the filesystem's inode numbers and UUID are stable, * meaning that they will never be changed even by offline operations * such as filesystem shrinking and therefore can be used in the * encryption without the possibility of files becoming unreadable. 
* * Filesystems only need to implement this function if they want to * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These * flags are designed to work around the limitations of UFS and eMMC * inline crypto hardware, and they shouldn't be used in scenarios where * such hardware isn't being used. * * Leaving this NULL is equivalent to always returning false. */ bool (*has_stable_inodes)(struct super_block *sb); /* * Return an array of pointers to the block devices to which the * filesystem may write encrypted file contents, NULL if the filesystem * only has a single such block device, or an ERR_PTR() on error. * * On successful non-NULL return, *num_devs is set to the number of * devices in the returned array. The caller must free the returned * array using kfree(). * * If the filesystem can use multiple block devices (other than block * devices that aren't used for encrypted file contents, such as * external journal devices), and wants to support inline encryption, * then it must implement this function. Otherwise it's not needed. */ struct block_device **(*get_devices)(struct super_block *sb, unsigned int *num_devs); }; int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags); /* * Returns the address of the fscrypt info pointer within the * filesystem-specific part of the inode. (To save memory on filesystems that * don't support fscrypt, a field in 'struct inode' itself is no longer used.) */ static inline struct fscrypt_inode_info ** fscrypt_inode_info_addr(const struct inode *inode) { VFS_WARN_ON_ONCE(inode->i_sb->s_cop->inode_info_offs == 0); return (void *)inode + inode->i_sb->s_cop->inode_info_offs; } /* * Load the inode's fscrypt info pointer, using a raw dereference. Since this * uses a raw dereference with no memory barrier, it is appropriate to use only * when the caller knows the inode's key setup already happened, resulting in * non-NULL fscrypt info. E.g., the file contents en/decryption functions use * this, since fscrypt_file_open() set up the key. */ static inline struct fscrypt_inode_info * fscrypt_get_inode_info_raw(const struct inode *inode) { struct fscrypt_inode_info *ci = *fscrypt_inode_info_addr(inode); VFS_WARN_ON_ONCE(ci == NULL); return ci; } static inline struct fscrypt_inode_info * fscrypt_get_inode_info(const struct inode *inode) { /* * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info(). * I.e., another task may publish the fscrypt info concurrently, * executing a RELEASE barrier. Use smp_load_acquire() here to safely * ACQUIRE the memory the other task published. */ return smp_load_acquire(fscrypt_inode_info_addr(inode)); } /** * fscrypt_needs_contents_encryption() - check whether an inode needs * contents encryption * @inode: the inode to check * * Return: %true iff the inode is an encrypted regular file and the kernel was * built with fscrypt support. * * If you need to know whether the encrypt bit is set even when the kernel was * built without fscrypt support, you must use IS_ENCRYPTED() directly instead. */ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) { return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); } /* * When d_splice_alias() moves a directory's no-key alias to its * plaintext alias as a result of the encryption key being added, * DCACHE_NOKEY_NAME must be cleared and there might be an opportunity * to disable d_revalidate. 
Note that we don't have to support the * inverse operation because fscrypt doesn't allow no-key names to be * the source or target of a rename(). */ static inline void fscrypt_handle_d_move(struct dentry *dentry) { /* * VFS calls fscrypt_handle_d_move even for non-fscrypt * filesystems. */ if (dentry->d_flags & DCACHE_NOKEY_NAME) { dentry->d_flags &= ~DCACHE_NOKEY_NAME; /* * Other filesystem features might be handling dentry * revalidation, in which case it cannot be disabled. */ if (dentry->d_op->d_revalidate == fscrypt_d_revalidate) dentry->d_flags &= ~DCACHE_OP_REVALIDATE; } } /** * fscrypt_is_nokey_name() - test whether a dentry is a no-key name * @dentry: the dentry to check * * This returns true if the dentry is a no-key dentry. A no-key dentry is a * dentry that was created in an encrypted directory that hasn't had its * encryption key added yet. Such dentries may be either positive or negative. * * When a filesystem is asked to create a new filename in an encrypted directory * and the new filename's dentry is a no-key dentry, it must fail the operation * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(), * ->rename(), and ->link(). (However, ->rename() and ->link() are already * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().) * * This is necessary because creating a filename requires the directory's * encryption key, but just checking for the key on the directory inode during * the final filesystem operation doesn't guarantee that the key was available * during the preceding dentry lookup. And the key must have already been * available during the dentry lookup in order for it to have been checked * whether the filename already exists in the directory and for the new file's * dentry not to be invalidated due to it incorrectly having the no-key flag. * * Return: %true if the dentry is a no-key name */ static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) { return dentry->d_flags & DCACHE_NOKEY_NAME; } static inline void fscrypt_prepare_dentry(struct dentry *dentry, bool is_nokey_name) { /* * This code tries to only take ->d_lock when necessary to write * to ->d_flags. We shouldn't be peeking on d_flags for * DCACHE_OP_REVALIDATE unlocked, but in the unlikely case * there is a race, the worst that can happen is that we fail to * unset DCACHE_OP_REVALIDATE and pay the cost of an extra * d_revalidate. */ if (is_nokey_name) { spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_NOKEY_NAME; spin_unlock(&dentry->d_lock); } else if (dentry->d_flags & DCACHE_OP_REVALIDATE && dentry->d_op->d_revalidate == fscrypt_d_revalidate) { /* * Unencrypted dentries and encrypted dentries where the * key is available are always valid from the fscrypt * perspective. Avoid the cost of calling * fscrypt_d_revalidate unnecessarily.
*/ spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_OP_REVALIDATE; spin_unlock(&dentry->d_lock); } } /* crypto.c */ void fscrypt_enqueue_decrypt_work(struct work_struct *); struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio, size_t len, size_t offs, gfp_t gfp_flags); int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num); int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len, size_t offs); int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num); static inline bool fscrypt_is_bounce_page(struct page *page) { return page->mapping == NULL; } static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { return (struct page *)page_private(bounce_page); } static inline bool fscrypt_is_bounce_folio(const struct folio *folio) { return folio->mapping == NULL; } static inline struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio) { return bounce_folio->private; } void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg); int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg); int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg); int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); int fscrypt_has_permitted_context(struct inode *parent, struct inode *child); int fscrypt_context_for_new_inode(void *ctx, struct inode *inode); int fscrypt_set_context(struct inode *inode, void *fs_data); struct fscrypt_dummy_policy { const union fscrypt_policy *policy; }; int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param, struct fscrypt_dummy_policy *dummy_policy); bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1, const struct fscrypt_dummy_policy *p2); void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, struct super_block *sb); static inline bool fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy) { return dummy_policy->policy != NULL; } static inline void fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy) { kfree(dummy_policy->policy); dummy_policy->policy = NULL; } /* keyring.c */ void fscrypt_destroy_keyring(struct super_block *sb); int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); /* keysetup.c */ int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode, bool *encrypt_ret); void fscrypt_put_encryption_info(struct inode *inode); void fscrypt_free_inode(struct inode *inode); int fscrypt_drop_inode(struct inode *inode); /* fname.c */ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, u8 *out, unsigned int olen); bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, u32 max_len, u32 *encrypted_len_ret); int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname, int lookup, struct fscrypt_name *fname); static inline void fscrypt_free_filename(struct fscrypt_name *fname) { kfree(fname->crypto_buf.name); } int fscrypt_fname_alloc_buffer(u32 max_encrypted_len, struct fscrypt_str *crypto_str); void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str); int 
fscrypt_fname_disk_to_usr(const struct inode *inode, u32 hash, u32 minor_hash, const struct fscrypt_str *iname, struct fscrypt_str *oname); bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len); u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name); /* bio.c */ bool fscrypt_decrypt_bio(struct bio *bio); int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, sector_t pblk, unsigned int len); /* hooks.c */ int fscrypt_file_open(struct inode *inode, struct file *filp); int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, struct dentry *dentry); int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags); int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname); int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry); int __fscrypt_prepare_readdir(struct inode *dir); int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr); int fscrypt_prepare_setflags(struct inode *inode, unsigned int oldflags, unsigned int flags); int fscrypt_prepare_symlink(struct inode *dir, const char *target, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link); int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, unsigned int len, struct fscrypt_str *disk_link); const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done); int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat); static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { sb->s_cop = s_cop; } #else /* !CONFIG_FS_ENCRYPTION */ static inline struct fscrypt_inode_info * fscrypt_get_inode_info(const struct inode *inode) { return NULL; } static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) { return false; } static inline void fscrypt_handle_d_move(struct dentry *dentry) { } static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) { return false; } static inline void fscrypt_prepare_dentry(struct dentry *dentry, bool is_nokey_name) { } /* crypto.c */ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) { } static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio, size_t len, size_t offs, gfp_t gfp_flags) { return ERR_PTR(-EOPNOTSUPP); } static inline int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num) { return -EOPNOTSUPP; } static inline int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len, size_t offs) { return -EOPNOTSUPP; } static inline int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num) { return -EOPNOTSUPP; } static inline bool fscrypt_is_bounce_page(struct page *page) { return false; } static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } static inline bool fscrypt_is_bounce_folio(const struct folio *folio) { return false; } static inline struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } static inline void fscrypt_free_bounce_page(struct page *bounce_page) { } /* policy.c */ static inline int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) { 
return -EOPNOTSUPP; } static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { return 0; } static inline int fscrypt_set_context(struct inode *inode, void *fs_data) { return -EOPNOTSUPP; } struct fscrypt_dummy_policy { }; static inline int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param, struct fscrypt_dummy_policy *dummy_policy) { return -EINVAL; } static inline bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1, const struct fscrypt_dummy_policy *p2) { return true; } static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, struct super_block *sb) { } static inline bool fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy) { return false; } static inline void fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy) { } /* keyring.c */ static inline void fscrypt_destroy_keyring(struct super_block *sb) { } static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } /* keysetup.c */ static inline int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode, bool *encrypt_ret) { if (IS_ENCRYPTED(dir)) return -EOPNOTSUPP; return 0; } static inline void fscrypt_put_encryption_info(struct inode *inode) { return; } static inline void fscrypt_free_inode(struct inode *inode) { } static inline int fscrypt_drop_inode(struct inode *inode) { return 0; } /* fname.c */ static inline int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, int lookup, struct fscrypt_name *fname) { if (IS_ENCRYPTED(dir)) return -EOPNOTSUPP; memset(fname, 0, sizeof(*fname)); fname->usr_fname = iname; fname->disk_name.name = (unsigned char *)iname->name; fname->disk_name.len = iname->len; return 0; } static inline void fscrypt_free_filename(struct fscrypt_name *fname) { return; } static inline int fscrypt_fname_alloc_buffer(u32 max_encrypted_len, struct fscrypt_str *crypto_str) { return -EOPNOTSUPP; } static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) { return; } static inline int fscrypt_fname_disk_to_usr(const struct inode *inode, u32 hash, u32 minor_hash, const struct fscrypt_str *iname, struct fscrypt_str *oname) { return -EOPNOTSUPP; } static inline bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len) { /* Encryption support disabled; use standard comparison */ if (de_name_len != fname->disk_name.len) return false; return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); } static inline u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name) { WARN_ON_ONCE(1); return 0; } static inline int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { return 1; } /* bio.c */ static inline bool fscrypt_decrypt_bio(struct bio *bio) { return true; } 
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, sector_t pblk, unsigned int len) { return -EOPNOTSUPP; } /* hooks.c */ static inline int fscrypt_file_open(struct inode *inode, struct file *filp) { if (IS_ENCRYPTED(inode)) return -EOPNOTSUPP; return 0; } static inline int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, struct dentry *dentry) { return -EOPNOTSUPP; } static inline int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { return -EOPNOTSUPP; } static inline int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname) { return -EOPNOTSUPP; } static inline int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry) { return -EOPNOTSUPP; } static inline int __fscrypt_prepare_readdir(struct inode *dir) { return -EOPNOTSUPP; } static inline int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr) { return -EOPNOTSUPP; } static inline int fscrypt_prepare_setflags(struct inode *inode, unsigned int oldflags, unsigned int flags) { return 0; } static inline int fscrypt_prepare_symlink(struct inode *dir, const char *target, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link) { if (IS_ENCRYPTED(dir)) return -EOPNOTSUPP; disk_link->name = (unsigned char *)target; disk_link->len = len + 1; if (disk_link->len > max_len) return -ENAMETOOLONG; return 0; } static inline int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, unsigned int len, struct fscrypt_str *disk_link) { return -EOPNOTSUPP; } static inline const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done) { return ERR_PTR(-EOPNOTSUPP); } static inline int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat) { return -EOPNOTSUPP; } static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { } #endif /* !CONFIG_FS_ENCRYPTION */ /* inline_crypt.c */ #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode); void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, u64 first_lblk, gfp_t gfp_mask); void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, const struct buffer_head *first_bh, gfp_t gfp_mask); bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, u64 next_lblk); bool fscrypt_mergeable_bio_bh(struct bio *bio, const struct buffer_head *next_bh); bool fscrypt_dio_supported(struct inode *inode); u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks); #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) { return false; } static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, u64 first_lblk, gfp_t gfp_mask) { } static inline void fscrypt_set_bio_crypt_ctx_bh( struct bio *bio, const struct buffer_head *first_bh, gfp_t gfp_mask) { } static inline bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, u64 next_lblk) { return true; } static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, const struct buffer_head *next_bh) { return true; } static inline bool fscrypt_dio_supported(struct inode *inode) { return !fscrypt_needs_contents_encryption(inode); } static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks) { 
return nr_blocks; } #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ /** * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline * encryption * @inode: an inode. If encrypted, its key must be set up. * * Return: true if the inode requires file contents encryption and if the * encryption should be done in the block layer via blk-crypto rather * than in the filesystem layer. */ static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) { return fscrypt_needs_contents_encryption(inode) && __fscrypt_inode_uses_inline_crypto(inode); } /** * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer * encryption * @inode: an inode. If encrypted, its key must be set up. * * Return: true if the inode requires file contents encryption and if the * encryption should be done in the filesystem layer rather than in the * block layer via blk-crypto. */ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) { return fscrypt_needs_contents_encryption(inode) && !__fscrypt_inode_uses_inline_crypto(inode); } /** * fscrypt_has_encryption_key() - check whether an inode has had its key set up * @inode: the inode to check * * Return: %true if the inode has had its encryption key set up, else %false. * * Usually this should be preceded by fscrypt_get_encryption_info() to try to * set up the key first. */ static inline bool fscrypt_has_encryption_key(const struct inode *inode) { return fscrypt_get_inode_info(inode) != NULL; } /** * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted * directory * @old_dentry: an existing dentry for the inode being linked * @dir: the target directory * @dentry: negative dentry for the target filename * * A new link can only be added to an encrypted directory if the directory's * encryption key is available --- since otherwise we'd have no way to encrypt * the filename. * * We also verify that the link will not violate the constraint that all files * in an encrypted directory tree use the same encryption policy. * * Return: 0 on success, -ENOKEY if the directory's encryption key is missing, * -EXDEV if the link would result in an inconsistent encryption policy, or * another -errno code. */ static inline int fscrypt_prepare_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { if (IS_ENCRYPTED(dir)) return __fscrypt_prepare_link(d_inode(old_dentry), dir, dentry); return 0; } /** * fscrypt_prepare_rename() - prepare for a rename between possibly-encrypted * directories * @old_dir: source directory * @old_dentry: dentry for source file * @new_dir: target directory * @new_dentry: dentry for target location (may be negative unless exchanging) * @flags: rename flags (we care at least about %RENAME_EXCHANGE) * * Prepare for ->rename() where the source and/or target directories may be * encrypted. A new link can only be added to an encrypted directory if the * directory's encryption key is available --- since otherwise we'd have no way * to encrypt the filename. A rename to an existing name, on the other hand, * *is* cryptographically possible without the key. However, we take the more * conservative approach and just forbid all no-key renames. * * We also verify that the rename will not violate the constraint that all files * in an encrypted directory tree use the same encryption policy. * * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the * rename would cause inconsistent encryption policies, or another -errno code. 
*/ static inline int fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (IS_ENCRYPTED(old_dir) || IS_ENCRYPTED(new_dir)) return __fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry, flags); return 0; } /** * fscrypt_prepare_lookup() - prepare to lookup a name in a possibly-encrypted * directory * @dir: directory being searched * @dentry: filename being looked up * @fname: (output) the name to use to search the on-disk directory * * Prepare for ->lookup() in a directory which may be encrypted by determining * the name that will actually be used to search the directory on-disk. If the * directory's encryption policy is supported by this kernel and its encryption * key is available, then the lookup is assumed to be by plaintext name; * otherwise, it is assumed to be by no-key name. * * This will set DCACHE_NOKEY_NAME on the dentry if the lookup is by no-key * name. In this case the filesystem must assign the dentry a dentry_operations * which contains fscrypt_d_revalidate (or contains a d_revalidate method that * calls fscrypt_d_revalidate), so that the dentry will be invalidated if the * directory's encryption key is later added. * * Return: 0 on success; -ENOENT if the directory's key is unavailable but the * filename isn't a valid no-key name, so a negative dentry should be created; * or another -errno code. */ static inline int fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname) { if (IS_ENCRYPTED(dir)) return __fscrypt_prepare_lookup(dir, dentry, fname); memset(fname, 0, sizeof(*fname)); fname->usr_fname = &dentry->d_name; fname->disk_name.name = (unsigned char *)dentry->d_name.name; fname->disk_name.len = dentry->d_name.len; fscrypt_prepare_dentry(dentry, false); return 0; } /** * fscrypt_prepare_readdir() - prepare to read a possibly-encrypted directory * @dir: the directory inode * * If the directory is encrypted and it doesn't already have its encryption key * set up, try to set it up so that the filenames will be listed in plaintext * form rather than in no-key form. * * Return: 0 on success; -errno on error. Note that the encryption key being * unavailable is not considered an error. It is also not an error if * the encryption policy is unsupported by this kernel; that is treated * like the key being unavailable, so that files can still be deleted. */ static inline int fscrypt_prepare_readdir(struct inode *dir) { if (IS_ENCRYPTED(dir)) return __fscrypt_prepare_readdir(dir); return 0; } /** * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's * attributes * @dentry: dentry through which the inode is being changed * @attr: attributes to change * * Prepare for ->setattr() on a possibly-encrypted inode. On an encrypted file, * most attribute changes are allowed even without the encryption key. However, * without the encryption key we do have to forbid truncates. This is needed * because the size being truncated to may not be a multiple of the filesystem * block size, and in that case we'd have to decrypt the final block, zero the * portion past i_size, and re-encrypt it. (We *could* allow truncating to a * filesystem block boundary, but it's simpler to just forbid all truncates --- * and we already forbid all other contents modifications without the key.) * * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code * if a problem occurred while setting up the encryption key. 
*/ static inline int fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr) { if (IS_ENCRYPTED(d_inode(dentry))) return __fscrypt_prepare_setattr(dentry, attr); return 0; } /** * fscrypt_encrypt_symlink() - encrypt the symlink target if needed * @inode: symlink inode * @target: plaintext symlink target * @len: length of @target excluding null terminator * @disk_link: (in/out) the on-disk symlink target being prepared * * If the symlink target needs to be encrypted, then this function encrypts it * into @disk_link->name. fscrypt_prepare_symlink() must have been called * previously to compute @disk_link->len. If the filesystem did not allocate a * buffer for @disk_link->name after calling fscrypt_prepare_symlink(), then one * will be kmalloc()'ed and the filesystem will be responsible for freeing it. * * Return: 0 on success, -errno on failure */ static inline int fscrypt_encrypt_symlink(struct inode *inode, const char *target, unsigned int len, struct fscrypt_str *disk_link) { if (IS_ENCRYPTED(inode)) return __fscrypt_encrypt_symlink(inode, target, len, disk_link); return 0; } /* If *pagep is a bounce page, free it and set *pagep to the pagecache page */ static inline void fscrypt_finalize_bounce_page(struct page **pagep) { struct page *page = *pagep; if (fscrypt_is_bounce_page(page)) { *pagep = fscrypt_pagecache_page(page); fscrypt_free_bounce_page(page); } } #endif /* _LINUX_FSCRYPT_H */ |
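As a usage illustration, a minimal sketch of how a filesystem consumes two of the hooks declared above; "myfs" and its callbacks are hypothetical, and only the fscrypt calls are taken from this header.

#include <linux/fs.h>
#include <linux/fscrypt.h>

/* ->open(): sets up the encryption key; may fail with -ENOKEY. */
static int myfs_open(struct inode *inode, struct file *filp)
{
	int err;

	err = fscrypt_file_open(inode, filp);
	if (err)
		return err;
	return generic_file_open(inode, filp);
}

/* ->setattr(): forbids no-key truncates, per the comment above. */
static int myfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			struct iattr *attr)
{
	int err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;
	/* ... filesystem-specific attribute update would go here ... */
	return 0;
}

At mount time the filesystem would also point sb->s_cop at its fscrypt_operations table via fscrypt_set_ops(), so that the helpers above can find its context callbacks.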
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIME64_H #define _LINUX_TIME64_H #include <linux/math64.h> #include <vdso/time64.h> typedef __s64 time64_t; typedef __u64 timeu64_t; #include <uapi/linux/time.h> struct timespec64 { time64_t tv_sec; /* seconds */ long tv_nsec; /* nanoseconds */ }; struct itimerspec64 { struct timespec64 it_interval; struct timespec64 it_value; }; /* Parameters used to convert the timespec values: */ #define PSEC_PER_NSEC 1000L /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) #define TIME64_MIN (-TIME64_MAX - 1) #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_MIN (-KTIME_MAX - 1) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) #define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC) /* * Limits for settimeofday(): * * To prevent setting the time close to the wraparound point, time setting * is limited so that a reasonable uptime can be accommodated. An uptime of * 30 years should be sufficient, which means the cutoff is 2232. At that * point the cutoff is just a small part of the larger problem. */ #define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 * 3600) #define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX) static inline int timespec64_equal(const struct timespec64 *a, const struct timespec64 *b) { return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); } static inline bool timespec64_is_epoch(const struct timespec64 *ts) { return ts->tv_sec == 0 && ts->tv_nsec == 0; } /* * lhs < rhs: return <0 * lhs == rhs: return 0 * lhs > rhs: return >0 */ static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; if (lhs->tv_sec > rhs->tv_sec) return 1; return lhs->tv_nsec - rhs->tv_nsec; } extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); static inline struct timespec64 timespec64_add(struct timespec64 lhs, struct timespec64 rhs) { struct timespec64 ts_delta; set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec, lhs.tv_nsec + rhs.tv_nsec); return ts_delta; } /* * sub = lhs - rhs, in normalized form */ static inline struct timespec64 timespec64_sub(struct timespec64 lhs, struct timespec64 rhs) { struct timespec64 ts_delta; set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec, lhs.tv_nsec - rhs.tv_nsec); return ts_delta; } /* * Returns true if the timespec64 is normalized, false if denormalized: */ static inline bool timespec64_valid(const struct timespec64 *ts) { /* Dates before 1970 are bogus */ if (ts->tv_sec < 0) return false; /* Can't have more nanoseconds than a second */ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return false; return true; } static inline bool timespec64_valid_strict(const struct timespec64 *ts) { if (!timespec64_valid(ts)) return false; /* Disallow values that could overflow ktime_t */ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) return false;
return true; } static inline bool timespec64_valid_settod(const struct timespec64 *ts) { if (!timespec64_valid(ts)) return false; /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */ if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX) return false; return true; } /** * timespec64_to_ns - Convert timespec64 to nanoseconds * @ts: pointer to the timespec64 variable to be converted * * Returns the scalar nanosecond representation of the timespec64 * parameter. */ static inline s64 timespec64_to_ns(const struct timespec64 *ts) { /* Prevent multiplication overflow / underflow */ if (ts->tv_sec >= KTIME_SEC_MAX) return KTIME_MAX; if (ts->tv_sec <= KTIME_SEC_MIN) return KTIME_MIN; return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } /** * ns_to_timespec64 - Convert nanoseconds to timespec64 * @nsec: the nanoseconds value to be converted * * Returns the timespec64 representation of the nsec parameter. */ extern struct timespec64 ns_to_timespec64(s64 nsec); /** * timespec64_add_ns - Adds nanoseconds to a timespec64 * @a: pointer to timespec64 to be incremented * @ns: unsigned nanoseconds value to be added * * This must always be inlined because it's used from the x86-64 vdso, * which cannot call other kernel functions. */ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) { a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); a->tv_nsec = ns; } /* * timespec64_add_safe assumes both values are positive and checks for * overflow. It will return TIME64_MAX in case of overflow. */ extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, const struct timespec64 rhs); #endif /* _LINUX_TIME64_H */
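/*
 * Illustrative sketch, not part of <linux/time64.h>: what normalization
 * buys callers of the helpers above.  timespec64_add() may be fed operands
 * whose summed tv_nsec exceeds NSEC_PER_SEC; set_normalized_timespec64()
 * folds the excess into tv_sec, so the sum below is
 * { .tv_sec = 2, .tv_nsec = 100 * NSEC_PER_MSEC }, not an out-of-range
 * { 1, 1100 * NSEC_PER_MSEC }.  The function name is hypothetical.
 */
static inline bool example_timespec64_normalization(void)
{
	struct timespec64 a = { .tv_sec = 1, .tv_nsec = 900 * NSEC_PER_MSEC };
	struct timespec64 b = { .tv_sec = 0, .tv_nsec = 200 * NSEC_PER_MSEC };
	struct timespec64 sum = timespec64_add(a, b);	/* 1.9s + 0.2s */

	/* A normalized result always passes timespec64_valid(). */
	return timespec64_valid(&sum) &&
	       timespec64_compare(&sum, &a) > 0 &&
	       timespec64_to_ns(&sum) ==
			(s64)2 * NSEC_PER_SEC + 100 * NSEC_PER_MSEC;
}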
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM hugetlbfs #if !defined(_TRACE_HUGETLBFS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_HUGETLBFS_H #include <linux/tracepoint.h> TRACE_EVENT(hugetlbfs_alloc_inode, TP_PROTO(struct inode *inode, struct inode *dir, int mode), TP_ARGS(inode, dir, mode), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(ino_t, dir) __field(__u16, mode) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->dir = dir ? dir->i_ino : 0; __entry->mode = mode; ), TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned long) __entry->dir, __entry->mode) ); DECLARE_EVENT_CLASS(hugetlbfs__inode, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(__u16, mode) __field(loff_t, size) __field(unsigned int, nlink) __field(unsigned int, seals) __field(blkcnt_t, blocks) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->mode = inode->i_mode; __entry->size = inode->i_size; __entry->nlink = inode->i_nlink; __entry->seals = HUGETLBFS_I(inode)->seals; __entry->blocks = inode->i_blocks; ), TP_printk("dev %d,%d ino %lu mode 0%o size %lld nlink %u seals %u blocks %llu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->mode, __entry->size, __entry->nlink, __entry->seals, (unsigned long long)__entry->blocks) ); DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_evict_inode, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_free_inode, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); TRACE_EVENT(hugetlbfs_setattr, TP_PROTO(struct inode *inode, struct dentry *dentry, struct iattr *attr), TP_ARGS(inode, dentry, attr), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, d_len) __string(d_name, dentry->d_name.name) __field(unsigned int, ia_valid) __field(unsigned int, ia_mode) __field(loff_t, old_size) __field(loff_t, ia_size) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->d_len = dentry->d_name.len; __assign_str(d_name); __entry->ia_valid = attr->ia_valid; __entry->ia_mode = attr->ia_mode; __entry->old_size = inode->i_size; __entry->ia_size = attr->ia_size; ), TP_printk("dev %d,%d ino %lu name %.*s valid %#x mode 0%o old_size %lld size %lld", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino, __entry->d_len, __get_str(d_name), __entry->ia_valid, __entry->ia_mode, __entry->old_size, __entry->ia_size) ); TRACE_EVENT(hugetlbfs_fallocate, TP_PROTO(struct inode *inode, int mode, loff_t offset, loff_t len, int ret), TP_ARGS(inode, mode, offset, len, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(int, mode) __field(loff_t, offset) __field(loff_t, len) __field(loff_t, size) __field(int, ret) ), TP_fast_assign( __entry->dev =
inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->mode = mode; __entry->offset = offset; __entry->len = len; __entry->size = inode->i_size; __entry->ret = ret; ), TP_printk("dev %d,%d ino %lu mode 0%o offset %lld len %lld size %lld ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino, __entry->mode, (long long)__entry->offset, (long long)__entry->len, (long long)__entry->size, __entry->ret) ); #endif /* _TRACE_HUGETLBFS_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
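/*
 * Illustrative sketch, not part of this header: each TRACE_EVENT() /
 * DEFINE_EVENT() above expands into a trace_<name>() static inline that
 * the filesystem calls at the matching spot; the call compiles down to a
 * patched-out branch unless the event is enabled.  The surrounding
 * function here is hypothetical, condensed from what a caller in
 * fs/hugetlbfs/inode.c would look like.
 */
static void example_hugetlbfs_evict(struct inode *inode)
{
	trace_hugetlbfs_evict_inode(inode);	/* hugetlbfs__inode class */
	/* ... actual eviction work: truncate pages, drop the resv map ... */
}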
// SPDX-License-Identifier: GPL-2.0-only #include <linux/file.h> #include <linux/net.h> #include <linux/rcupdate.h> #include <linux/tcp.h> #include <net/ip.h> #include <net/psp.h> #include "psp.h" struct psp_dev *psp_dev_get_for_sock(struct sock *sk) { struct psp_dev *psd = NULL; struct dst_entry *dst; rcu_read_lock(); dst = __sk_dst_get(sk); if (dst) { psd = rcu_dereference(dst_dev_rcu(dst)->psp_dev); if (psd && !psp_dev_tryget(psd)) psd = NULL; } rcu_read_unlock(); return psd; } static struct sk_buff * psp_validate_xmit(struct sock *sk, struct net_device *dev, struct sk_buff *skb) { struct psp_assoc *pas; bool good; rcu_read_lock(); pas = psp_skb_get_assoc_rcu(skb); good = !pas || rcu_access_pointer(dev->psp_dev) == pas->psd; rcu_read_unlock(); if (!good) { sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_PSP_OUTPUT); return NULL; } return skb; } struct psp_assoc *psp_assoc_create(struct psp_dev *psd) { struct psp_assoc *pas; lockdep_assert_held(&psd->lock); pas = kzalloc(struct_size(pas, drv_data, psd->caps->assoc_drv_spc), GFP_KERNEL_ACCOUNT); if (!pas) return NULL; pas->psd = psd; pas->dev_id = psd->id; pas->generation = psd->generation; psp_dev_get(psd); refcount_set(&pas->refcnt, 1); list_add_tail(&pas->assocs_list, &psd->active_assocs); return pas; } static struct psp_assoc *psp_assoc_dummy(struct psp_assoc *pas) { struct psp_dev *psd = pas->psd; size_t sz; lockdep_assert_held(&psd->lock); sz = struct_size(pas, drv_data, psd->caps->assoc_drv_spc); return kmemdup(pas, sz, GFP_KERNEL); } static int psp_dev_tx_key_add(struct psp_dev *psd, struct psp_assoc *pas, struct netlink_ext_ack *extack) { return psd->ops->tx_key_add(psd, pas, extack); } void psp_dev_tx_key_del(struct psp_dev *psd, struct psp_assoc *pas) { if (pas->tx.spi) psd->ops->tx_key_del(psd, pas); list_del(&pas->assocs_list); } static void psp_assoc_free(struct work_struct *work) { struct psp_assoc *pas = container_of(work, struct psp_assoc, work); struct psp_dev *psd = pas->psd; mutex_lock(&psd->lock); if (psd->ops) psp_dev_tx_key_del(psd, pas); mutex_unlock(&psd->lock); psp_dev_put(psd); kfree(pas); } static void psp_assoc_free_queue(struct rcu_head *head) { struct psp_assoc *pas = container_of(head, struct psp_assoc, rcu); INIT_WORK(&pas->work, psp_assoc_free); schedule_work(&pas->work); } /** * psp_assoc_put() - release a reference on a PSP association * @pas: association to release */ void psp_assoc_put(struct psp_assoc *pas) { if (pas &&
refcount_dec_and_test(&pas->refcnt)) call_rcu(&pas->rcu, psp_assoc_free_queue); } void psp_sk_assoc_free(struct sock *sk) { struct psp_assoc *pas = rcu_dereference_protected(sk->psp_assoc, 1); rcu_assign_pointer(sk->psp_assoc, NULL); psp_assoc_put(pas); } int psp_sock_assoc_set_rx(struct sock *sk, struct psp_assoc *pas, struct psp_key_parsed *key, struct netlink_ext_ack *extack) { int err; memcpy(&pas->rx, key, sizeof(*key)); lock_sock(sk); if (psp_sk_assoc(sk)) { NL_SET_ERR_MSG(extack, "Socket already has PSP state"); err = -EBUSY; goto exit_unlock; } refcount_inc(&pas->refcnt); rcu_assign_pointer(sk->psp_assoc, pas); err = 0; exit_unlock: release_sock(sk); return err; } static int psp_sock_recv_queue_check(struct sock *sk, struct psp_assoc *pas) { struct psp_skb_ext *pse; struct sk_buff *skb; skb_rbtree_walk(skb, &tcp_sk(sk)->out_of_order_queue) { pse = skb_ext_find(skb, SKB_EXT_PSP); if (!psp_pse_matches_pas(pse, pas)) return -EBUSY; } skb_queue_walk(&sk->sk_receive_queue, skb) { pse = skb_ext_find(skb, SKB_EXT_PSP); if (!psp_pse_matches_pas(pse, pas)) return -EBUSY; } return 0; } int psp_sock_assoc_set_tx(struct sock *sk, struct psp_dev *psd, u32 version, struct psp_key_parsed *key, struct netlink_ext_ack *extack) { struct inet_connection_sock *icsk; struct psp_assoc *pas, *dummy; int err; lock_sock(sk); pas = psp_sk_assoc(sk); if (!pas) { NL_SET_ERR_MSG(extack, "Socket has no Rx key"); err = -EINVAL; goto exit_unlock; } if (pas->psd != psd) { NL_SET_ERR_MSG(extack, "Rx key from different device"); err = -EINVAL; goto exit_unlock; } if (pas->version != version) { NL_SET_ERR_MSG(extack, "PSP version mismatch with existing state"); err = -EINVAL; goto exit_unlock; } if (pas->tx.spi) { NL_SET_ERR_MSG(extack, "Tx key already set"); err = -EBUSY; goto exit_unlock; } err = psp_sock_recv_queue_check(sk, pas); if (err) { NL_SET_ERR_MSG(extack, "Socket has incompatible segments already in the recv queue"); goto exit_unlock; } /* Pass a fake association to drivers to make sure they don't * try to store pointers to it. For re-keying we'll need to * re-allocate the assoc structures. */ dummy = psp_assoc_dummy(pas); if (!dummy) { err = -ENOMEM; goto exit_unlock; } memcpy(&dummy->tx, key, sizeof(*key)); err = psp_dev_tx_key_add(psd, dummy, extack); if (err) goto exit_free_dummy; memcpy(pas->drv_data, dummy->drv_data, psd->caps->assoc_drv_spc); memcpy(&pas->tx, key, sizeof(*key)); WRITE_ONCE(sk->sk_validate_xmit_skb, psp_validate_xmit); tcp_write_collapse_fence(sk); pas->upgrade_seq = tcp_sk(sk)->rcv_nxt; icsk = inet_csk(sk); icsk->icsk_ext_hdr_len += psp_sk_overhead(sk); icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); exit_free_dummy: kfree(dummy); exit_unlock: release_sock(sk); return err; } void psp_assocs_key_rotated(struct psp_dev *psd) { struct psp_assoc *pas, *next; /* Mark the stale associations as invalid, they will no longer * be able to Rx any traffic. 
*/ list_for_each_entry_safe(pas, next, &psd->prev_assocs, assocs_list) { pas->generation |= ~PSP_GEN_VALID_MASK; psd->stats.stales++; } list_splice_init(&psd->prev_assocs, &psd->stale_assocs); list_splice_init(&psd->active_assocs, &psd->prev_assocs); /* TODO: we should inform the sockets that got shut down */ } void psp_twsk_init(struct inet_timewait_sock *tw, const struct sock *sk) { struct psp_assoc *pas = psp_sk_assoc(sk); if (pas) refcount_inc(&pas->refcnt); rcu_assign_pointer(tw->psp_assoc, pas); tw->tw_validate_xmit_skb = psp_validate_xmit; } void psp_twsk_assoc_free(struct inet_timewait_sock *tw) { struct psp_assoc *pas = rcu_dereference_protected(tw->psp_assoc, 1); rcu_assign_pointer(tw->psp_assoc, NULL); psp_assoc_put(pas); } void psp_reply_set_decrypted(const struct sock *sk, struct sk_buff *skb) { struct psp_assoc *pas; rcu_read_lock(); pas = psp_sk_get_assoc_rcu(sk); if (pas && pas->tx.spi) skb->decrypted = 1; rcu_read_unlock(); } EXPORT_IPV6_MOD_GPL(psp_reply_set_decrypted);
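/*
 * Illustrative sketch, not part of this file: the reader side of the
 * refcount+RCU lifetime scheme used above.  Because psp_assoc_put()
 * defers the free through call_rcu(), a reader may still observe a
 * zero-refcount association under rcu_read_lock(); it must therefore
 * take its reference with refcount_inc_not_zero() before leaving the
 * critical section.  "example_sk_get_assoc" is a hypothetical helper.
 */
static struct psp_assoc *example_sk_get_assoc(struct sock *sk)
{
	struct psp_assoc *pas;

	rcu_read_lock();
	pas = rcu_dereference(sk->psp_assoc);
	if (pas && !refcount_inc_not_zero(&pas->refcnt))
		pas = NULL;	/* raced with the final psp_assoc_put() */
	rcu_read_unlock();

	return pas;	/* caller releases with psp_assoc_put() */
}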
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. * All Rights Reserved.
*/ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_bmap.h" #include "xfs_dir2.h" #include "xfs_dir2_priv.h" #include "xfs_errortag.h" #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_health.h" #include "xfs_bmap_btree.h" #include "xfs_trans_space.h" #include "xfs_parent.h" #include "xfs_ag.h" #include "xfs_ialloc.h" const struct xfs_name xfs_name_dotdot = { .name = (const unsigned char *)"..", .len = 2, .type = XFS_DIR3_FT_DIR, }; const struct xfs_name xfs_name_dot = { .name = (const unsigned char *)".", .len = 1, .type = XFS_DIR3_FT_DIR, }; /* * Convert inode mode to directory entry filetype */ unsigned char xfs_mode_to_ftype( int mode) { switch (mode & S_IFMT) { case S_IFREG: return XFS_DIR3_FT_REG_FILE; case S_IFDIR: return XFS_DIR3_FT_DIR; case S_IFCHR: return XFS_DIR3_FT_CHRDEV; case S_IFBLK: return XFS_DIR3_FT_BLKDEV; case S_IFIFO: return XFS_DIR3_FT_FIFO; case S_IFSOCK: return XFS_DIR3_FT_SOCK; case S_IFLNK: return XFS_DIR3_FT_SYMLINK; default: return XFS_DIR3_FT_UNKNOWN; } } /* * ASCII case-insensitive (ie. A-Z) support for directories that was * used in IRIX. */ xfs_dahash_t xfs_ascii_ci_hashname( const struct xfs_name *name) { xfs_dahash_t hash; int i; for (i = 0, hash = 0; i < name->len; i++) hash = xfs_ascii_ci_xfrm(name->name[i]) ^ rol32(hash, 7); return hash; } enum xfs_dacmp xfs_ascii_ci_compname( struct xfs_da_args *args, const unsigned char *name, int len) { enum xfs_dacmp result; int i; if (args->namelen != len) return XFS_CMP_DIFFERENT; result = XFS_CMP_EXACT; for (i = 0; i < len; i++) { if (args->name[i] == name[i]) continue; if (xfs_ascii_ci_xfrm(args->name[i]) != xfs_ascii_ci_xfrm(name[i])) return XFS_CMP_DIFFERENT; result = XFS_CMP_CASE; } return result; } int xfs_da_mount( struct xfs_mount *mp) { struct xfs_da_geometry *dageo; ASSERT(mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT); ASSERT(xfs_dir2_dirblock_bytes(&mp->m_sb) <= XFS_MAX_BLOCKSIZE); mp->m_dir_geo = kzalloc(sizeof(struct xfs_da_geometry), GFP_KERNEL | __GFP_RETRY_MAYFAIL); mp->m_attr_geo = kzalloc(sizeof(struct xfs_da_geometry), GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!mp->m_dir_geo || !mp->m_attr_geo) { kfree(mp->m_dir_geo); kfree(mp->m_attr_geo); return -ENOMEM; } /* set up directory geometry */ dageo = mp->m_dir_geo; dageo->blklog = mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog; dageo->fsblog = mp->m_sb.sb_blocklog; dageo->blksize = xfs_dir2_dirblock_bytes(&mp->m_sb); dageo->fsbcount = 1 << mp->m_sb.sb_dirblklog; if (xfs_has_crc(mp)) { dageo->node_hdr_size = sizeof(struct xfs_da3_node_hdr); dageo->leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr); dageo->free_hdr_size = sizeof(struct xfs_dir3_free_hdr); dageo->data_entry_offset = sizeof(struct xfs_dir3_data_hdr); } else { dageo->node_hdr_size = sizeof(struct xfs_da_node_hdr); dageo->leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr); dageo->free_hdr_size = sizeof(struct xfs_dir2_free_hdr); dageo->data_entry_offset = sizeof(struct xfs_dir2_data_hdr); } dageo->leaf_max_ents = (dageo->blksize - dageo->leaf_hdr_size) / sizeof(struct xfs_dir2_leaf_entry); dageo->free_max_bests = (dageo->blksize - dageo->free_hdr_size) / sizeof(xfs_dir2_data_off_t); dageo->data_first_offset = dageo->data_entry_offset + xfs_dir2_data_entsize(mp, 1) + xfs_dir2_data_entsize(mp, 2); /* * Now we've set up the block conversion variables, we can calculate the * segment block constants using the 
geometry structure. */ dageo->datablk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_DATA_OFFSET); dageo->leafblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_LEAF_OFFSET); dageo->freeblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_FREE_OFFSET); dageo->node_ents = (dageo->blksize - dageo->node_hdr_size) / (uint)sizeof(xfs_da_node_entry_t); dageo->max_extents = (XFS_DIR2_MAX_SPACES * XFS_DIR2_SPACE_SIZE) >> mp->m_sb.sb_blocklog; dageo->magicpct = (dageo->blksize * 37) / 100; /* set up attribute geometry - single fsb only */ dageo = mp->m_attr_geo; dageo->blklog = mp->m_sb.sb_blocklog; dageo->fsblog = mp->m_sb.sb_blocklog; dageo->blksize = 1 << dageo->blklog; dageo->fsbcount = 1; dageo->node_hdr_size = mp->m_dir_geo->node_hdr_size; dageo->node_ents = (dageo->blksize - dageo->node_hdr_size) / (uint)sizeof(xfs_da_node_entry_t); if (xfs_has_large_extent_counts(mp)) dageo->max_extents = XFS_MAX_EXTCNT_ATTR_FORK_LARGE; else dageo->max_extents = XFS_MAX_EXTCNT_ATTR_FORK_SMALL; dageo->magicpct = (dageo->blksize * 37) / 100; return 0; } void xfs_da_unmount( struct xfs_mount *mp) { kfree(mp->m_dir_geo); kfree(mp->m_attr_geo); } /* * Return 1 if directory contains only "." and "..". */ static bool xfs_dir_isempty( xfs_inode_t *dp) { xfs_dir2_sf_hdr_t *sfp; ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); if (dp->i_disk_size == 0) /* might happen during shutdown. */ return true; if (dp->i_disk_size > xfs_inode_data_fork_size(dp)) return false; sfp = dp->i_df.if_data; return !sfp->count; } /* * Validate a given inode number. */ int xfs_dir_ino_validate( xfs_mount_t *mp, xfs_ino_t ino) { bool ino_ok = xfs_verify_dir_ino(mp, ino); if (XFS_IS_CORRUPT(mp, !ino_ok) || XFS_TEST_ERROR(mp, XFS_ERRTAG_DIR_INO_VALIDATE)) { xfs_warn(mp, "Invalid inode number 0x%Lx", (unsigned long long) ino); return -EFSCORRUPTED; } return 0; } /* * Initialize a directory with its "." and ".." entries. 
*/ int xfs_dir_init( xfs_trans_t *tp, xfs_inode_t *dp, xfs_inode_t *pdp) { struct xfs_da_args *args; int error; ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino); if (error) return error; args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL); if (!args) return -ENOMEM; args->geo = dp->i_mount->m_dir_geo; args->dp = dp; args->trans = tp; args->owner = dp->i_ino; error = xfs_dir2_sf_create(args, pdp->i_ino); kfree(args); return error; } enum xfs_dir2_fmt xfs_dir2_format( struct xfs_da_args *args, int *error) { struct xfs_inode *dp = args->dp; struct xfs_mount *mp = dp->i_mount; struct xfs_da_geometry *geo = mp->m_dir_geo; xfs_fileoff_t eof; xfs_assert_ilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL); *error = 0; if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) return XFS_DIR2_FMT_SF; *error = xfs_bmap_last_offset(dp, &eof, XFS_DATA_FORK); if (*error) return XFS_DIR2_FMT_ERROR; if (eof == XFS_B_TO_FSB(mp, geo->blksize)) { if (XFS_IS_CORRUPT(mp, dp->i_disk_size != geo->blksize)) { xfs_da_mark_sick(args); *error = -EFSCORRUPTED; return XFS_DIR2_FMT_ERROR; } return XFS_DIR2_FMT_BLOCK; } if (eof == geo->leafblk + geo->fsbcount) return XFS_DIR2_FMT_LEAF; return XFS_DIR2_FMT_NODE; } int xfs_dir_createname_args( struct xfs_da_args *args) { int error; if (!args->inumber) args->op_flags |= XFS_DA_OP_JUSTCHECK; switch (xfs_dir2_format(args, &error)) { case XFS_DIR2_FMT_SF: return xfs_dir2_sf_addname(args); case XFS_DIR2_FMT_BLOCK: return xfs_dir2_block_addname(args); case XFS_DIR2_FMT_LEAF: return xfs_dir2_leaf_addname(args); case XFS_DIR2_FMT_NODE: return xfs_dir2_node_addname(args); default: return error; } } /* * Enter a name in a directory, or check for available space. * If inum is 0, only the available space test is performed. */ int xfs_dir_createname( struct xfs_trans *tp, struct xfs_inode *dp, const struct xfs_name *name, xfs_ino_t inum, /* new entry inode number */ xfs_extlen_t total) /* bmap's total block count */ { struct xfs_da_args *args; int rval; ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); if (inum) { rval = xfs_dir_ino_validate(tp->t_mountp, inum); if (rval) return rval; XFS_STATS_INC(dp->i_mount, xs_dir_create); } args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL); if (!args) return -ENOMEM; args->geo = dp->i_mount->m_dir_geo; args->name = name->name; args->namelen = name->len; args->filetype = name->type; args->hashval = xfs_dir2_hashname(dp->i_mount, name); args->inumber = inum; args->dp = dp; args->total = total; args->whichfork = XFS_DATA_FORK; args->trans = tp; args->op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT; args->owner = dp->i_ino; rval = xfs_dir_createname_args(args); kfree(args); return rval; } /* * If doing a CI lookup and case-insensitive match, dup actual name into * args.value. Return EEXIST for success (ie. name found) or an error. 
*/ int xfs_dir_cilookup_result( struct xfs_da_args *args, const unsigned char *name, int len) { if (args->cmpresult == XFS_CMP_DIFFERENT) return -ENOENT; if (args->cmpresult != XFS_CMP_CASE || !(args->op_flags & XFS_DA_OP_CILOOKUP)) return -EEXIST; args->value = kmemdup(name, len, GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_RETRY_MAYFAIL); if (!args->value) return -ENOMEM; args->valuelen = len; return -EEXIST; } int xfs_dir_lookup_args( struct xfs_da_args *args) { int error; switch (xfs_dir2_format(args, &error)) { case XFS_DIR2_FMT_SF: error = xfs_dir2_sf_lookup(args); break; case XFS_DIR2_FMT_BLOCK: error = xfs_dir2_block_lookup(args); break; case XFS_DIR2_FMT_LEAF: error = xfs_dir2_leaf_lookup(args); break; case XFS_DIR2_FMT_NODE: error = xfs_dir2_node_lookup(args); break; default: break; } if (error != -EEXIST) return error; return 0; } /* * Lookup a name in a directory, give back the inode number. * If ci_name is not NULL, returns the actual name in ci_name if it differs * from name, or ci_name->name is set to NULL for an exact match. */ int xfs_dir_lookup( struct xfs_trans *tp, struct xfs_inode *dp, const struct xfs_name *name, xfs_ino_t *inum, /* out: inode number */ struct xfs_name *ci_name) /* out: actual name if CI match */ { struct xfs_da_args *args; int rval; int lock_mode; ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); XFS_STATS_INC(dp->i_mount, xs_dir_lookup); args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); args->geo = dp->i_mount->m_dir_geo; args->name = name->name; args->namelen = name->len; args->filetype = name->type; args->hashval = xfs_dir2_hashname(dp->i_mount, name); args->dp = dp; args->whichfork = XFS_DATA_FORK; args->trans = tp; args->op_flags = XFS_DA_OP_OKNOENT; args->owner = dp->i_ino; if (ci_name) args->op_flags |= XFS_DA_OP_CILOOKUP; lock_mode = xfs_ilock_data_map_shared(dp); rval = xfs_dir_lookup_args(args); if (!rval) { *inum = args->inumber; if (ci_name) { ci_name->name = args->value; ci_name->len = args->valuelen; } } xfs_iunlock(dp, lock_mode); kfree(args); return rval; } int xfs_dir_removename_args( struct xfs_da_args *args) { int error; switch (xfs_dir2_format(args, &error)) { case XFS_DIR2_FMT_SF: return xfs_dir2_sf_removename(args); case XFS_DIR2_FMT_BLOCK: return xfs_dir2_block_removename(args); case XFS_DIR2_FMT_LEAF: return xfs_dir2_leaf_removename(args); case XFS_DIR2_FMT_NODE: return xfs_dir2_node_removename(args); default: return error; } } /* * Remove an entry from a directory.
*/ int xfs_dir_removename( struct xfs_trans *tp, struct xfs_inode *dp, const struct xfs_name *name, xfs_ino_t ino, xfs_extlen_t total) /* bmap's total block count */ { struct xfs_da_args *args; int rval; ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); XFS_STATS_INC(dp->i_mount, xs_dir_remove); args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL); if (!args) return -ENOMEM; args->geo = dp->i_mount->m_dir_geo; args->name = name->name; args->namelen = name->len; args->filetype = name->type; args->hashval = xfs_dir2_hashname(dp->i_mount, name); args->inumber = ino; args->dp = dp; args->total = total; args->whichfork = XFS_DATA_FORK; args->trans = tp; args->owner = dp->i_ino; rval = xfs_dir_removename_args(args); kfree(args); return rval; } int xfs_dir_replace_args( struct xfs_da_args *args) { int error; switch (xfs_dir2_format(args, &error)) { case XFS_DIR2_FMT_SF: return xfs_dir2_sf_replace(args); case XFS_DIR2_FMT_BLOCK: return xfs_dir2_block_replace(args); case XFS_DIR2_FMT_LEAF: return xfs_dir2_leaf_replace(args); case XFS_DIR2_FMT_NODE: return xfs_dir2_node_replace(args); default: return error; } } /* * Replace the inode number of a directory entry. */ int xfs_dir_replace( struct xfs_trans *tp, struct xfs_inode *dp, const struct xfs_name *name, /* name of entry to replace */ xfs_ino_t inum, /* new inode number */ xfs_extlen_t total) /* bmap's total block count */ { struct xfs_da_args *args; int rval; ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); rval = xfs_dir_ino_validate(tp->t_mountp, inum); if (rval) return rval; args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL); if (!args) return -ENOMEM; args->geo = dp->i_mount->m_dir_geo; args->name = name->name; args->namelen = name->len; args->filetype = name->type; args->hashval = xfs_dir2_hashname(dp->i_mount, name); args->inumber = inum; args->dp = dp; args->total = total; args->whichfork = XFS_DATA_FORK; args->trans = tp; args->owner = dp->i_ino; rval = xfs_dir_replace_args(args); kfree(args); return rval; } /* * See if this entry can be added to the directory without allocating space. */ int xfs_dir_canenter( struct xfs_trans *tp, struct xfs_inode *dp, const struct xfs_name *name) /* name of entry to add */ { return xfs_dir_createname(tp, dp, name, 0, 0); } /* * Utility routines. */ /* * Add a block to the directory. * * This routine is for data and free blocks, not leaf/node blocks which are * handled by xfs_da_grow_inode. */ int xfs_dir2_grow_inode( struct xfs_da_args *args, int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */ xfs_dir2_db_t *dbp) /* out: block number added */ { struct xfs_inode *dp = args->dp; struct xfs_mount *mp = dp->i_mount; xfs_fileoff_t bno; /* directory offset of new block */ int count; /* count of filesystem blocks */ int error; trace_xfs_dir2_grow_inode(args, space); /* * Set lowest possible block in the space requested. */ bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE); count = args->geo->fsbcount; error = xfs_da_grow_inode_int(args, &bno, count); if (error) return error; *dbp = xfs_dir2_da_to_db(args->geo, (xfs_dablk_t)bno); /* * Update file's size if this is the data space and it grew. */ if (space == XFS_DIR2_DATA_SPACE) { xfs_fsize_t size; /* directory file (data) size */ size = XFS_FSB_TO_B(mp, bno + count); if (size > dp->i_disk_size) { dp->i_disk_size = size; xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE); } } return 0; } /* * Remove the given block from the directory. * This routine is used for data and free blocks, leaf/node are done * by xfs_da_shrink_inode. 
*/ int xfs_dir2_shrink_inode( struct xfs_da_args *args, xfs_dir2_db_t db, struct xfs_buf *bp) { xfs_fileoff_t bno; /* directory file offset */ xfs_dablk_t da; /* directory file offset */ int done; /* bunmap is finished */ struct xfs_inode *dp; int error; struct xfs_mount *mp; struct xfs_trans *tp; trace_xfs_dir2_shrink_inode(args, db); dp = args->dp; mp = dp->i_mount; tp = args->trans; da = xfs_dir2_db_to_da(args->geo, db); /* Unmap the fsblock(s). */ error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0, &done); if (error) { /* * ENOSPC actually can happen if we're in a removename with no * space reservation, and the resulting block removal would * cause a bmap btree split or conversion from extents to btree. * This can only happen for un-fragmented directory blocks, * since you need to be punching out the middle of an extent. * In this case we need to leave the block in the file, and not * binval it. So the block has to be in a consistent empty * state and appropriately logged. We don't free up the buffer, * the caller can tell it hasn't happened since it got an error * back. */ return error; } ASSERT(done); /* * Invalidate the buffer from the transaction. */ xfs_trans_binval(tp, bp); /* * If it's not a data block, we're done. */ if (db >= xfs_dir2_byte_to_db(args->geo, XFS_DIR2_LEAF_OFFSET)) return 0; /* * If the block isn't the last one in the directory, we're done. */ if (dp->i_disk_size > xfs_dir2_db_off_to_byte(args->geo, db + 1, 0)) return 0; bno = da; if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) { /* * This can't really happen unless there's kernel corruption. */ return error; } if (db == args->geo->datablk) ASSERT(bno == 0); else ASSERT(bno > 0); /* * Set the size to the new last block. */ dp->i_disk_size = XFS_FSB_TO_B(mp, bno); xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); return 0; } /* Returns true if the directory entry name is valid. */ bool xfs_dir2_namecheck( const void *name, size_t length) { /* * MAXNAMELEN includes the trailing null, but (name/length) leave it * out, so use >= for the length check. */ if (length >= MAXNAMELEN) return false; /* There shouldn't be any slashes or nulls here */ return !memchr(name, '/', length) && !memchr(name, 0, length); } xfs_dahash_t xfs_dir2_hashname( struct xfs_mount *mp, const struct xfs_name *name) { if (unlikely(xfs_has_asciici(mp))) return xfs_ascii_ci_hashname(name); return xfs_da_hashname(name->name, name->len); } enum xfs_dacmp xfs_dir2_compname( struct xfs_da_args *args, const unsigned char *name, int len) { if (unlikely(xfs_has_asciici(args->dp->i_mount))) return xfs_ascii_ci_compname(args, name, len); return xfs_da_compname(args, name, len); } #ifdef CONFIG_XFS_LIVE_HOOKS /* * Use a static key here to reduce the overhead of directory live update hooks. * If the compiler supports jump labels, the static branch will be replaced by * a nop sled when there are no hook users. Online fsck is currently the only * caller, so this is a reasonable tradeoff. * * Note: Patching the kernel code requires taking the cpu hotplug lock. Other * parts of the kernel allocate memory with that lock held, which means that * XFS callers cannot hold any locks that might be used by memory reclaim or * writeback when calling the static_branch_{inc,dec} functions. 
*/ DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_dir_hooks_switch); void xfs_dir_hook_disable(void) { xfs_hooks_switch_off(&xfs_dir_hooks_switch); } void xfs_dir_hook_enable(void) { xfs_hooks_switch_on(&xfs_dir_hooks_switch); } /* Call hooks for a directory update relating to a child dirent update. */ inline void xfs_dir_update_hook( struct xfs_inode *dp, struct xfs_inode *ip, int delta, const struct xfs_name *name) { if (xfs_hooks_switched_on(&xfs_dir_hooks_switch)) { struct xfs_dir_update_params p = { .dp = dp, .ip = ip, .delta = delta, .name = name, }; struct xfs_mount *mp = ip->i_mount; xfs_hooks_call(&mp->m_dir_update_hooks, 0, &p); } } /* Call the specified function during a directory update. */ int xfs_dir_hook_add( struct xfs_mount *mp, struct xfs_dir_hook *hook) { return xfs_hooks_add(&mp->m_dir_update_hooks, &hook->dirent_hook); } /* Stop calling the specified function during a directory update. */ void xfs_dir_hook_del( struct xfs_mount *mp, struct xfs_dir_hook *hook) { xfs_hooks_del(&mp->m_dir_update_hooks, &hook->dirent_hook); } /* Configure directory update hook functions. */ void xfs_dir_hook_setup( struct xfs_dir_hook *hook, notifier_fn_t mod_fn) { xfs_hook_setup(&hook->dirent_hook, mod_fn); } #endif /* CONFIG_XFS_LIVE_HOOKS */ /* * Given a directory @dp, a newly allocated inode @ip, and a @name, link @ip * into @dp under the given @name. If @ip is a directory, it will be * initialized. Both inodes must have the ILOCK held and the transaction must * have sufficient blocks reserved. */ int xfs_dir_create_child( struct xfs_trans *tp, unsigned int resblks, struct xfs_dir_update *du) { struct xfs_inode *dp = du->dp; const struct xfs_name *name = du->name; struct xfs_inode *ip = du->ip; int error; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); xfs_assert_ilocked(dp, XFS_ILOCK_EXCL); error = xfs_dir_createname(tp, dp, name, ip->i_ino, resblks); if (error) { ASSERT(error != -ENOSPC); return error; } xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); if (S_ISDIR(VFS_I(ip)->i_mode)) { error = xfs_dir_init(tp, ip, dp); if (error) return error; xfs_bumplink(tp, dp); } /* * If we have parent pointers, we need to add the attribute containing * the parent information now. */ if (du->ppargs) { error = xfs_parent_addname(tp, du->ppargs, dp, name, ip); if (error) return error; } xfs_dir_update_hook(dp, ip, 1, name); return 0; } /* * Given a directory @dp, an existing non-directory inode @ip, and a @name, * link @ip into @dp under the given @name. Both inodes must have the ILOCK * held. 
*/ int xfs_dir_add_child( struct xfs_trans *tp, unsigned int resblks, struct xfs_dir_update *du) { struct xfs_inode *dp = du->dp; const struct xfs_name *name = du->name; struct xfs_inode *ip = du->ip; struct xfs_mount *mp = tp->t_mountp; int error; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); xfs_assert_ilocked(dp, XFS_ILOCK_EXCL); ASSERT(!S_ISDIR(VFS_I(ip)->i_mode)); if (!resblks) { error = xfs_dir_canenter(tp, dp, name); if (error) return error; } /* * Handle initial link state of O_TMPFILE inode */ if (VFS_I(ip)->i_nlink == 0) { struct xfs_perag *pag; pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); error = xfs_iunlink_remove(tp, pag, ip); xfs_perag_put(pag); if (error) return error; } error = xfs_dir_createname(tp, dp, name, ip->i_ino, resblks); if (error) return error; xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); xfs_bumplink(tp, ip); /* * If we have parent pointers, we now need to add the parent record to * the attribute fork of the inode. If this is the initial parent * attribute, we need to create it correctly, otherwise we can just add * the parent to the inode. */ if (du->ppargs) { error = xfs_parent_addname(tp, du->ppargs, dp, name, ip); if (error) return error; } xfs_dir_update_hook(dp, ip, 1, name); return 0; } /* * Given a directory @dp, a child @ip, and a @name, remove the (@name, @ip) * entry from the directory. Both inodes must have the ILOCK held. */ int xfs_dir_remove_child( struct xfs_trans *tp, unsigned int resblks, struct xfs_dir_update *du) { struct xfs_inode *dp = du->dp; const struct xfs_name *name = du->name; struct xfs_inode *ip = du->ip; int error; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); xfs_assert_ilocked(dp, XFS_ILOCK_EXCL); /* * If we're removing a directory perform some additional validation. */ if (S_ISDIR(VFS_I(ip)->i_mode)) { ASSERT(VFS_I(ip)->i_nlink >= 2); if (VFS_I(ip)->i_nlink != 2) return -ENOTEMPTY; if (!xfs_dir_isempty(ip)) return -ENOTEMPTY; /* Drop the link from ip's "..". */ error = xfs_droplink(tp, dp); if (error) return error; /* Drop the "." link from ip to self. */ error = xfs_droplink(tp, ip); if (error) return error; /* * Point the unlinked child directory's ".." entry to the root * directory to eliminate back-references to inodes that may * get freed before the child directory is closed. If the fs * gets shrunk, this can lead to dirent inode validation errors. */ if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) { error = xfs_dir_replace(tp, ip, &xfs_name_dotdot, tp->t_mountp->m_sb.sb_rootino, 0); if (error) return error; } } else { /* * When removing a non-directory we need to log the parent * inode here. For a directory this is done implicitly * by the xfs_droplink call for the ".." entry. */ xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); } xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); /* Drop the link from dp to ip. */ error = xfs_droplink(tp, ip); if (error) return error; error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks); if (error) { ASSERT(error != -ENOENT); return error; } /* Remove parent pointer. */ if (du->ppargs) { error = xfs_parent_removename(tp, du->ppargs, dp, name, ip); if (error) return error; } xfs_dir_update_hook(dp, ip, -1, name); return 0; } /* * Exchange the entry (@name1, @ip1) in directory @dp1 with the entry (@name2, * @ip2) in directory @dp2, and update '..' @ip1 and @ip2's entries as needed. * @ip1 and @ip2 need not be of the same type. * * All inodes must have the ILOCK held, and both entries must already exist. 
*/ int xfs_dir_exchange_children( struct xfs_trans *tp, struct xfs_dir_update *du1, struct xfs_dir_update *du2, unsigned int spaceres) { struct xfs_inode *dp1 = du1->dp; const struct xfs_name *name1 = du1->name; struct xfs_inode *ip1 = du1->ip; struct xfs_inode *dp2 = du2->dp; const struct xfs_name *name2 = du2->name; struct xfs_inode *ip2 = du2->ip; int ip1_flags = 0; int ip2_flags = 0; int dp2_flags = 0; int error; /* Swap inode number for dirent in first parent */ error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres); if (error) return error; /* Swap inode number for dirent in second parent */ error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres); if (error) return error; /* * If we're renaming one or more directories across different parents, * update the respective ".." entries (and link counts) to match the new * parents. */ if (dp1 != dp2) { dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; if (S_ISDIR(VFS_I(ip2)->i_mode)) { error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot, dp1->i_ino, spaceres); if (error) return error; /* transfer ip2 ".." reference to dp1 */ if (!S_ISDIR(VFS_I(ip1)->i_mode)) { error = xfs_droplink(tp, dp2); if (error) return error; xfs_bumplink(tp, dp1); } /* * Although ip1 isn't changed here, userspace needs * to be warned about the change, so that applications * relying on it (like backup ones) will properly * notice the change */ ip1_flags |= XFS_ICHGTIME_CHG; ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; } if (S_ISDIR(VFS_I(ip1)->i_mode)) { error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot, dp2->i_ino, spaceres); if (error) return error; /* transfer ip1 ".." reference to dp2 */ if (!S_ISDIR(VFS_I(ip2)->i_mode)) { error = xfs_droplink(tp, dp1); if (error) return error; xfs_bumplink(tp, dp2); } /* * Although ip2 isn't changed here, userspace needs * to be warned about the change, so that applications * relying on it (like backup ones) will properly * notice the change */ ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; ip2_flags |= XFS_ICHGTIME_CHG; } } if (ip1_flags) { xfs_trans_ichgtime(tp, ip1, ip1_flags); xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE); } if (ip2_flags) { xfs_trans_ichgtime(tp, ip2, ip2_flags); xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE); } if (dp2_flags) { xfs_trans_ichgtime(tp, dp2, dp2_flags); xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE); } xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); /* Schedule parent pointer replacements */ if (du1->ppargs) { error = xfs_parent_replacename(tp, du1->ppargs, dp1, name1, dp2, name2, ip1); if (error) return error; } if (du2->ppargs) { error = xfs_parent_replacename(tp, du2->ppargs, dp2, name2, dp1, name1, ip2); if (error) return error; } /* * Inform our hook clients that we've finished an exchange operation as * follows: removed the source and target files from their directories; * added the target to the source directory; and added the source to * the target directory. All inodes are locked, so it's ok to model a * rename this way so long as we say we deleted entries before we add * new ones. */ xfs_dir_update_hook(dp1, ip1, -1, name1); xfs_dir_update_hook(dp2, ip2, -1, name2); xfs_dir_update_hook(dp1, ip2, 1, name1); xfs_dir_update_hook(dp2, ip1, 1, name2); return 0; } /* * Given an entry (@src_name, @src_ip) in directory @src_dp, make the entry * @target_name in directory @target_dp point to @src_ip and remove the * original entry, cleaning up everything left behind.
* * Cleanup involves dropping a link count on @target_ip, and either removing * the (@src_name, @src_ip) entry from @src_dp or simply replacing the entry * with (@src_name, @wip) if a whiteout inode @wip is supplied. * * All inodes must have the ILOCK held. We assume that if @src_ip is a * directory then its '..' doesn't already point to @target_dp, and that @wip * is a freshly allocated whiteout. */ int xfs_dir_rename_children( struct xfs_trans *tp, struct xfs_dir_update *du_src, struct xfs_dir_update *du_tgt, unsigned int spaceres, struct xfs_dir_update *du_wip) { struct xfs_mount *mp = tp->t_mountp; struct xfs_inode *src_dp = du_src->dp; const struct xfs_name *src_name = du_src->name; struct xfs_inode *src_ip = du_src->ip; struct xfs_inode *target_dp = du_tgt->dp; const struct xfs_name *target_name = du_tgt->name; struct xfs_inode *target_ip = du_tgt->ip; bool new_parent = (src_dp != target_dp); bool src_is_directory; int error; src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); /* * Check for expected errors before we dirty the transaction * so we can return an error without a transaction abort. */ if (target_ip == NULL) { /* * If there's no space reservation, check the entry will * fit before actually inserting it. */ if (!spaceres) { error = xfs_dir_canenter(tp, target_dp, target_name); if (error) return error; } } else { /* * If target exists and it's a directory, check that whether * it can be destroyed. */ if (S_ISDIR(VFS_I(target_ip)->i_mode) && (!xfs_dir_isempty(target_ip) || (VFS_I(target_ip)->i_nlink > 2))) return -EEXIST; } /* * Directory entry creation below may acquire the AGF. Remove * the whiteout from the unlinked list first to preserve correct * AGI/AGF locking order. This dirties the transaction so failures * after this point will abort and log recovery will clean up the * mess. * * For whiteouts, we need to bump the link count on the whiteout * inode. After this point, we have a real link, clear the tmpfile * state flag from the inode so it doesn't accidentally get misused * in future. */ if (du_wip->ip) { struct xfs_perag *pag; ASSERT(VFS_I(du_wip->ip)->i_nlink == 0); pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, du_wip->ip->i_ino)); error = xfs_iunlink_remove(tp, pag, du_wip->ip); xfs_perag_put(pag); if (error) return error; xfs_bumplink(tp, du_wip->ip); } /* * Set up the target. */ if (target_ip == NULL) { /* * If target does not exist and the rename crosses * directories, adjust the target directory link count * to account for the ".." reference from the new entry. */ error = xfs_dir_createname(tp, target_dp, target_name, src_ip->i_ino, spaceres); if (error) return error; xfs_trans_ichgtime(tp, target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); if (new_parent && src_is_directory) { xfs_bumplink(tp, target_dp); } } else { /* target_ip != NULL */ /* * Link the source inode under the target name. * If the source inode is a directory and we are moving * it across directories, its ".." entry will be * inconsistent until we replace that down below. * * In case there is already an entry with the same * name at the destination directory, remove it first. */ error = xfs_dir_replace(tp, target_dp, target_name, src_ip->i_ino, spaceres); if (error) return error; xfs_trans_ichgtime(tp, target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); /* * Decrement the link count on the target since the target * dir no longer points to it. */ error = xfs_droplink(tp, target_ip); if (error) return error; if (src_is_directory) { /* * Drop the link from the old "." entry. 
*/ error = xfs_droplink(tp, target_ip); if (error) return error; } } /* target_ip != NULL */ /* * Remove the source. */ if (new_parent && src_is_directory) { /* * Rewrite the ".." entry to point to the new * directory. */ error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, target_dp->i_ino, spaceres); ASSERT(error != -EEXIST); if (error) return error; } /* * We always want to hit the ctime on the source inode. * * This isn't strictly required by the standards since the source * inode isn't really being changed, but old unix file systems did * it and some incremental backup programs won't work without it. */ xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); /* * Adjust the link count on src_dp. This is necessary when * renaming a directory, either within one parent when * the target existed, or across two parent directories. */ if (src_is_directory && (new_parent || target_ip != NULL)) { /* * Decrement link count on src_directory since the * entry that's moved no longer points to it. */ error = xfs_droplink(tp, src_dp); if (error) return error; } /* * For whiteouts, we only need to update the source dirent with the * inode number of the whiteout inode rather than removing it * altogether. */ if (du_wip->ip) error = xfs_dir_replace(tp, src_dp, src_name, du_wip->ip->i_ino, spaceres); else error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, spaceres); if (error) return error; xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); if (new_parent) xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); /* Schedule parent pointer updates. */ if (du_wip->ppargs) { error = xfs_parent_addname(tp, du_wip->ppargs, src_dp, src_name, du_wip->ip); if (error) return error; } if (du_src->ppargs) { error = xfs_parent_replacename(tp, du_src->ppargs, src_dp, src_name, target_dp, target_name, src_ip); if (error) return error; } if (du_tgt->ppargs) { error = xfs_parent_removename(tp, du_tgt->ppargs, target_dp, target_name, target_ip); if (error) return error; } /* * Inform our hook clients that we've finished a rename operation as * follows: removed the source and target files from their directories; * that we've added the source to the target directory; and finally * that we've added the whiteout, if there was one. All inodes are * locked, so it's ok to model a rename this way so long as we say we * deleted entries before we add new ones. */ if (target_ip) xfs_dir_update_hook(target_dp, target_ip, -1, target_name); xfs_dir_update_hook(src_dp, src_ip, -1, src_name); xfs_dir_update_hook(target_dp, src_ip, 1, target_name); if (du_wip->ip) xfs_dir_update_hook(src_dp, du_wip->ip, 1, src_name); return 0; }
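/*
 * Illustrative sketch, not part of this file: how a client of the
 * CONFIG_XFS_LIVE_HOOKS machinery above would wire itself up.  Online
 * fsck is the real in-tree user; the "example_" names here are
 * hypothetical, and the enable/add ordering is an assumption.  The hook
 * function receives the struct xfs_dir_update_params that
 * xfs_dir_update_hook() passes to xfs_hooks_call().
 */
static int example_dirent_hook_fn(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	const struct xfs_dir_update_params *p = data;

	/* p->dp, p->ip, p->delta and p->name describe the dirent change. */
	return NOTIFY_DONE;
}

static int example_attach_dir_hook(struct xfs_mount *mp,
				   struct xfs_dir_hook *hook)
{
	xfs_dir_hook_setup(hook, example_dirent_hook_fn);
	xfs_dir_hook_enable();	/* flip the static key for hook delivery */
	return xfs_dir_hook_add(mp, hook);
}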
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Generic netlink support functions to configure an SMC-R PNET table * * Copyright IBM Corp. 2016 * * Author(s): Thomas Richter <tmricht@linux.vnet.ibm.com> */ #include <linux/module.h> #include <linux/list.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <net/netlink.h> #include <net/genetlink.h> #include <uapi/linux/if.h> #include <uapi/linux/smc.h> #include <rdma/ib_verbs.h> #include <net/netns/generic.h> #include "smc_netns.h" #include "smc_pnet.h" #include "smc_ib.h" #include "smc_ism.h" #include "smc_core.h" static struct net_device *__pnet_find_base_ndev(struct net_device *ndev); static struct net_device *pnet_find_base_ndev(struct net_device *ndev); static const struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { [SMC_PNETID_NAME] = { .type = NLA_NUL_STRING, .len = SMC_MAX_PNETID_LEN }, [SMC_PNETID_ETHNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [SMC_PNETID_IBNAME] = { .type = NLA_NUL_STRING, .len = IB_DEVICE_NAME_MAX - 1 }, [SMC_PNETID_IBPORT] = { .type = NLA_U8 } }; static struct genl_family smc_pnet_nl_family; enum smc_pnet_nametype { SMC_PNET_ETH = 1, SMC_PNET_IB = 2, }; /* pnet entry stored in pnet table */ struct smc_pnetentry { struct list_head list; char pnet_name[SMC_MAX_PNETID_LEN + 1]; enum smc_pnet_nametype type; union { struct { char eth_name[IFNAMSIZ + 1]; struct net_device *ndev; netdevice_tracker dev_tracker; }; struct { char ib_name[IB_DEVICE_NAME_MAX + 1]; u8 ib_port; }; }; }; /* Check if the pnetid is set */ bool smc_pnet_is_pnetid_set(u8 *pnetid) { if (pnetid[0] == 0 || pnetid[0] == _S) return false; return true; } /* Check if two given pnetids match */ static bool smc_pnet_match(u8 *pnetid1, u8 *pnetid2) { int i; for (i = 0; i < SMC_MAX_PNETID_LEN; i++) { if ((pnetid1[i] == 0 || pnetid1[i] == _S) && (pnetid2[i] == 0 || pnetid2[i] == _S)) break; if (pnetid1[i] != pnetid2[i]) return false; } return true; } /* Remove a pnetid from the pnet table.
*/ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct smc_ib_device *ibdev; struct smcd_dev *smcd; struct smc_net *sn; int rc = -ENOENT; int ibport; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; /* remove table entry */ mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (!pnet_name || smc_pnet_match(pnetelem->pnet_name, pnet_name)) { list_del(&pnetelem->list); if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev) { netdev_put(pnetelem->ndev, &pnetelem->dev_tracker); pr_warn_ratelimited("smc: net device %s " "erased user defined " "pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); } kfree(pnetelem); rc = 0; } } mutex_unlock(&pnettable->lock); /* if this is not the initial namespace, stop here */ if (net != &init_net) return rc; /* remove ib devices */ mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) { if (ibdev->pnetid_by_user[ibport] && (!pnet_name || smc_pnet_match(pnet_name, ibdev->pnetid[ibport]))) { pr_warn_ratelimited("smc: ib device %s ibport " "%d erased user defined " "pnetid %.16s\n", ibdev->ibdev->name, ibport + 1, ibdev->pnetid[ibport]); memset(ibdev->pnetid[ibport], 0, SMC_MAX_PNETID_LEN); ibdev->pnetid_by_user[ibport] = false; rc = 0; } } } mutex_unlock(&smc_ib_devices.mutex); /* remove smcd devices */ mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) { if (smcd->pnetid_by_user && (!pnet_name || smc_pnet_match(pnet_name, smcd->pnetid))) { pr_warn_ratelimited("smc: smcd device %s " "erased user defined pnetid " "%.16s\n", dev_name(&smcd->dibs->dev), smcd->pnetid); memset(smcd->pnetid, 0, SMC_MAX_PNETID_LEN); smcd->pnetid_by_user = false; rc = 0; } } mutex_unlock(&smcd_dev_list.mutex); return rc; } /* Add the reference to a given network device to the pnet table. */ static int smc_pnet_add_by_ndev(struct net_device *ndev) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { netdev_hold(ndev, &pnetelem->dev_tracker, GFP_ATOMIC); pnetelem->ndev = ndev; rc = 0; pr_warn_ratelimited("smc: adding net device %s with " "user defined pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); break; } } mutex_unlock(&pnettable->lock); return rc; } /* Remove the reference to a given network device from the pnet table. 
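 * Called from the netdev notifier, e.g. on NETDEV_UNREGISTER; the table
 * entry itself is kept and only drops its ndev reference, so the
 * user-defined pnetid survives an unregister/register cycle of the device.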
*/ static int smc_pnet_remove_by_ndev(struct net_device *ndev) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { netdev_put(pnetelem->ndev, &pnetelem->dev_tracker); pnetelem->ndev = NULL; rc = 0; pr_warn_ratelimited("smc: removing net device %s with " "user defined pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); break; } } mutex_unlock(&pnettable->lock); return rc; } /* Apply pnetid to ib device when no pnetid is set. */ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port, char *pnet_name) { bool applied = false; mutex_lock(&smc_ib_devices.mutex); if (!smc_pnet_is_pnetid_set(ib_dev->pnetid[ib_port - 1])) { memcpy(ib_dev->pnetid[ib_port - 1], pnet_name, SMC_MAX_PNETID_LEN); ib_dev->pnetid_by_user[ib_port - 1] = true; applied = true; } mutex_unlock(&smc_ib_devices.mutex); return applied; } /* Apply pnetid to smcd device when no pnetid is set. */ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name) { bool applied = false; mutex_lock(&smcd_dev_list.mutex); if (!smc_pnet_is_pnetid_set(smcd_dev->pnetid)) { memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN); smcd_dev->pnetid_by_user = true; applied = true; } mutex_unlock(&smcd_dev_list.mutex); return applied; } /* The limit for pnetid is 16 characters. * Valid characters should be (single-byte character set) a-z, A-Z, 0-9. * Lower case letters are converted to upper case. * Interior blanks should not be used. */ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid) { char *bf = skip_spaces(pnet_name); size_t len = strlen(bf); char *end = bf + len; if (!len) return false; while (--end >= bf && isspace(*end)) ; if (end - bf >= SMC_MAX_PNETID_LEN) return false; while (bf <= end) { if (!isalnum(*bf)) return false; *pnetid++ = islower(*bf) ? toupper(*bf) : *bf; bf++; } *pnetid = '\0'; return true; } /* Find an infiniband device by a given name. The device might not exist. */ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) { struct smc_ib_device *ibdev; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (!strncmp(ibdev->ibdev->name, ib_name, sizeof(ibdev->ibdev->name)) || (ibdev->ibdev->dev.parent && !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, IB_DEVICE_NAME_MAX - 1))) { goto out; } } ibdev = NULL; out: mutex_unlock(&smc_ib_devices.mutex); return ibdev; } /* Find an smcd device by a given name. The device might not exist. 
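 * As with smc_pnet_find_ib() above, the given name may refer either to the
 * device itself or to its parent device.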
*/ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name) { struct smcd_dev *smcd_dev; mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) { if (!strncmp(dev_name(&smcd_dev->dibs->dev), smcd_name, IB_DEVICE_NAME_MAX - 1) || (smcd_dev->dibs->dev.parent && !strncmp(dev_name(smcd_dev->dibs->dev.parent), smcd_name, IB_DEVICE_NAME_MAX - 1))) goto out; } smcd_dev = NULL; out: mutex_unlock(&smcd_dev_list.mutex); return smcd_dev; } static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, char *eth_name, char *pnet_name) { struct smc_pnetentry *tmp_pe, *new_pe; struct net_device *ndev, *base_ndev; u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; bool new_netdev; int rc; /* check if (base) netdev already has a pnetid. If there is one, we do * not want to add a pnet table entry */ rc = -EEXIST; ndev = dev_get_by_name(net, eth_name); /* dev_hold() */ if (ndev) { base_ndev = pnet_find_base_ndev(ndev); if (!smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, ndev_pnetid)) goto out_put; } /* add a new netdev entry to the pnet table if there isn't one */ rc = -ENOMEM; new_pe = kzalloc(sizeof(*new_pe), GFP_KERNEL); if (!new_pe) goto out_put; new_pe->type = SMC_PNET_ETH; memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strscpy(new_pe->eth_name, eth_name); rc = -EEXIST; new_netdev = true; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_ETH && !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) { new_netdev = false; break; } } if (new_netdev) { if (ndev) { new_pe->ndev = ndev; netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_ATOMIC); } list_add_tail(&new_pe->list, &pnettable->pnetlist); mutex_unlock(&pnettable->lock); } else { mutex_unlock(&pnettable->lock); kfree(new_pe); goto out_put; } if (ndev) pr_warn_ratelimited("smc: net device %s " "applied user defined pnetid %.16s\n", new_pe->eth_name, new_pe->pnet_name); return 0; out_put: dev_put(ndev); return rc; } static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, u8 ib_port, char *pnet_name) { struct smc_pnetentry *tmp_pe, *new_pe; struct smc_ib_device *ib_dev; bool smcddev_applied = true; bool ibdev_applied = true; struct smcd_dev *smcd; bool new_ibdev; /* try to apply the pnetid to active devices */ ib_dev = smc_pnet_find_ib(ib_name); if (ib_dev) { ibdev_applied = smc_pnet_apply_ib(ib_dev, ib_port, pnet_name); if (ibdev_applied) pr_warn_ratelimited("smc: ib device %s ibport %d " "applied user defined pnetid " "%.16s\n", ib_dev->ibdev->name, ib_port, ib_dev->pnetid[ib_port - 1]); } smcd = smc_pnet_find_smcd(ib_name); if (smcd) { smcddev_applied = smc_pnet_apply_smcd(smcd, pnet_name); if (smcddev_applied) { pr_warn_ratelimited("smc: smcd device %s applied user defined pnetid %.16s\n", dev_name(&smcd->dibs->dev), smcd->pnetid); } } /* Apply fails when a device has a hardware-defined pnetid set, do not * add a pnet table entry in that case. 
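 * Note that ibdev_applied and smcddev_applied are initialized to true, so a
 * device that is not present yet does not prevent the table entry from being
 * added; its pnetid can still be applied later via smc_pnetid_by_table_ib()
 * or smc_pnetid_by_table_smcd() below.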
*/ if (!ibdev_applied || !smcddev_applied) return -EEXIST; /* add a new ib entry to the pnet table if there isn't one */ new_pe = kzalloc(sizeof(*new_pe), GFP_KERNEL); if (!new_pe) return -ENOMEM; new_pe->type = SMC_PNET_IB; memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strscpy(new_pe->ib_name, ib_name); new_pe->ib_port = ib_port; new_ibdev = true; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { new_ibdev = false; break; } } if (new_ibdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); mutex_unlock(&pnettable->lock); } else { mutex_unlock(&pnettable->lock); kfree(new_pe); } return (new_ibdev) ? 0 : -EEXIST; } /* Append a pnetid to the end of the pnet table if not already on this list. */ static int smc_pnet_enter(struct net *net, struct nlattr *tb[]) { char pnet_name[SMC_MAX_PNETID_LEN + 1]; struct smc_pnettable *pnettable; bool new_netdev = false; bool new_ibdev = false; struct smc_net *sn; u8 ibport = 1; char *string; int rc; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; rc = -EINVAL; if (!tb[SMC_PNETID_NAME]) goto error; string = (char *)nla_data(tb[SMC_PNETID_NAME]); if (!smc_pnetid_valid(string, pnet_name)) goto error; if (tb[SMC_PNETID_ETHNAME]) { string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); rc = smc_pnet_add_eth(pnettable, net, string, pnet_name); if (!rc) new_netdev = true; else if (rc != -EEXIST) goto error; } /* if this is not the initial namespace, stop here */ if (net != &init_net) return new_netdev ? 0 : -EEXIST; rc = -EINVAL; if (tb[SMC_PNETID_IBNAME]) { string = (char *)nla_data(tb[SMC_PNETID_IBNAME]); string = strim(string); if (tb[SMC_PNETID_IBPORT]) { ibport = nla_get_u8(tb[SMC_PNETID_IBPORT]); if (ibport < 1 || ibport > SMC_MAX_PORTS) goto error; } rc = smc_pnet_add_ib(pnettable, string, ibport, pnet_name); if (!rc) new_ibdev = true; else if (rc != -EEXIST) goto error; } return (new_netdev || new_ibdev) ? 
0 : -EEXIST; error: return rc; } /* Convert an smc_pnetentry to a netlink attribute sequence */ static int smc_pnet_set_nla(struct sk_buff *msg, struct smc_pnetentry *pnetelem) { if (nla_put_string(msg, SMC_PNETID_NAME, pnetelem->pnet_name)) return -1; if (pnetelem->type == SMC_PNET_ETH) { if (nla_put_string(msg, SMC_PNETID_ETHNAME, pnetelem->eth_name)) return -1; } else { if (nla_put_string(msg, SMC_PNETID_ETHNAME, "n/a")) return -1; } if (pnetelem->type == SMC_PNET_IB) { if (nla_put_string(msg, SMC_PNETID_IBNAME, pnetelem->ib_name) || nla_put_u8(msg, SMC_PNETID_IBPORT, pnetelem->ib_port)) return -1; } else { if (nla_put_string(msg, SMC_PNETID_IBNAME, "n/a") || nla_put_u8(msg, SMC_PNETID_IBPORT, 0xff)) return -1; } return 0; } static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); return smc_pnet_enter(net, info->attrs); } static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); if (!info->attrs[SMC_PNETID_NAME]) return -EINVAL; return smc_pnet_remove_by_pnetid(net, (char *)nla_data(info->attrs[SMC_PNETID_NAME])); } static int smc_pnet_dump_start(struct netlink_callback *cb) { cb->args[0] = 0; return 0; } static int smc_pnet_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq, u32 flags, struct smc_pnetentry *pnetelem) { void *hdr; hdr = genlmsg_put(skb, portid, seq, &smc_pnet_nl_family, flags, SMC_PNETID_GET); if (!hdr) return -ENOMEM; if (smc_pnet_set_nla(skb, pnetelem) < 0) { genlmsg_cancel(skb, hdr); return -EMSGSIZE; } genlmsg_end(skb, hdr); return 0; } static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, u32 seq, u8 *pnetid, int start_idx) { struct smc_pnettable *pnettable; struct smc_pnetentry *pnetelem; struct smc_net *sn; int idx = 0; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; /* dump pnettable entries */ mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid)) continue; if (idx++ < start_idx) continue; /* if this is not the initial namespace, dump only netdev */ if (net != &init_net && pnetelem->type != SMC_PNET_ETH) continue; if (smc_pnet_dumpinfo(skb, portid, seq, NLM_F_MULTI, pnetelem)) { --idx; break; } } mutex_unlock(&pnettable->lock); return idx; } static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx; idx = _smc_pnet_dump(net, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NULL, cb->args[0]); cb->args[0] = idx; return skb->len; } /* Retrieve one PNETID entry */ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct sk_buff *msg; void *hdr; if (!info->attrs[SMC_PNETID_NAME]) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; _smc_pnet_dump(net, msg, info->snd_portid, info->snd_seq, nla_data(info->attrs[SMC_PNETID_NAME]), 0); /* finish multi part message and send it */ hdr = nlmsg_put(msg, info->snd_portid, info->snd_seq, NLMSG_DONE, 0, NLM_F_MULTI); if (!hdr) { nlmsg_free(msg); return -EMSGSIZE; } return genlmsg_reply(msg, info); } /* Remove and delete all pnetids from pnet table. 
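 * Implemented as smc_pnet_remove_by_pnetid() with a NULL pnet_name, i.e. the
 * wildcard case described above.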
*/ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); smc_pnet_remove_by_pnetid(net, NULL); return 0; } /* SMC_PNETID generic netlink operation definition */ static const struct genl_ops smc_pnet_ops[] = { { .cmd = SMC_PNETID_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, /* can be retrieved by unprivileged users */ .doit = smc_pnet_get, .dumpit = smc_pnet_dump, .start = smc_pnet_dump_start }, { .cmd = SMC_PNETID_ADD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_add }, { .cmd = SMC_PNETID_DEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_del }, { .cmd = SMC_PNETID_FLUSH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_flush } }; /* SMC_PNETID family definition */ static struct genl_family smc_pnet_nl_family __ro_after_init = { .hdrsize = 0, .name = SMCR_GENL_FAMILY_NAME, .version = SMCR_GENL_FAMILY_VERSION, .maxattr = SMC_PNETID_MAX, .policy = smc_pnet_policy, .netnsok = true, .module = THIS_MODULE, .ops = smc_pnet_ops, .n_ops = ARRAY_SIZE(smc_pnet_ops), .resv_start_op = SMC_PNETID_FLUSH + 1, }; bool smc_pnet_is_ndev_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe; bool rc = false; read_lock(&sn->pnetids_ndev.lock); list_for_each_entry(pe, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pe->pnetid)) { rc = true; goto unlock; } } unlock: read_unlock(&sn->pnetids_ndev.lock); return rc; } static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *pi; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return -ENOMEM; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry(pi, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pi->pnetid)) { refcount_inc(&pi->refcnt); kfree(pe); goto unlock; } } refcount_set(&pe->refcnt, 1); memcpy(pe->pnetid, pnetid, SMC_MAX_PNETID_LEN); list_add_tail(&pe->list, &sn->pnetids_ndev.list); unlock: write_unlock(&sn->pnetids_ndev.lock); return 0; } static void smc_pnet_remove_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *pe2; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry_safe(pe, pe2, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pe->pnetid)) { if (refcount_dec_and_test(&pe->refcnt)) { list_del(&pe->list); kfree(pe); } break; } } write_unlock(&sn->pnetids_ndev.lock); } static void smc_pnet_add_base_pnetid(struct net *net, struct net_device *dev, u8 *ndev_pnetid) { struct net_device *base_dev; base_dev = __pnet_find_base_ndev(dev); if (base_dev->flags & IFF_UP && !smc_pnetid_by_dev_port(base_dev->dev.parent, base_dev->dev_port, ndev_pnetid)) { /* add to PNETIDs list */ smc_pnet_add_pnetid(net, ndev_pnetid); } } /* create initial list of netdevice pnetids */ static void smc_pnet_create_pnetids_list(struct net *net) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct net_device *dev; /* Newly created netns do not have devices. * Do not even acquire rtnl. */ if (list_empty(&net->dev_base_head)) return; /* Note: This might not be needed, because smc_pnet_netdev_event() * is also calling smc_pnet_add_base_pnetid() when handling * NETDEV_UP event. 
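 * Each pnetids list entry is refcounted, so a pnetid contributed by several
 * interfaces is only dropped again after the last matching NETDEV_DOWN (see
 * smc_pnet_add_pnetid() and smc_pnet_remove_pnetid() above).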
*/ rtnl_lock(); for_each_netdev(net, dev) smc_pnet_add_base_pnetid(net, dev, ndev_pnetid); rtnl_unlock(); } /* clean up list of netdevice pnetids */ static void smc_pnet_destroy_pnetids_list(struct net *net) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *temp_pe; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry_safe(pe, temp_pe, &sn->pnetids_ndev.list, list) { list_del(&pe->list); kfree(pe); } write_unlock(&sn->pnetids_ndev.lock); } static int smc_pnet_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(event_dev); u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; switch (event) { case NETDEV_REBOOT: case NETDEV_UNREGISTER: smc_pnet_remove_by_ndev(event_dev); smc_ib_ndev_change(event_dev, event); return NOTIFY_OK; case NETDEV_REGISTER: smc_pnet_add_by_ndev(event_dev); smc_ib_ndev_change(event_dev, event); return NOTIFY_OK; case NETDEV_UP: smc_pnet_add_base_pnetid(net, event_dev, ndev_pnetid); return NOTIFY_OK; case NETDEV_DOWN: event_dev = __pnet_find_base_ndev(event_dev); if (!smc_pnetid_by_dev_port(event_dev->dev.parent, event_dev->dev_port, ndev_pnetid)) { /* remove from PNETIDs list */ smc_pnet_remove_pnetid(net, ndev_pnetid); } return NOTIFY_OK; default: return NOTIFY_DONE; } } static struct notifier_block smc_netdev_notifier = { .notifier_call = smc_pnet_netdev_event }; /* init network namespace */ int smc_pnet_net_init(struct net *net) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnettable *pnettable = &sn->pnettable; struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev; INIT_LIST_HEAD(&pnettable->pnetlist); mutex_init(&pnettable->lock); INIT_LIST_HEAD(&pnetids_ndev->list); rwlock_init(&pnetids_ndev->lock); smc_pnet_create_pnetids_list(net); return 0; } int __init smc_pnet_init(void) { int rc; rc = genl_register_family(&smc_pnet_nl_family); if (rc) return rc; rc = register_netdevice_notifier(&smc_netdev_notifier); if (rc) genl_unregister_family(&smc_pnet_nl_family); return rc; } /* exit network namespace */ void smc_pnet_net_exit(struct net *net) { /* flush pnet table */ smc_pnet_remove_by_pnetid(net, NULL); smc_pnet_destroy_pnetids_list(net); } void smc_pnet_exit(void) { unregister_netdevice_notifier(&smc_netdev_notifier); genl_unregister_family(&smc_pnet_nl_family); } static struct net_device *__pnet_find_base_ndev(struct net_device *ndev) { int i, nest_lvl; ASSERT_RTNL(); nest_lvl = ndev->lower_level; for (i = 0; i < nest_lvl; i++) { struct list_head *lower = &ndev->adj_list.lower; if (list_empty(lower)) break; lower = lower->next; ndev = netdev_lower_get_next(ndev, &lower); } return ndev; } /* Determine one base device for stacked net devices. * If the lower device level contains more than one device * (for instance with bonding slaves), just the first device * is used to reach a base device.
*/ static struct net_device *pnet_find_base_ndev(struct net_device *ndev) { rtnl_lock(); ndev = __pnet_find_base_ndev(ndev); rtnl_unlock(); return ndev; } static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, u8 *pnetid) { struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_pnetentry *pnetelem; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) { /* get pnetid of netdev device */ memcpy(pnetid, pnetelem->pnet_name, SMC_MAX_PNETID_LEN); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; } static int smc_pnet_determine_gid(struct smc_ib_device *ibdev, int i, struct smc_init_info *ini) { if (!ini->check_smcrv2 && !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->ib_gid, NULL, NULL)) { ini->ib_dev = ibdev; ini->ib_port = i; return 0; } if (ini->check_smcrv2 && !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->smcrv2.ib_gid_v2, NULL, &ini->smcrv2)) { ini->smcrv2.ib_dev_v2 = ibdev; ini->smcrv2.ib_port_v2 = i; return 0; } return -ENODEV; } /* find a roce device for the given pnetid */ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, struct smc_init_info *ini, struct smc_ib_device *known_dev, struct net *net) { struct smc_ib_device *ibdev; int i; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (ibdev == known_dev || !rdma_dev_access_netns(ibdev->ibdev, net)) continue; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(ibdev->ibdev, i)) continue; if (smc_pnet_match(ibdev->pnetid[i - 1], pnet_id) && smc_ib_port_active(ibdev, i) && !test_bit(i - 1, ibdev->ports_going_away)) { if (!smc_pnet_determine_gid(ibdev, i, ini)) goto out; } } } out: mutex_unlock(&smc_ib_devices.mutex); } /* find alternate roce device with same pnet_id, vlan_id and net namespace */ void smc_pnet_find_alt_roce(struct smc_link_group *lgr, struct smc_init_info *ini, struct smc_ib_device *known_dev) { struct net *net = lgr->net; _smc_pnet_find_roce_by_pnetid(lgr->pnet_id, ini, known_dev, net); } /* if handshake network device belongs to a roce device, return its * IB device and port */ static void smc_pnet_find_rdma_dev(struct net_device *netdev, struct smc_init_info *ini) { struct net *net = dev_net(netdev); struct smc_ib_device *ibdev; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { struct net_device *ndev; int i; /* check rdma net namespace */ if (!rdma_dev_access_netns(ibdev->ibdev, net)) continue; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(ibdev->ibdev, i)) continue; ndev = ib_device_get_netdev(ibdev->ibdev, i); if (!ndev) continue; dev_put(ndev); if (netdev == ndev && smc_ib_port_active(ibdev, i) && !test_bit(i - 1, ibdev->ports_going_away)) { if (!smc_pnet_determine_gid(ibdev, i, ini)) break; } } } mutex_unlock(&smc_ib_devices.mutex); } /* Determine the corresponding IB device port based on the hardware PNETID. * Searching stops at the first matching active IB device port with vlan_id * configured. * If nothing found, check pnetid table. 
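 * (both the stacked net device and its base device are looked up there).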
* If nothing found, try to use handshake device */ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct net_device *base_ndev; struct net *net; base_ndev = pnet_find_base_ndev(ndev); net = dev_net(ndev); if (smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(base_ndev, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) { smc_pnet_find_rdma_dev(base_ndev, ini); return; /* pnetid could not be determined */ } _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net); } static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct smcd_dev *ismdev; ndev = pnet_find_base_ndev(ndev); if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) return; /* pnetid could not be determined */ mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(ismdev, &smcd_dev_list.list, list) { if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) && !ismdev->going_away && (!ini->ism_peer_gid[0].gid || !smc_ism_cantalk(&ini->ism_peer_gid[0], ini->vlan_id, ismdev))) { ini->ism_dev[0] = ismdev; break; } } mutex_unlock(&smcd_dev_list.mutex); } /* PNET table analysis for a given sock: * determine ib_device and port belonging to used internal TCP socket * ethernet interface. */ void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini) { struct net_device *dev; struct dst_entry *dst; rcu_read_lock(); dst = __sk_dst_get(sk); dev = dst ? dst_dev_rcu(dst) : NULL; dev_hold(dev); rcu_read_unlock(); if (dev) { smc_pnet_find_roce_by_pnetid(dev, ini); dev_put(dev); } } void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini) { struct net_device *dev; struct dst_entry *dst; ini->ism_dev[0] = NULL; rcu_read_lock(); dst = __sk_dst_get(sk); dev = dst ? dst_dev_rcu(dst) : NULL; dev_hold(dev); rcu_read_unlock(); if (dev) { smc_pnet_find_ism_by_pnetid(dev, ini); dev_put(dev); } } /* Lookup and apply a pnet table entry to the given ib device. */ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) { char *ib_name = smcibdev->ibdev->name; struct smc_pnettable *pnettable; struct smc_pnetentry *tmp_pe; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for init namespace */ sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) && tmp_pe->ib_port == ib_port) { smc_pnet_apply_ib(smcibdev, ib_port, tmp_pe->pnet_name); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; } /* Lookup and apply a pnet table entry to the given smcd device. 
*/ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) { struct smc_pnettable *pnettable; struct smc_pnetentry *tmp_pe; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for init namespace */ sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && (!strncmp(tmp_pe->ib_name, dev_name(&smcddev->dibs->dev), sizeof(tmp_pe->ib_name)) || (smcddev->dibs->dev.parent && !strncmp(tmp_pe->ib_name, dev_name(smcddev->dibs->dev.parent), sizeof(tmp_pe->ib_name))))) { smc_pnet_apply_smcd(smcddev, tmp_pe->pnet_name); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; }
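/*
 * Aside (editor's illustration, not part of the kernel sources): the pnetid
 * match rule implemented by smc_pnet_match() above stops comparing at the
 * first blank or zero byte found in both ids, so differently padded pnetids
 * compare equal. A minimal standalone userspace sketch of that rule follows;
 * all ex_* names are local to this example.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_MAX_PNETID_LEN 16	/* mirrors SMC_MAX_PNETID_LEN */

static bool ex_pnet_match(const unsigned char *p1, const unsigned char *p2)
{
	int i;

	for (i = 0; i < EX_MAX_PNETID_LEN; i++) {
		/* both ids end here: all bytes compared so far were equal */
		if ((p1[i] == 0 || p1[i] == ' ') &&
		    (p2[i] == 0 || p2[i] == ' '))
			break;
		if (p1[i] != p2[i])
			return false;
	}
	return true;
}

int main(void)
{
	unsigned char a[EX_MAX_PNETID_LEN] = "NET1        ";	/* blank padded */
	unsigned char b[EX_MAX_PNETID_LEN] = "NET1";		/* zero padded */

	printf("%d\n", ex_pnet_match(a, b));	/* prints 1 */
	return 0;
}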
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* af_can.c - Protocol family CAN core module * (used by different CAN protocol modules) * * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE.
* */ #include <linux/module.h> #include <linux/stddef.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/uaccess.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/core.h> #include <linux/can/skb.h> #include <linux/can/can-ml.h> #include <linux/ratelimit.h> #include <net/net_namespace.h> #include <net/sock.h> #include "af_can.h" MODULE_DESCRIPTION("Controller Area Network PF_CAN core"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, " "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS_NETPROTO(PF_CAN); static int stats_timer __read_mostly = 1; module_param(stats_timer, int, 0444); MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); static struct kmem_cache *rcv_cache __read_mostly; /* table of registered CAN protocols */ static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly; static DEFINE_MUTEX(proto_tab_lock); static atomic_t skbcounter = ATOMIC_INIT(0); /* af_can socket functions */ void can_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_error_queue); } EXPORT_SYMBOL(can_sock_destruct); static const struct can_proto *can_get_proto(int protocol) { const struct can_proto *cp; rcu_read_lock(); cp = rcu_dereference(proto_tab[protocol]); if (cp && !try_module_get(cp->prot->owner)) cp = NULL; rcu_read_unlock(); return cp; } static inline void can_put_proto(const struct can_proto *cp) { module_put(cp->prot->owner); } static int can_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; const struct can_proto *cp; int err = 0; sock->state = SS_UNCONNECTED; if (protocol < 0 || protocol >= CAN_NPROTO) return -EINVAL; cp = can_get_proto(protocol); #ifdef CONFIG_MODULES if (!cp) { /* try to load protocol module if kernel is modular */ err = request_module("can-proto-%d", protocol); /* In case of error we only print a message but don't * return the error code immediately. Below we will * return -EPROTONOSUPPORT */ if (err) pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n", protocol); cp = can_get_proto(protocol); } #endif /* check for available protocol and correct usage */ if (!cp) return -EPROTONOSUPPORT; if (cp->type != sock->type) { err = -EPROTOTYPE; goto errout; } sock->ops = cp->ops; sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern); if (!sk) { err = -ENOMEM; goto errout; } sock_init_data(sock, sk); sk->sk_destruct = can_sock_destruct; if (sk->sk_prot->init) err = sk->sk_prot->init(sk); if (err) { /* release sk on errors */ sock_orphan(sk); sock_put(sk); sock->sk = NULL; } else { sock_prot_inuse_add(net, sk->sk_prot, 1); } errout: can_put_proto(cp); return err; } /* af_can tx path */ /** * can_send - transmit a CAN frame (optional with local loopback) * @skb: pointer to socket buffer with CAN frame in data section * @loop: loopback for listeners on local CAN sockets (recommended default!) * * Due to the loopback this routine must not be called from hardirq context. 
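 * The skb must already hold a valid Classical CAN, CAN FD or CAN XL frame;
 * can_send() derives skb->protocol (ETH_P_CAN, ETH_P_CANFD or ETH_P_CANXL)
 * from the frame type and rejects anything else with -EINVAL.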
* * Return: * 0 on success * -ENETDOWN when the selected interface is down * -ENOBUFS on full driver queue (see net_xmit_errno()) * -ENOMEM when local loopback failed at calling skb_clone() * -EPERM when trying to send on a non-CAN interface * -EMSGSIZE CAN frame size is bigger than CAN interface MTU * -EINVAL when the skb->data does not contain a valid CAN frame */ int can_send(struct sk_buff *skb, int loop) { struct sk_buff *newskb = NULL; struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats; int err = -EINVAL; if (can_is_canxl_skb(skb)) { skb->protocol = htons(ETH_P_CANXL); } else if (can_is_can_skb(skb)) { skb->protocol = htons(ETH_P_CAN); } else if (can_is_canfd_skb(skb)) { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; skb->protocol = htons(ETH_P_CANFD); /* set CAN FD flag for CAN FD frames by default */ cfd->flags |= CANFD_FDF; } else { goto inval_skb; } /* Make sure the CAN frame can pass the selected CAN netdevice. */ if (unlikely(skb->len > READ_ONCE(skb->dev->mtu))) { err = -EMSGSIZE; goto inval_skb; } if (unlikely(skb->dev->type != ARPHRD_CAN)) { err = -EPERM; goto inval_skb; } if (unlikely(!(skb->dev->flags & IFF_UP))) { err = -ENETDOWN; goto inval_skb; } skb->ip_summed = CHECKSUM_UNNECESSARY; skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_transport_header(skb); if (loop) { /* local loopback of sent CAN frames */ /* indication for the CAN driver: do loopback */ skb->pkt_type = PACKET_LOOPBACK; /* The reference to the originating sock may be required * by the receiving socket to check whether the frame is * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS * Therefore we have to ensure that skb->sk remains the * reference to the originating sock by restoring skb->sk * after each skb_clone() or skb_orphan() usage. */ if (!(skb->dev->flags & IFF_ECHO)) { /* If the interface is not capable to do loopback * itself, we do it here. */ newskb = skb_clone(skb, GFP_ATOMIC); if (!newskb) { kfree_skb(skb); return -ENOMEM; } can_skb_set_owner(newskb, skb->sk); newskb->ip_summed = CHECKSUM_UNNECESSARY; newskb->pkt_type = PACKET_BROADCAST; } } else { /* indication for the CAN driver: no loopback required */ skb->pkt_type = PACKET_HOST; } /* send to netdevice */ err = dev_queue_xmit(skb); if (err > 0) err = net_xmit_errno(err); if (err) { kfree_skb(newskb); return err; } if (newskb) netif_rx(newskb); /* update statistics */ atomic_long_inc(&pkg_stats->tx_frames); atomic_long_inc(&pkg_stats->tx_frames_delta); return 0; inval_skb: kfree_skb(skb); return err; } EXPORT_SYMBOL(can_send); /* af_can rx path */ static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net, struct net_device *dev) { if (dev) { struct can_ml_priv *can_ml = can_get_ml_priv(dev); return &can_ml->dev_rcv_lists; } else { return net->can.rx_alldev_list; } } /** * effhash - hash function for 29 bit CAN identifier reduction * @can_id: 29 bit CAN identifier * * Description: * To reduce the linear traversal in one linked list of _single_ EFF CAN * frame subscriptions the 29 bit identifier is mapped to 10 bits. 
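 * For example, can_id 0x1FFFFFFF hashes to
 * (0x1FFFFFFF ^ 0x0007FFFF ^ 0x000001FF) & 0x3FF = 0x1FF.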
* (see CAN_EFF_RCV_HASH_BITS definition) * * Return: * Hash value from 0x000 - 0x3FF ( enforced by CAN_EFF_RCV_HASH_BITS mask ) */ static unsigned int effhash(canid_t can_id) { unsigned int hash; hash = can_id; hash ^= can_id >> CAN_EFF_RCV_HASH_BITS; hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS); return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1); } /** * can_rcv_list_find - determine optimal filterlist inside device filter struct * @can_id: pointer to CAN identifier of a given can_filter * @mask: pointer to CAN mask of a given can_filter * @dev_rcv_lists: pointer to the device filter struct * * Description: * Returns the optimal filterlist to reduce the filter handling in the * receive path. This function is called by service functions that need * to register or unregister a can_filter in the filter lists. * * A filter matches in general, when * * <received_can_id> & mask == can_id & mask * * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describe * relevant bits for the filter. * * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can * filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg * frames there is a special filterlist and a special rx path filter handling. * * Return: * Pointer to optimal filterlist for the given can_id/mask pair. * Consistency checked mask. * Reduced can_id to have a preprocessed filter compare value. */ static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask, struct can_dev_rcv_lists *dev_rcv_lists) { canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ /* filter for error message frames in extra filterlist */ if (*mask & CAN_ERR_FLAG) { /* clear CAN_ERR_FLAG in filter entry */ *mask &= CAN_ERR_MASK; return &dev_rcv_lists->rx[RX_ERR]; } /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */ #define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG) /* ensure valid values in can_mask for 'SFF only' frame filtering */ if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG)) *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS); /* reduce condition testing at receive time */ *can_id &= *mask; /* inverse can_id/can_mask filter */ if (inv) return &dev_rcv_lists->rx[RX_INV]; /* mask == 0 => no condition testing at receive time */ if (!(*mask)) return &dev_rcv_lists->rx[RX_ALL]; /* extra filterlists for the subscription of a single non-RTR can_id */ if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) && !(*can_id & CAN_RTR_FLAG)) { if (*can_id & CAN_EFF_FLAG) { if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) return &dev_rcv_lists->rx_eff[effhash(*can_id)]; } else { if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) return &dev_rcv_lists->rx_sff[*can_id]; } } /* default: filter via can_id/can_mask */ return &dev_rcv_lists->rx[RX_FIL]; } /** * can_rx_register - subscribe CAN frames from a specific interface * @net: the applicable net namespace * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list) * @can_id: CAN identifier (see description) * @mask: CAN mask (see description) * @func: callback function on filter match * @data: returned parameter for callback function * @ident: string for calling module identification * @sk: socket pointer (might be NULL) * * Description: * Invokes the callback function with the received sk_buff and the given * parameter 'data' on a matching receive filter. 
A filter matches, when * * <received_can_id> & mask == can_id & mask * * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can * filter for error message frames (CAN_ERR_FLAG bit set in mask). * * The provided pointer to the sk_buff is guaranteed to be valid as long as * the callback function is running. The callback function must *not* free * the given sk_buff while processing its task. When the given sk_buff is * needed after the end of the callback function it must be cloned inside * the callback function with skb_clone(). * * Return: * 0 on success * -ENOMEM on missing cache mem to create subscription entry * -ENODEV unknown device */ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id, canid_t mask, void (*func)(struct sk_buff *, void *), void *data, char *ident, struct sock *sk) { struct receiver *rcv; struct hlist_head *rcv_list; struct can_dev_rcv_lists *dev_rcv_lists; struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats; /* insert new receiver (dev,canid,mask) -> (func,data) */ if (dev && (dev->type != ARPHRD_CAN || !can_get_ml_priv(dev))) return -ENODEV; if (dev && !net_eq(net, dev_net(dev))) return -ENODEV; rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL); if (!rcv) return -ENOMEM; spin_lock_bh(&net->can.rcvlists_lock); dev_rcv_lists = can_dev_rcv_lists_find(net, dev); rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists); rcv->can_id = can_id; rcv->mask = mask; rcv->matches = 0; rcv->func = func; rcv->data = data; rcv->ident = ident; rcv->sk = sk; hlist_add_head_rcu(&rcv->list, rcv_list); dev_rcv_lists->entries++; rcv_lists_stats->rcv_entries++; rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max, rcv_lists_stats->rcv_entries); spin_unlock_bh(&net->can.rcvlists_lock); return 0; } EXPORT_SYMBOL(can_rx_register); /* can_rx_delete_receiver - rcu callback for single receiver entry removal */ static void can_rx_delete_receiver(struct rcu_head *rp) { struct receiver *rcv = container_of(rp, struct receiver, rcu); struct sock *sk = rcv->sk; kmem_cache_free(rcv_cache, rcv); if (sk) sock_put(sk); } /** * can_rx_unregister - unsubscribe CAN frames from a specific interface * @net: the applicable net namespace * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list) * @can_id: CAN identifier * @mask: CAN mask * @func: callback function on filter match * @data: returned parameter for callback function * * Description: * Removes subscription entry depending on given (subscription) values. */ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id, canid_t mask, void (*func)(struct sk_buff *, void *), void *data) { struct receiver *rcv = NULL; struct hlist_head *rcv_list; struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats; struct can_dev_rcv_lists *dev_rcv_lists; if (dev && dev->type != ARPHRD_CAN) return; if (dev && !net_eq(net, dev_net(dev))) return; spin_lock_bh(&net->can.rcvlists_lock); dev_rcv_lists = can_dev_rcv_lists_find(net, dev); rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists); /* Search the receiver list for the item to delete. This should * exist, since no receiver may be unregistered that hasn't * been registered before. */ hlist_for_each_entry_rcu(rcv, rcv_list, list) { if (rcv->can_id == can_id && rcv->mask == mask && rcv->func == func && rcv->data == data) break; } /* Check for bugs in CAN protocol implementations using af_can.c: * 'rcv' will be NULL if no matching list item was found for removal.
* As this case may potentially happen when closing a socket while * the notifier for removing the CAN netdev is running we just print * a warning here. */ if (!rcv) { pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n", DNAME(dev), can_id, mask); goto out; } hlist_del_rcu(&rcv->list); dev_rcv_lists->entries--; if (rcv_lists_stats->rcv_entries > 0) rcv_lists_stats->rcv_entries--; out: spin_unlock_bh(&net->can.rcvlists_lock); /* schedule the receiver item for deletion */ if (rcv) { if (rcv->sk) sock_hold(rcv->sk); call_rcu(&rcv->rcu, can_rx_delete_receiver); } } EXPORT_SYMBOL(can_rx_unregister); static inline void deliver(struct sk_buff *skb, struct receiver *rcv) { rcv->func(skb, rcv->data); rcv->matches++; } static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb) { struct receiver *rcv; int matches = 0; struct can_frame *cf = (struct can_frame *)skb->data; canid_t can_id = cf->can_id; if (dev_rcv_lists->entries == 0) return 0; if (can_id & CAN_ERR_FLAG) { /* check for error message frame entries only */ hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) { if (can_id & rcv->mask) { deliver(skb, rcv); matches++; } } return matches; } /* check for unfiltered entries */ hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) { deliver(skb, rcv); matches++; } /* check for can_id/mask entries */ hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) { if ((can_id & rcv->mask) == rcv->can_id) { deliver(skb, rcv); matches++; } } /* check for inverted can_id/mask entries */ hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) { if ((can_id & rcv->mask) != rcv->can_id) { deliver(skb, rcv); matches++; } } /* check filterlists for single non-RTR can_ids */ if (can_id & CAN_RTR_FLAG) return matches; if (can_id & CAN_EFF_FLAG) { hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) { if (rcv->can_id == can_id) { deliver(skb, rcv); matches++; } } } else { can_id &= CAN_SFF_MASK; hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) { deliver(skb, rcv); matches++; } } return matches; } static void can_receive(struct sk_buff *skb, struct net_device *dev) { struct can_dev_rcv_lists *dev_rcv_lists; struct net *net = dev_net(dev); struct can_pkg_stats *pkg_stats = net->can.pkg_stats; int matches; /* update statistics */ atomic_long_inc(&pkg_stats->rx_frames); atomic_long_inc(&pkg_stats->rx_frames_delta); /* create non-zero unique skb identifier together with *skb */ while (!(can_skb_prv(skb)->skbcnt)) can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter); rcu_read_lock(); /* deliver the packet to sockets listening on all devices */ matches = can_rcv_filter(net->can.rx_alldev_list, skb); /* find receive list for this device */ dev_rcv_lists = can_dev_rcv_lists_find(net, dev); matches += can_rcv_filter(dev_rcv_lists, skb); rcu_read_unlock(); /* consume the skbuff allocated by the netdevice driver */ consume_skb(skb); if (matches > 0) { atomic_long_inc(&pkg_stats->matches); atomic_long_inc(&pkg_stats->matches_delta); } } static int can_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_can_skb(skb))) { pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n", dev->type, skb->len); kfree_skb_reason(skb, SKB_DROP_REASON_CAN_RX_INVALID_FRAME); return NET_RX_DROP; } can_receive(skb, dev); return NET_RX_SUCCESS; } static int 
canfd_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canfd_skb(skb))) { pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n", dev->type, skb->len); kfree_skb_reason(skb, SKB_DROP_REASON_CANFD_RX_INVALID_FRAME); return NET_RX_DROP; } can_receive(skb, dev); return NET_RX_SUCCESS; } static int canxl_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canxl_skb(skb))) { pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n", dev->type, skb->len); kfree_skb_reason(skb, SKB_DROP_REASON_CANXL_RX_INVALID_FRAME); return NET_RX_DROP; } can_receive(skb, dev); return NET_RX_SUCCESS; } /* af_can protocol functions */ /** * can_proto_register - register CAN transport protocol * @cp: pointer to CAN protocol structure * * Return: * 0 on success * -EINVAL invalid (out of range) protocol number * -EBUSY protocol already in use * -ENOBUF if proto_register() fails */ int can_proto_register(const struct can_proto *cp) { int proto = cp->protocol; int err = 0; if (proto < 0 || proto >= CAN_NPROTO) { pr_err("can: protocol number %d out of range\n", proto); return -EINVAL; } err = proto_register(cp->prot, 0); if (err < 0) return err; mutex_lock(&proto_tab_lock); if (rcu_access_pointer(proto_tab[proto])) { pr_err("can: protocol %d already registered\n", proto); err = -EBUSY; } else { RCU_INIT_POINTER(proto_tab[proto], cp); } mutex_unlock(&proto_tab_lock); if (err < 0) proto_unregister(cp->prot); return err; } EXPORT_SYMBOL(can_proto_register); /** * can_proto_unregister - unregister CAN transport protocol * @cp: pointer to CAN protocol structure */ void can_proto_unregister(const struct can_proto *cp) { int proto = cp->protocol; mutex_lock(&proto_tab_lock); BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp); RCU_INIT_POINTER(proto_tab[proto], NULL); mutex_unlock(&proto_tab_lock); synchronize_rcu(); proto_unregister(cp->prot); } EXPORT_SYMBOL(can_proto_unregister); static int can_pernet_init(struct net *net) { spin_lock_init(&net->can.rcvlists_lock); net->can.rx_alldev_list = kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL); if (!net->can.rx_alldev_list) goto out; net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL); if (!net->can.pkg_stats) goto out_free_rx_alldev_list; net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL); if (!net->can.rcv_lists_stats) goto out_free_pkg_stats; if (IS_ENABLED(CONFIG_PROC_FS)) { /* the statistics are updated every second (timer triggered) */ if (stats_timer) { timer_setup(&net->can.stattimer, can_stat_update, 0); mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ)); } net->can.pkg_stats->jiffies_init = jiffies; can_init_proc(net); } return 0; out_free_pkg_stats: kfree(net->can.pkg_stats); out_free_rx_alldev_list: kfree(net->can.rx_alldev_list); out: return -ENOMEM; } static void can_pernet_exit(struct net *net) { if (IS_ENABLED(CONFIG_PROC_FS)) { can_remove_proc(net); if (stats_timer) timer_delete_sync(&net->can.stattimer); } kfree(net->can.rx_alldev_list); kfree(net->can.pkg_stats); kfree(net->can.rcv_lists_stats); } /* af_can module init/exit functions */ static struct packet_type can_packet __read_mostly = { .type = cpu_to_be16(ETH_P_CAN), .func = can_rcv, }; static struct packet_type canfd_packet __read_mostly = { .type 
= cpu_to_be16(ETH_P_CANFD), .func = canfd_rcv, }; static struct packet_type canxl_packet __read_mostly = { .type = cpu_to_be16(ETH_P_CANXL), .func = canxl_rcv, }; static const struct net_proto_family can_family_ops = { .family = PF_CAN, .create = can_create, .owner = THIS_MODULE, }; static struct pernet_operations can_pernet_ops __read_mostly = { .init = can_pernet_init, .exit = can_pernet_exit, }; static __init int can_init(void) { int err; /* check for correct padding to be able to use the structs similarly */ BUILD_BUG_ON(offsetof(struct can_frame, len) != offsetof(struct canfd_frame, len) || offsetof(struct can_frame, len) != offsetof(struct canxl_frame, flags) || offsetof(struct can_frame, data) != offsetof(struct canfd_frame, data)); pr_info("can: controller area network core\n"); rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), 0, 0, NULL); if (!rcv_cache) return -ENOMEM; err = register_pernet_subsys(&can_pernet_ops); if (err) goto out_pernet; /* protocol register */ err = sock_register(&can_family_ops); if (err) goto out_sock; dev_add_pack(&can_packet); dev_add_pack(&canfd_packet); dev_add_pack(&canxl_packet); return 0; out_sock: unregister_pernet_subsys(&can_pernet_ops); out_pernet: kmem_cache_destroy(rcv_cache); return err; } static __exit void can_exit(void) { /* protocol unregister */ dev_remove_pack(&canxl_packet); dev_remove_pack(&canfd_packet); dev_remove_pack(&can_packet); sock_unregister(PF_CAN); unregister_pernet_subsys(&can_pernet_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ kmem_cache_destroy(rcv_cache); } module_init(can_init); module_exit(can_exit);
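/*
 * Aside (editor's illustration, not part of the kernel sources): the receive
 * filter rule documented for can_rcv_list_find()/can_rx_register() above is
 * <received_can_id> & mask == can_id & mask. A standalone userspace sketch
 * of that rule follows; the ex_* names are local to this example.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int ex_canid_t;

/* every bit set in mask must agree between the received and the filter id */
static bool ex_filter_match(ex_canid_t rx_id, ex_canid_t can_id,
			    ex_canid_t mask)
{
	return (rx_id & mask) == (can_id & mask);
}

int main(void)
{
	/* subscribe exactly to SFF id 0x123: mask = CAN_SFF_MASK (0x7ff) */
	printf("%d %d\n", ex_filter_match(0x123, 0x123, 0x7ff),
	       ex_filter_match(0x124, 0x123, 0x7ff));	/* prints 1 0 */
	return 0;
}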
/* * net/tipc/msg.c: TIPC message header routines * * Copyright (c) 2000-2006, 2014-2015, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <net/sock.h> #include "core.h" #include "msg.h" #include "addr.h" #include "name_table.h" #include "crypto.h" #define BUF_ALIGN(x) ALIGN(x, 4) #define MAX_FORWARD_SIZE 1024 #ifdef CONFIG_TIPC_CRYPTO #define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16) #define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE) #else #define BUF_HEADROOM (LL_MAX_HEADER + 48) #define BUF_OVERHEAD BUF_HEADROOM #endif const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); /** * tipc_buf_acquire - creates a TIPC message buffer * @size: message size (including TIPC header) * @gfp: memory allocation flags * * Return: a new buffer with data pointers set to the specified size. * * NOTE: * Headroom is reserved to allow prepending of a data link header. * There may also be unrequested tailroom present at the buffer's end. 
*/ struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp) { struct sk_buff *skb; skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp); if (skb) { skb_reserve(skb, BUF_HEADROOM); skb_put(skb, size); skb->next = NULL; } return skb; } void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type, u32 hsize, u32 dnode) { memset(m, 0, hsize); msg_set_version(m); msg_set_user(m, user); msg_set_hdr_sz(m, hsize); msg_set_size(m, hsize); msg_set_prevnode(m, own_node); msg_set_type(m, type); if (hsize > SHORT_H_SIZE) { msg_set_orignode(m, own_node); msg_set_destnode(m, dnode); } } struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, uint data_sz, u32 dnode, u32 onode, u32 dport, u32 oport, int errcode) { struct tipc_msg *msg; struct sk_buff *buf; buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC); if (unlikely(!buf)) return NULL; msg = buf_msg(buf); tipc_msg_init(onode, msg, user, type, hdr_sz, dnode); msg_set_size(msg, hdr_sz + data_sz); msg_set_origport(msg, oport); msg_set_destport(msg, dport); msg_set_errcode(msg, errcode); return buf; } /* tipc_buf_append(): Append a buffer to the fragment list of another buffer * @*headbuf: in: NULL for first frag, otherwise value returned from prev call * out: set when successful non-complete reassembly, otherwise NULL * @*buf: in: the buffer to append. Always defined * out: head buf after successful complete reassembly, otherwise NULL * Returns 1 when reassembly complete, otherwise 0 */ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) { struct sk_buff *head = *headbuf; struct sk_buff *frag = *buf; struct sk_buff *tail = NULL; struct tipc_msg *msg; u32 fragid; int delta; bool headstolen; if (!frag) goto err; msg = buf_msg(frag); fragid = msg_type(msg); frag->next = NULL; skb_pull(frag, msg_hdr_sz(msg)); if (fragid == FIRST_FRAGMENT) { if (unlikely(head)) goto err; if (skb_has_frag_list(frag) && __skb_linearize(frag)) goto err; *buf = NULL; frag = skb_unshare(frag, GFP_ATOMIC); if (unlikely(!frag)) goto err; head = *headbuf = frag; TIPC_SKB_CB(head)->tail = NULL; return 0; } if (!head) goto err; /* Either the input skb ownership is transferred to headskb * or the input skb is freed, clear the reference to avoid * bad access on error path. */ *buf = NULL; if (skb_try_coalesce(head, frag, &headstolen, &delta)) { kfree_skb_partial(frag, headstolen); } else { tail = TIPC_SKB_CB(head)->tail; if (!skb_has_frag_list(head)) skb_shinfo(head)->frag_list = frag; else tail->next = frag; head->truesize += frag->truesize; head->data_len += frag->len; head->len += frag->len; TIPC_SKB_CB(head)->tail = frag; } if (fragid == LAST_FRAGMENT) { TIPC_SKB_CB(head)->validated = 0; if (unlikely(!tipc_msg_validate(&head))) goto err; *buf = head; TIPC_SKB_CB(head)->tail = NULL; *headbuf = NULL; return 1; } return 0; err: kfree_skb(*buf); kfree_skb(*headbuf); *buf = *headbuf = NULL; return 0; } /** * tipc_msg_append(): Append data to tail of an existing buffer queue * @_hdr: header to be used * @m: the data to be appended * @mss: max allowable size of buffer * @dlen: size of data to be appended * @txq: queue to append to * * Return: the number of 1k blocks appended or errno value */ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, int mss, struct sk_buff_head *txq) { struct sk_buff *skb; int accounted, total, curr; int mlen, cpy, rem = dlen; struct tipc_msg *hdr; skb = skb_peek_tail(txq); accounted = skb ? 
msg_blocks(buf_msg(skb)) : 0; total = accounted; do { if (!skb || skb->len >= mss) { skb = tipc_buf_acquire(mss, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; skb_orphan(skb); skb_trim(skb, MIN_H_SIZE); hdr = buf_msg(skb); skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE); msg_set_hdr_sz(hdr, MIN_H_SIZE); msg_set_size(hdr, MIN_H_SIZE); __skb_queue_tail(txq, skb); total += 1; } hdr = buf_msg(skb); curr = msg_blocks(hdr); mlen = msg_size(hdr); cpy = min_t(size_t, rem, mss - mlen); if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) return -EFAULT; msg_set_size(hdr, mlen + cpy); skb_put(skb, cpy); rem -= cpy; total += msg_blocks(hdr) - curr; } while (rem > 0); return total - accounted; } /* tipc_msg_validate - validate basic format of received message * * This routine ensures a TIPC message has an acceptable header, and at least * as much data as the header indicates it should. The routine also ensures * that the entire message header is stored in the main fragment of the message * buffer, to simplify future access to message header fields. * * Note: Having extra info present in the message header or data areas is OK. * TIPC will ignore the excess, under the assumption that it is optional info * introduced by a later release of the protocol. */ bool tipc_msg_validate(struct sk_buff **_skb) { struct sk_buff *skb = *_skb; struct tipc_msg *hdr; int msz, hsz; /* Ensure that flow control ratio condition is satisfied */ if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) { skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC); if (!skb) return false; kfree_skb(*_skb); *_skb = skb; } if (unlikely(TIPC_SKB_CB(skb)->validated)) return true; if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE))) return false; hsz = msg_hdr_sz(buf_msg(skb)); if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE)) return false; if (unlikely(!pskb_may_pull(skb, hsz))) return false; hdr = buf_msg(skb); if (unlikely(msg_version(hdr) != TIPC_VERSION)) return false; msz = msg_size(hdr); if (unlikely(msz < hsz)) return false; if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE)) return false; if (unlikely(skb->len < msz)) return false; TIPC_SKB_CB(skb)->validated = 1; return true; } /** * tipc_msg_fragment - build a fragment skb list for TIPC message * * @skb: TIPC message skb * @hdr: internal msg header to be put on the top of the fragments * @pktmax: max size of a fragment incl. the header * @frags: returned fragment skb list * * Return: 0 if the fragmentation is successful, otherwise: -EINVAL * or -ENOMEM */ int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr, int pktmax, struct sk_buff_head *frags) { int pktno, nof_fragms, dsz, dmax, eat; struct tipc_msg *_hdr; struct sk_buff *_skb; u8 *data; /* Non-linear buffer? 
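Linearize it first: the fragmentation loop below copies straight out of the linear data area.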
*/ if (skb_linearize(skb)) return -ENOMEM; data = (u8 *)skb->data; dsz = msg_size(buf_msg(skb)); dmax = pktmax - INT_H_SIZE; if (dsz <= dmax || !dmax) return -EINVAL; nof_fragms = dsz / dmax + 1; for (pktno = 1; pktno <= nof_fragms; pktno++) { if (pktno < nof_fragms) eat = dmax; else eat = dsz % dmax; /* Allocate a new fragment */ _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC); if (!_skb) goto error; skb_orphan(_skb); __skb_queue_tail(frags, _skb); /* Copy header & data to the fragment */ skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE); skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat); data += eat; /* Update the fragment's header */ _hdr = buf_msg(_skb); msg_set_fragm_no(_hdr, pktno); msg_set_nof_fragms(_hdr, nof_fragms); msg_set_size(_hdr, INT_H_SIZE + eat); } return 0; error: __skb_queue_purge(frags); __skb_queue_head_init(frags); return -ENOMEM; } /** * tipc_msg_build - create buffer chain containing specified header and data * @mhdr: Message header, to be prepended to data * @m: User message * @offset: buffer offset for fragmented messages (FIXME) * @dsz: Total length of user data * @pktmax: Max packet size that can be used * @list: Buffer or chain of buffers to be returned to caller * * Note that the recursive call we are making here is safe, since it can * logically go only one further level down. * * Return: message data size or errno: -ENOMEM, -EFAULT */ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, int dsz, int pktmax, struct sk_buff_head *list) { int mhsz = msg_hdr_sz(mhdr); struct tipc_msg pkthdr; int msz = mhsz + dsz; int pktrem = pktmax; struct sk_buff *skb; int drem = dsz; int pktno = 1; char *pktpos; int pktsz; int rc; msg_set_size(mhdr, msz); /* No fragmentation needed? */ if (likely(msz <= pktmax)) { skb = tipc_buf_acquire(msz, GFP_KERNEL); /* Fall back to smaller MTU if node local message */ if (unlikely(!skb)) { if (pktmax != MAX_MSG_SIZE) return -ENOMEM; rc = tipc_msg_build(mhdr, m, offset, dsz, one_page_mtu, list); if (rc != dsz) return rc; if (tipc_msg_assemble(list)) return dsz; return -ENOMEM; } skb_orphan(skb); __skb_queue_tail(list, skb); skb_copy_to_linear_data(skb, mhdr, mhsz); pktpos = skb->data + mhsz; if (copy_from_iter_full(pktpos, dsz, &m->msg_iter)) return dsz; rc = -EFAULT; goto error; } /* Prepare reusable fragment header */ tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr)); msg_set_size(&pkthdr, pktmax); msg_set_fragm_no(&pkthdr, pktno); msg_set_importance(&pkthdr, msg_importance(mhdr)); /* Prepare first fragment */ skb = tipc_buf_acquire(pktmax, GFP_KERNEL); if (!skb) return -ENOMEM; skb_orphan(skb); __skb_queue_tail(list, skb); pktpos = skb->data; skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); pktpos += INT_H_SIZE; pktrem -= INT_H_SIZE; skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz); pktpos += mhsz; pktrem -= mhsz; do { if (drem < pktrem) pktrem = drem; if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) { rc = -EFAULT; goto error; } drem -= pktrem; if (!drem) break; /* Prepare new fragment: */ if (drem < (pktmax - INT_H_SIZE)) pktsz = drem + INT_H_SIZE; else pktsz = pktmax; skb = tipc_buf_acquire(pktsz, GFP_KERNEL); if (!skb) { rc = -ENOMEM; goto error; } skb_orphan(skb); __skb_queue_tail(list, skb); msg_set_type(&pkthdr, FRAGMENT); msg_set_size(&pkthdr, pktsz); msg_set_fragm_no(&pkthdr, ++pktno); skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); pktpos = skb->data + INT_H_SIZE; pktrem = pktsz - INT_H_SIZE; } while (1); 
msg_set_type(buf_msg(skb), LAST_FRAGMENT); return dsz; error: __skb_queue_purge(list); __skb_queue_head_init(list); return rc; } /** * tipc_msg_bundle - Append contents of a buffer to tail of an existing one * @bskb: the bundle buffer to append to * @msg: message to be appended * @max: max allowable size for the bundle buffer * * Return: "true" if bundling has been performed, otherwise "false" */ static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg, u32 max) { struct tipc_msg *bmsg = buf_msg(bskb); u32 msz, bsz, offset, pad; msz = msg_size(msg); bsz = msg_size(bmsg); offset = BUF_ALIGN(bsz); pad = offset - bsz; if (unlikely(skb_tailroom(bskb) < (pad + msz))) return false; if (unlikely(max < (offset + msz))) return false; skb_put(bskb, pad + msz); skb_copy_to_linear_data_offset(bskb, offset, msg, msz); msg_set_size(bmsg, offset + msz); msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); return true; } /** * tipc_msg_try_bundle - Try to bundle a new message to the last one * @tskb: the last/target message to which the new one will be appended * @skb: the new message skb pointer * @mss: max message size (header inclusive) * @dnode: destination node for the message * @new_bundle: if this call made a new bundle or not * * Return: "true" if the new message skb is potential for bundling this time or * later, in the case a bundling has been done this time, the skb is consumed * (the skb pointer = NULL). * Otherwise, "false" if the skb cannot be bundled at all. */ bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss, u32 dnode, bool *new_bundle) { struct tipc_msg *msg, *inner, *outer; u32 tsz; /* First, check if the new buffer is suitable for bundling */ msg = buf_msg(*skb); if (msg_user(msg) == MSG_FRAGMENTER) return false; if (msg_user(msg) == TUNNEL_PROTOCOL) return false; if (msg_user(msg) == BCAST_PROTOCOL) return false; if (mss <= INT_H_SIZE + msg_size(msg)) return false; /* Ok, but the last/target buffer can be empty? */ if (unlikely(!tskb)) return true; /* Is it a bundle already? Try to bundle the new message to it */ if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) { *new_bundle = false; goto bundle; } /* Make a new bundle of the two messages if possible */ tsz = msg_size(buf_msg(tskb)); if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg))) return true; if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE, GFP_ATOMIC))) return true; inner = buf_msg(tskb); skb_push(tskb, INT_H_SIZE); outer = buf_msg(tskb); tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE, dnode); msg_set_importance(outer, msg_importance(inner)); msg_set_size(outer, INT_H_SIZE + tsz); msg_set_msgcnt(outer, 1); *new_bundle = true; bundle: if (likely(tipc_msg_bundle(tskb, msg, mss))) { consume_skb(*skb); *skb = NULL; } return true; } /** * tipc_msg_extract(): extract bundled inner packet from buffer * @skb: buffer to be extracted from. * @iskb: extracted inner buffer, to be returned * @pos: position in outer message of msg to be extracted. * Returns position of next msg. 
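* (written back through @pos, so the caller can pass it straight into the next call)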
* Consumes outer buffer when last packet extracted * Return: true when there is an extracted buffer, otherwise false */ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) { struct tipc_msg *hdr, *ihdr; int imsz; *iskb = NULL; if (unlikely(skb_linearize(skb))) goto none; hdr = buf_msg(skb); if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE))) goto none; ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos); imsz = msg_size(ihdr); if ((*pos + imsz) > msg_data_sz(hdr)) goto none; *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC); if (!*iskb) goto none; skb_copy_to_linear_data(*iskb, ihdr, imsz); if (unlikely(!tipc_msg_validate(iskb))) goto none; *pos += BUF_ALIGN(imsz); return true; none: kfree_skb(skb); kfree_skb(*iskb); *iskb = NULL; return false; } /** * tipc_msg_reverse(): swap source and destination addresses and add error code * @own_node: originating node id for reversed message * @skb: buffer containing message to be reversed; will be consumed * @err: error code to be set in message, if any * Replaces consumed buffer with new one when successful * Return: true if success, otherwise false */ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) { struct sk_buff *_skb = *skb; struct tipc_msg *_hdr, *hdr; int hlen, dlen; if (skb_linearize(_skb)) goto exit; _hdr = buf_msg(_skb); dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE); hlen = msg_hdr_sz(_hdr); if (msg_dest_droppable(_hdr)) goto exit; if (msg_errcode(_hdr)) goto exit; /* Never return SHORT header */ if (hlen == SHORT_H_SIZE) hlen = BASIC_H_SIZE; /* Don't return data along with SYN+, - sender has a clone */ if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD) dlen = 0; /* Allocate new buffer to return */ *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC); if (!*skb) goto exit; memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr)); memcpy((*skb)->data + hlen, msg_data(_hdr), dlen); /* Build reverse header in new buffer */ hdr = buf_msg(*skb); msg_set_hdr_sz(hdr, hlen); msg_set_errcode(hdr, err); msg_set_non_seq(hdr, 0); msg_set_origport(hdr, msg_destport(_hdr)); msg_set_destport(hdr, msg_origport(_hdr)); msg_set_destnode(hdr, msg_prevnode(_hdr)); msg_set_prevnode(hdr, own_node); msg_set_orignode(hdr, own_node); msg_set_size(hdr, hlen + dlen); skb_orphan(_skb); kfree_skb(_skb); return true; exit: kfree_skb(_skb); *skb = NULL; return false; } bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy) { struct sk_buff *skb, *_skb; skb_queue_walk(msg, skb) { _skb = skb_clone(skb, GFP_ATOMIC); if (!_skb) { __skb_queue_purge(cpy); pr_err_ratelimited("Failed to clone buffer chain\n"); return false; } __skb_queue_tail(cpy, _skb); } return true; } /** * tipc_msg_lookup_dest(): try to find new destination for named message * @net: pointer to associated network namespace * @skb: the buffer containing the message. 
* @err: error code to be used by caller if lookup fails * Does not consume buffer * Return: true if a destination is found, false otherwise */ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) { struct tipc_msg *msg = buf_msg(skb); u32 scope = msg_lookup_scope(msg); u32 self = tipc_own_addr(net); u32 inst = msg_nameinst(msg); struct tipc_socket_addr sk; struct tipc_uaddr ua; if (!msg_isdata(msg)) return false; if (!msg_named(msg)) return false; if (msg_errcode(msg)) return false; *err = TIPC_ERR_NO_NAME; if (skb_linearize(skb)) return false; msg = buf_msg(skb); if (msg_reroute_cnt(msg)) return false; tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope, msg_nametype(msg), inst, inst); sk.node = tipc_scope2node(net, scope); if (!tipc_nametbl_lookup_anycast(net, &ua, &sk)) return false; msg_incr_reroute_cnt(msg); if (sk.node != self) msg_set_prevnode(msg, self); msg_set_destnode(msg, sk.node); msg_set_destport(msg, sk.ref); *err = TIPC_OK; return true; } /* tipc_msg_assemble() - assemble chain of fragments into one message */ bool tipc_msg_assemble(struct sk_buff_head *list) { struct sk_buff *skb, *tmp = NULL; if (skb_queue_len(list) == 1) return true; while ((skb = __skb_dequeue(list))) { skb->next = NULL; if (tipc_buf_append(&tmp, &skb)) { __skb_queue_tail(list, skb); return true; } if (!tmp) break; } __skb_queue_purge(list); __skb_queue_head_init(list); pr_warn("Failed to assemble buffer\n"); return false; } /* tipc_msg_reassemble() - clone a buffer chain of fragments and * reassemble the clones into one message */ bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq) { struct sk_buff *skb, *_skb; struct sk_buff *frag = NULL; struct sk_buff *head = NULL; int hdr_len; /* Copy header if single buffer */ if (skb_queue_len(list) == 1) { skb = skb_peek(list); hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb)); _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC); if (!_skb) return false; __skb_queue_tail(rcvq, _skb); return true; } /* Clone all fragments and reassemble */ skb_queue_walk(list, skb) { frag = skb_clone(skb, GFP_ATOMIC); if (!frag) goto error; frag->next = NULL; if (tipc_buf_append(&head, &frag)) break; if (!head) goto error; } __skb_queue_tail(rcvq, frag); return true; error: pr_warn("Failed to clone local mcast rcv buffer\n"); kfree_skb(head); return false; } bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg, struct sk_buff_head *cpy) { struct sk_buff *skb, *_skb; skb_queue_walk(msg, skb) { _skb = pskb_copy(skb, GFP_ATOMIC); if (!_skb) { __skb_queue_purge(cpy); return false; } msg_set_destnode(buf_msg(_skb), dst); __skb_queue_tail(cpy, _skb); } return true; } /* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number * @list: list to be appended to * @seqno: sequence number of buffer to add * @skb: buffer to add */ bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, struct sk_buff *skb) { struct sk_buff *_skb, *tmp; if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) { __skb_queue_head(list, skb); return true; } if (more(seqno, buf_seqno(skb_peek_tail(list)))) { __skb_queue_tail(list, skb); return true; } skb_queue_walk_safe(list, _skb, tmp) { if (more(seqno, buf_seqno(_skb))) continue; if (seqno == buf_seqno(_skb)) break; __skb_queue_before(list, _skb, skb); return true; } kfree_skb(skb); return false; } void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb, struct sk_buff_head *xmitq) { if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) __skb_queue_tail(xmitq, skb); }
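/*
 * Editor's note: a standalone sketch (not part of the original file) of
 * the wrap-around sequence number ordering that __tipc_skb_queue_sorted()
 * relies on. TIPC's less()/more() helpers compare 16-bit sequence numbers
 * modulo 2^16; this re-implementation shows why seqno 2 sorts after
 * seqno 65535 once the counter wraps.
 */
#include <stdint.h>
#include <stdio.h>

/* true if a precedes b in modulo-2^16 sequence space */
static int seq_less(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", seq_less(65535, 2));	/* 1: 65535 comes first */
	printf("%d\n", seq_less(2, 65535));	/* 0 */
	return 0;
}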
// SPDX-License-Identifier: GPL-2.0-only /* * Generic pidhash and scalable, time-bounded PID allocator * * (C) 2002-2003 Nadia Yvette Chambers, IBM * (C) 2004 Nadia Yvette Chambers, Oracle * (C) 2002-2004 Ingo Molnar, Red Hat * * pid-structures are backing objects for tasks sharing a given ID to chain * against. There is very little to them aside from hashing them and * parking tasks using given ID's on a list. * * The hash is always changed with the tasklist_lock write-acquired, * and the hash is only accessed with the tasklist_lock at least * read-acquired, so there's no additional SMP locking needed here. * * We have a list of bitmap pages, which bitmaps represent the PID space. * Allocating and freeing PIDs is completely lockless. The worst-case * allocation scenario when all but one out of 1 million PIDs possible are * allocated already: the scanning of 32 list entries and at most PAGE_SIZE * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). * * Pid namespaces: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/memblock.h> #include <linux/pid_namespace.h> #include <linux/init_task.h> #include <linux/syscalls.h> #include <linux/proc_ns.h> #include <linux/refcount.h> #include <linux/anon_inodes.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/idr.h> #include <linux/pidfs.h> #include <linux/seqlock.h> #include <net/sock.h> #include <uapi/linux/pidfd.h> struct pid init_struct_pid = { .count = REFCOUNT_INIT(1), .tasks = { { .first = NULL }, { .first = NULL }, { .first = NULL }, }, .level = 0, .numbers = { { .nr = 0, .ns = &init_pid_ns, }, } }; static int pid_max_min = RESERVED_PIDS + 1; static int pid_max_max = PID_MAX_LIMIT;
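/*
 * Editor's note: pid_max is exposed per PID namespace via the sysctl
 * table registered further down in this file. A minimal userspace sketch
 * (not part of the original file) for inspecting the current limit;
 * writing a new value requires privilege:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/pid_max", "r");
	int pid_max;

	if (f && fscanf(f, "%d", &pid_max) == 1)
		printf("pid_max = %d\n", pid_max);
	if (f)
		fclose(f);
	return 0;
}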
/* * PID-map pages start out as NULL, they get allocated upon * first use and are never deallocated. This way a low pid_max * value does not cause lots of bitmaps to be allocated, but * the scheme scales to up to 4 million PIDs, runtime. */ struct pid_namespace init_pid_ns = { .ns = NS_COMMON_INIT(init_pid_ns), .idr = IDR_INIT(init_pid_ns.idr), .pid_allocated = PIDNS_ADDING, .level = 0, .child_reaper = &init_task, .user_ns = &init_user_ns, .pid_max = PID_MAX_DEFAULT, #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) .memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC, #endif }; EXPORT_SYMBOL_GPL(init_pid_ns); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); seqcount_spinlock_t pidmap_lock_seq = SEQCNT_SPINLOCK_ZERO(pidmap_lock_seq, &pidmap_lock); void put_pid(struct pid *pid) { struct pid_namespace *ns; if (!pid) return; ns = pid->numbers[pid->level].ns; if (refcount_dec_and_test(&pid->count)) { pidfs_free_pid(pid); kmem_cache_free(ns->pid_cachep, pid); put_pid_ns(ns); } } EXPORT_SYMBOL_GPL(put_pid); static void delayed_put_pid(struct rcu_head *rhp) { struct pid *pid = container_of(rhp, struct pid, rcu); put_pid(pid); } void free_pid(struct pid *pid) { int i; struct pid_namespace *active_ns; lockdep_assert_not_held(&tasklist_lock); active_ns = pid->numbers[pid->level].ns; ns_ref_active_put(active_ns); spin_lock(&pidmap_lock); for (i = 0; i <= pid->level; i++) { struct upid *upid = pid->numbers + i; struct pid_namespace *ns = upid->ns; switch (--ns->pid_allocated) { case 2: case 1: /* When all that is left in the pid namespace * is the reaper wake up the reaper. The reaper * may be sleeping in zap_pid_ns_processes(). */ wake_up_process(ns->child_reaper); break; case PIDNS_ADDING: /* Handle a fork failure of the first process */ WARN_ON(ns->child_reaper); ns->pid_allocated = 0; break; } idr_remove(&ns->idr, upid->nr); } pidfs_remove_pid(pid); spin_unlock(&pidmap_lock); call_rcu(&pid->rcu, delayed_put_pid); } void free_pids(struct pid **pids) { int tmp; /* * This can batch pidmap_lock acquisitions. */ for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (pids[tmp]) free_pid(pids[tmp]); } struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, size_t set_tid_size) { struct pid *pid; enum pid_type type; int i, nr; struct pid_namespace *tmp; struct upid *upid; int retval = -ENOMEM; /* * set_tid_size contains the size of the set_tid array. Starting at * the most nested currently active PID namespace it tells alloc_pid() * which PID to set for a process in that most nested PID namespace * up to set_tid_size PID namespaces. It does not have to set the PID * for a process in all nested PID namespaces but set_tid_size must * never be greater than the current ns->level + 1. */ if (set_tid_size > ns->level + 1) return ERR_PTR(-EINVAL); pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); if (!pid) return ERR_PTR(retval); tmp = ns; pid->level = ns->level; for (i = ns->level; i >= 0; i--) { int tid = 0; int pid_max = READ_ONCE(tmp->pid_max); if (set_tid_size) { tid = set_tid[ns->level - i]; retval = -EINVAL; if (tid < 1 || tid >= pid_max) goto out_free; /* * Also fail if a PID != 1 is requested and * no PID 1 exists. */ if (tid != 1 && !tmp->child_reaper) goto out_free; retval = -EPERM; if (!checkpoint_restore_ns_capable(tmp->user_ns)) goto out_free; set_tid_size--; } idr_preload(GFP_KERNEL); spin_lock(&pidmap_lock); if (tid) { nr = idr_alloc(&tmp->idr, NULL, tid, tid + 1, GFP_ATOMIC); /* * If ENOSPC is returned it means that the PID is * already in use. Return EEXIST in that case.
*/ if (nr == -ENOSPC) nr = -EEXIST; } else { int pid_min = 1; /* * init really needs pid 1, but after reaching the * maximum wrap back to RESERVED_PIDS */ if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS) pid_min = RESERVED_PIDS; /* * Store a null pointer so find_pid_ns does not find * a partially initialized PID (see below). */ nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min, pid_max, GFP_ATOMIC); } spin_unlock(&pidmap_lock); idr_preload_end(); if (nr < 0) { retval = (nr == -ENOSPC) ? -EAGAIN : nr; goto out_free; } pid->numbers[i].nr = nr; pid->numbers[i].ns = tmp; tmp = tmp->parent; } /* * ENOMEM is not the most obvious choice especially for the case * where the child subreaper has already exited and the pid * namespace denies the creation of any new processes. But ENOMEM * is what we have exposed to userspace for a long time and it is * documented behavior for pid namespaces. So we can't easily * change it even if there were an error code better suited. */ retval = -ENOMEM; get_pid_ns(ns); refcount_set(&pid->count, 1); spin_lock_init(&pid->lock); for (type = 0; type < PIDTYPE_MAX; ++type) INIT_HLIST_HEAD(&pid->tasks[type]); init_waitqueue_head(&pid->wait_pidfd); INIT_HLIST_HEAD(&pid->inodes); upid = pid->numbers + ns->level; idr_preload(GFP_KERNEL); spin_lock(&pidmap_lock); if (!(ns->pid_allocated & PIDNS_ADDING)) goto out_unlock; pidfs_add_pid(pid); for ( ; upid >= pid->numbers; --upid) { /* Make the PID visible to find_pid_ns. */ idr_replace(&upid->ns->idr, pid, upid->nr); upid->ns->pid_allocated++; } spin_unlock(&pidmap_lock); idr_preload_end(); ns_ref_active_get(ns); return pid; out_unlock: spin_unlock(&pidmap_lock); idr_preload_end(); put_pid_ns(ns); out_free: spin_lock(&pidmap_lock); while (++i <= ns->level) { upid = pid->numbers + i; idr_remove(&upid->ns->idr, upid->nr); } /* On failure to allocate the first pid, reset the state */ if (ns->pid_allocated == PIDNS_ADDING) idr_set_cursor(&ns->idr, 0); spin_unlock(&pidmap_lock); kmem_cache_free(ns->pid_cachep, pid); return ERR_PTR(retval); } void disable_pid_allocation(struct pid_namespace *ns) { spin_lock(&pidmap_lock); ns->pid_allocated &= ~PIDNS_ADDING; spin_unlock(&pidmap_lock); } struct pid *find_pid_ns(int nr, struct pid_namespace *ns) { return idr_find(&ns->idr, nr); } EXPORT_SYMBOL_GPL(find_pid_ns); struct pid *find_vpid(int nr) { return find_pid_ns(nr, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(find_vpid); static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type) { return (type == PIDTYPE_PID) ? &task->thread_pid : &task->signal->pids[type]; } /* * attach_pid() must be called with the tasklist_lock write-held. 
*/ void attach_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; lockdep_assert_held_write(&tasklist_lock); pid = *task_pid_ptr(task, type); hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]); } static void __change_pid(struct pid **pids, struct task_struct *task, enum pid_type type, struct pid *new) { struct pid **pid_ptr, *pid; int tmp; lockdep_assert_held_write(&tasklist_lock); pid_ptr = task_pid_ptr(task, type); pid = *pid_ptr; hlist_del_rcu(&task->pid_links[type]); *pid_ptr = new; for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (pid_has_task(pid, tmp)) return; WARN_ON(pids[type]); pids[type] = pid; } void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type) { __change_pid(pids, task, type, NULL); } void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type, struct pid *pid) { __change_pid(pids, task, type, pid); attach_pid(task, type); } void exchange_tids(struct task_struct *left, struct task_struct *right) { struct pid *pid1 = left->thread_pid; struct pid *pid2 = right->thread_pid; struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID]; struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID]; lockdep_assert_held_write(&tasklist_lock); /* Swap the single entry tid lists */ hlists_swap_heads_rcu(head1, head2); /* Swap the per task_struct pid */ rcu_assign_pointer(left->thread_pid, pid2); rcu_assign_pointer(right->thread_pid, pid1); /* Swap the cached value */ WRITE_ONCE(left->pid, pid_nr(pid2)); WRITE_ONCE(right->pid, pid_nr(pid1)); } /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type type) { WARN_ON_ONCE(type == PIDTYPE_PID); lockdep_assert_held_write(&tasklist_lock); hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]); } struct task_struct *pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; if (pid) { struct hlist_node *first; first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), lockdep_tasklist_lock_is_held()); if (first) result = hlist_entry(first, struct task_struct, pid_links[(type)]); } return result; } EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock(). 
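* The task_struct returned by pid_task() is not reference-counted: if the * caller needs it beyond the RCU critical section, it must take its own * reference first (as find_get_task_by_vpid() below does).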
*/ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "find_task_by_pid_ns() needs rcu_read_lock() protection"); return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } struct task_struct *find_task_by_vpid(pid_t vnr) { return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); } struct task_struct *find_get_task_by_vpid(pid_t nr) { struct task_struct *task; rcu_read_lock(); task = find_task_by_vpid(nr); if (task) get_task_struct(task); rcu_read_unlock(); return task; } struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; rcu_read_lock(); pid = get_pid(rcu_dereference(*task_pid_ptr(task, type))); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(get_task_pid); struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result; rcu_read_lock(); result = pid_task(pid, type); if (result) get_task_struct(result); rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(get_pid_task); struct pid *find_get_pid(pid_t nr) { struct pid *pid; rcu_read_lock(); pid = get_pid(find_vpid(nr)); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(find_get_pid); pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) { struct upid *upid; pid_t nr = 0; if (pid && ns && ns->level <= pid->level) { upid = &pid->numbers[ns->level]; if (upid->ns == ns) nr = upid->nr; } return nr; } EXPORT_SYMBOL_GPL(pid_nr_ns); pid_t pid_vnr(struct pid *pid) { return pid_nr_ns(pid, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(pid_vnr); pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns) { pid_t nr = 0; rcu_read_lock(); if (!ns) ns = task_active_pid_ns(current); if (ns) nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns); rcu_read_unlock(); return nr; } EXPORT_SYMBOL(__task_pid_nr_ns); struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) { return ns_of_pid(task_pid(tsk)); } EXPORT_SYMBOL_GPL(task_active_pid_ns); /* * Used by proc to find the first pid that is greater than or equal to nr. * * If there is a pid at nr this function is exactly the same as find_pid_ns. */ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) { return idr_get_next(&ns->idr, &nr); } EXPORT_SYMBOL_GPL(find_ge_pid); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags) { CLASS(fd, f)(fd); struct pid *pid; if (fd_empty(f)) return ERR_PTR(-EBADF); pid = pidfd_pid(fd_file(f)); if (!IS_ERR(pid)) { get_pid(pid); *flags = fd_file(f)->f_flags; } return pid; } /** * pidfd_get_task() - Get the task associated with a pidfd * * @pidfd: pidfd for which to get the task * @flags: flags associated with this pidfd * * Return the task associated with @pidfd. The function takes a reference on * the returned task. The caller is responsible for releasing that reference. * * Return: On success, the task_struct associated with the pidfd. * On error, a negative errno number will be returned. 
*/ struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags) { unsigned int f_flags = 0; struct pid *pid; struct task_struct *task; enum pid_type type; switch (pidfd) { case PIDFD_SELF_THREAD: type = PIDTYPE_PID; pid = get_task_pid(current, type); break; case PIDFD_SELF_THREAD_GROUP: type = PIDTYPE_TGID; pid = get_task_pid(current, type); break; default: pid = pidfd_get_pid(pidfd, &f_flags); if (IS_ERR(pid)) return ERR_CAST(pid); type = PIDTYPE_TGID; break; } task = get_pid_task(pid, type); put_pid(pid); if (!task) return ERR_PTR(-ESRCH); *flags = f_flags; return task; } /** * pidfd_create() - Create a new pid file descriptor. * * @pid: struct pid that the pidfd will reference * @flags: flags to pass * * This creates a new pid file descriptor with the O_CLOEXEC flag set. * * Note that this function can only be called after the fd table has * been unshared to avoid leaking the pidfd to the new process. * * This symbol should not be explicitly exported to loadable modules. * * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned. */ static int pidfd_create(struct pid *pid, unsigned int flags) { int pidfd; struct file *pidfd_file; pidfd = pidfd_prepare(pid, flags, &pidfd_file); if (pidfd < 0) return pidfd; fd_install(pidfd, pidfd_file); return pidfd; } /** * sys_pidfd_open() - Open new pid file descriptor. * * @pid: pid for which to retrieve a pidfd * @flags: flags to pass * * This creates a new pid file descriptor with the O_CLOEXEC flag set for * the task identified by @pid. Without the PIDFD_THREAD flag the target task * must be a thread-group leader. * * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned. */ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags) { int fd; struct pid *p; if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD)) return -EINVAL; if (pid <= 0) return -EINVAL; p = find_get_pid(pid); if (!p) return -ESRCH; fd = pidfd_create(p, flags); put_pid(p); return fd; } #ifdef CONFIG_SYSCTL static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root) { return &task_active_pid_ns(current)->set; } static int set_is_seen(struct ctl_table_set *set) { return &task_active_pid_ns(current)->set == set; } static int pid_table_root_permissions(struct ctl_table_header *head, const struct ctl_table *table) { struct pid_namespace *pidns = container_of(head->set, struct pid_namespace, set); int mode = table->mode; if (ns_capable_noaudit(pidns->user_ns, CAP_SYS_ADMIN) || uid_eq(current_euid(), make_kuid(pidns->user_ns, 0))) mode = (mode & S_IRWXU) >> 6; else if (in_egroup_p(make_kgid(pidns->user_ns, 0))) mode = (mode & S_IRWXG) >> 3; else mode = mode & S_IROTH; return (mode << 6) | (mode << 3) | mode; } static void pid_table_root_set_ownership(struct ctl_table_header *head, kuid_t *uid, kgid_t *gid) { struct pid_namespace *pidns = container_of(head->set, struct pid_namespace, set); kuid_t ns_root_uid; kgid_t ns_root_gid; ns_root_uid = make_kuid(pidns->user_ns, 0); if (uid_valid(ns_root_uid)) *uid = ns_root_uid; ns_root_gid = make_kgid(pidns->user_ns, 0); if (gid_valid(ns_root_gid)) *gid = ns_root_gid; } static struct ctl_table_root pid_table_root = { .lookup = pid_table_root_lookup, .permissions = pid_table_root_permissions, .set_ownership = pid_table_root_set_ownership, }; static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct pid *new_pid; pid_t tmp_pid; int r; struct ctl_table tmp_table = *table;
tmp_pid = pid_vnr(cad_pid); tmp_table.data = &tmp_pid; r = proc_dointvec(&tmp_table, write, buffer, lenp, ppos); if (r || !write) return r; new_pid = find_get_pid(tmp_pid); if (!new_pid) return -ESRCH; put_pid(xchg(&cad_pid, new_pid)); return 0; } static const struct ctl_table pid_table[] = { { .procname = "pid_max", .data = &init_pid_ns.pid_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, #ifdef CONFIG_PROC_SYSCTL { .procname = "cad_pid", .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_do_cad_pid, }, #endif }; #endif int register_pidns_sysctls(struct pid_namespace *pidns) { #ifdef CONFIG_SYSCTL struct ctl_table *tbl; setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen); tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL); if (!tbl) return -ENOMEM; tbl->data = &pidns->pid_max; pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl, ARRAY_SIZE(pid_table)); if (!pidns->sysctls) { kfree(tbl); retire_sysctl_set(&pidns->set); return -ENOMEM; } #endif return 0; } void unregister_pidns_sysctls(struct pid_namespace *pidns) { #ifdef CONFIG_SYSCTL const struct ctl_table *tbl; tbl = pidns->sysctls->ctl_table_arg; unregister_sysctl_table(pidns->sysctls); retire_sysctl_set(&pidns->set); kfree(tbl); #endif } void __init pid_idr_init(void) { /* Verify no one has done anything silly: */ BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING); /* bump default and minimum pid_max based on number of cpus */ init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pid_max_min = max_t(int, pid_max_min, PIDS_PER_CPU_MIN * num_possible_cpus()); pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min); idr_init(&init_pid_ns.idr); init_pid_ns.pid_cachep = kmem_cache_create("pid", struct_size_t(struct pid, numbers, 1), __alignof__(struct pid), SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); } static __init int pid_namespace_sysctl_init(void) { #ifdef CONFIG_SYSCTL /* "kernel" directory will have already been initialized. */ BUG_ON(register_pidns_sysctls(&init_pid_ns)); #endif return 0; } subsys_initcall(pid_namespace_sysctl_init); static struct file *__pidfd_fget(struct task_struct *task, int fd) { struct file *file; int ret; ret = down_read_killable(&task->signal->exec_update_lock); if (ret) return ERR_PTR(ret); if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS)) file = fget_task(task, fd); else file = ERR_PTR(-EPERM); up_read(&task->signal->exec_update_lock); if (!file) { /* * It is possible that the target thread is exiting; it can be * either: * 1. before exit_signals(), which gives a real fd * 2. before exit_files() takes the task_lock() gives a real fd * 3. after exit_files() releases task_lock(), ->files is NULL; * this has PF_EXITING, since it was set in exit_signals(), * __pidfd_fget() returns EBADF. * In case 3 we get EBADF, but that really means ESRCH, since * the task is currently exiting and has freed its files * struct, so we fix it up. 
*/ if (task->flags & PF_EXITING) file = ERR_PTR(-ESRCH); else file = ERR_PTR(-EBADF); } return file; } static int pidfd_getfd(struct pid *pid, int fd) { struct task_struct *task; struct file *file; int ret; task = get_pid_task(pid, PIDTYPE_PID); if (!task) return -ESRCH; file = __pidfd_fget(task, fd); put_task_struct(task); if (IS_ERR(file)) return PTR_ERR(file); ret = receive_fd(file, NULL, O_CLOEXEC); fput(file); return ret; } /** * sys_pidfd_getfd() - Get a file descriptor from another process * * @pidfd: the pidfd file descriptor of the process * @fd: the file descriptor number to get * @flags: flags on how to get the fd (reserved) * * This syscall gets a copy of a file descriptor from another process * based on the pidfd and the file descriptor number. It requires that * the calling process has the ability to ptrace the process represented * by the pidfd. The process which is having its file descriptor copied * is otherwise unaffected. * * Return: On success, a cloexec file descriptor is returned. * On error, a negative errno number will be returned. */ SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd, unsigned int, flags) { struct pid *pid; /* flags is currently unused - make sure it's unset */ if (flags) return -EINVAL; CLASS(fd, f)(pidfd); if (fd_empty(f)) return -EBADF; pid = pidfd_pid(fd_file(f)); if (IS_ERR(pid)) return PTR_ERR(pid); return pidfd_getfd(pid, fd); }
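/*
 * Editor's note: a minimal userspace sketch (not part of the original
 * file) pairing the two syscalls implemented above. pidfd_open() yields
 * a pidfd for the target process; pidfd_getfd() then copies one of the
 * target's file descriptors into the caller, subject to the ptrace
 * access check described in the kernel-doc. Assumes glibc headers that
 * define the SYS_pidfd_* numbers (kernel 5.6+); error handling trimmed.
 */
#include <sys/syscall.h>
#include <unistd.h>

static int grab_remote_fd(pid_t pid, int remote_fd)
{
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	int fd;

	if (pidfd < 0)
		return -1;
	/* returns a local O_CLOEXEC duplicate of the target's remote_fd */
	fd = syscall(SYS_pidfd_getfd, pidfd, remote_fd, 0);
	close(pidfd);
	return fd;
}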
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. * All Rights Reserved. */ #ifndef __XFS_LOG_PRIV_H__ #define __XFS_LOG_PRIV_H__ #include "xfs_extent_busy.h" /* for struct xfs_busy_extents */ struct xfs_buf; struct xlog; struct xlog_ticket; struct xfs_mount; /* * get client id from packed copy. * * this hack is here because the xlog_pack code copies four bytes * of xlog_op_header containing the fields oh_clientid, oh_flags * and oh_res2 into the packed copy. * * later on this four byte chunk is treated as an int and the * client id is pulled out. * * this has endian issues, of course. */ static inline uint xlog_get_client_id(__be32 i) { return be32_to_cpu(i) >> 24; }
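/*
 * Editor's note: an illustration (not part of the original header) of the
 * byte layout the comment above describes. The packed copy places
 * oh_clientid in the most significant byte of the big-endian word, so the
 * shift by 24 recovers it. The helper below is hypothetical, for
 * illustration only; 0x69 is XFS_TRANSACTION, the usual client id for
 * transaction log records.
 */
static inline bool xlog_client_is_transaction(__be32 packed)
{
	return xlog_get_client_id(packed) == 0x69;	/* XFS_TRANSACTION */
}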
/* * In core log state */ enum xlog_iclog_state { XLOG_STATE_ACTIVE, /* Current IC log being written to */ XLOG_STATE_WANT_SYNC, /* Want to sync this iclog; no more writes */ XLOG_STATE_SYNCING, /* This IC log is syncing */ XLOG_STATE_DONE_SYNC, /* Done syncing to disk */ XLOG_STATE_CALLBACK, /* Callback functions now */ XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */ }; #define XLOG_STATE_STRINGS \ { XLOG_STATE_ACTIVE, "XLOG_STATE_ACTIVE" }, \ { XLOG_STATE_WANT_SYNC, "XLOG_STATE_WANT_SYNC" }, \ { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \ { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \ { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \ { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" } /* * In core log flags */ #define XLOG_ICL_NEED_FLUSH (1u << 0) /* iclog needs REQ_PREFLUSH */ #define XLOG_ICL_NEED_FUA (1u << 1) /* iclog needs REQ_FUA */ #define XLOG_ICL_STRINGS \ { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \ { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" } /* * Log ticket flags */ #define XLOG_TIC_PERM_RESERV (1u << 0) /* permanent reservation */ #define XLOG_TIC_FLAGS \ { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" } /* * Below are states for covering allocation transactions. * By covering, we mean changing the h_tail_lsn in the last on-disk * log write such that no allocation transactions will be re-done during * recovery after a system crash. Recovery starts at the last on-disk * log write. * * These states are used to insert dummy log entries to cover * space allocation transactions which can undo non-transactional changes * after a crash. Writes to a file with space * already allocated do not result in any transactions. Allocations * might include space beyond the EOF. So if we just push the EOF a * little, the last transaction for the file could contain the wrong * size. If there is no file system activity, after an allocation * transaction, and the system crashes, the allocation transaction * will get replayed and the file will be truncated. This could * be hours/days/... after the allocation occurred. * * The fix for this is to do two dummy transactions when the * system is idle. We need two dummy transactions because the h_tail_lsn * in the log record header needs to point beyond the last possible * non-dummy transaction. The first dummy changes the h_tail_lsn to * the first transaction before the dummy. The second dummy causes * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn. * * These dummy transactions get committed when everything * is idle (after there has been some activity). * * There are 5 states used to control this. * * IDLE -- no logging has been done on the file system or * we are done covering previous transactions. * NEED -- logging has occurred and we need a dummy transaction * when the log becomes idle. * DONE -- we were in the NEED state and have committed a dummy * transaction. * NEED2 -- we detected that a dummy transaction has gone to the * on disk log with no other transactions. * DONE2 -- we committed a dummy transaction when in the NEED2 state. * * There are two places where we switch states: * * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2. * We commit the dummy transaction and switch to DONE or DONE2, * respectively. In all other states, we don't do anything. * * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
* * No matter what state we are in, if this isn't the dummy * transaction going out, the next state is NEED. * So, if we aren't in the DONE or DONE2 states, the next state * is NEED. We can't be finishing a write of the dummy record * unless it was committed and the state switched to DONE or DONE2. * * If we are in the DONE state and this was a write of the * dummy transaction, we move to NEED2. * * If we are in the DONE2 state and this was a write of the * dummy transaction, we move to IDLE. * * * If we write only one dummy transaction, its log write can get appended to * one file space allocation. When this happens, the log recovery * code replays the space allocation and a file could be truncated. * This is why we have the NEED2 and DONE2 states before going idle. */ #define XLOG_STATE_COVER_IDLE 0 #define XLOG_STATE_COVER_NEED 1 #define XLOG_STATE_COVER_DONE 2 #define XLOG_STATE_COVER_NEED2 3 #define XLOG_STATE_COVER_DONE2 4 #define XLOG_COVER_OPS 5 struct xlog_ticket { struct list_head t_queue; /* reserve/write queue */ struct task_struct *t_task; /* task that owns this ticket */ xlog_tid_t t_tid; /* transaction identifier */ atomic_t t_ref; /* ticket reference count */ int t_curr_res; /* current reservation */ int t_unit_res; /* unit reservation */ char t_ocnt; /* original unit count */ char t_cnt; /* current unit count */ uint8_t t_flags; /* properties of reservation */ int t_iclog_hdrs; /* iclog hdrs in t_curr_res */ }; /* * In-core log structure. * * - ic_force_wait is used to implement synchronous forcing of the iclog to disk. * - ic_next is the pointer to the next iclog in the ring. * - ic_log is a pointer back to the global log structure. * - ic_size is the full size of the log buffer, minus the cycle headers. * - ic_offset is the current number of bytes written to in this iclog. * - ic_refcnt is bumped when someone is writing to the log. * - ic_state is the state of the iclog. * * Because of cacheline contention on large machines, we need to separate * various resources onto different cachelines. To start with, make the * structure cacheline aligned. The following fields can be contended on * by independent processes: * * - ic_callbacks * - ic_refcnt * - fields protected by the global l_icloglock * * so we need to ensure that these fields are located in separate cachelines. * We'll put all the read-only and l_icloglock fields in the first cacheline, * and move everything else out to subsequent cachelines. */ struct xlog_in_core { wait_queue_head_t ic_force_wait; wait_queue_head_t ic_write_wait; struct xlog_in_core *ic_next; struct xlog_in_core *ic_prev; struct xlog *ic_log; u32 ic_size; u32 ic_offset; enum xlog_iclog_state ic_state; unsigned int ic_flags; void *ic_datap; /* pointer to iclog data */ struct list_head ic_callbacks; /* reference counts need their own cacheline */ atomic_t ic_refcnt ____cacheline_aligned_in_smp; struct xlog_rec_header *ic_header; #ifdef DEBUG bool ic_fail_crc : 1; #endif struct semaphore ic_sema; struct work_struct ic_end_io_work; struct bio ic_bio; struct bio_vec ic_bvec[]; }; /* * The CIL context is used to aggregate per-transaction details as well as be * passed to the iclog for checkpoint post-commit processing. After being * passed to the iclog, another context needs to be allocated for tracking the * next set of transactions to be aggregated into a checkpoint.
*/ struct xfs_cil; struct xfs_cil_ctx { struct xfs_cil *cil; xfs_csn_t sequence; /* chkpt sequence # */ xfs_lsn_t start_lsn; /* first LSN of chkpt commit */ xfs_lsn_t commit_lsn; /* chkpt commit record lsn */ struct xlog_in_core *commit_iclog; struct xlog_ticket *ticket; /* chkpt ticket */ atomic_t space_used; /* aggregate size of regions */ struct xfs_busy_extents busy_extents; struct list_head log_items; /* log items in chkpt */ struct list_head lv_chain; /* logvecs being pushed */ struct list_head iclog_entry; struct list_head committing; /* ctx committing list */ struct work_struct push_work; atomic_t order_id; /* * CPUs that could have added items to the percpu CIL data. Access is * coordinated with xc_ctx_lock. */ struct cpumask cil_pcpmask; }; /* * Per-cpu CIL tracking items */ struct xlog_cil_pcp { int32_t space_used; uint32_t space_reserved; struct list_head busy_extents; struct list_head log_items; }; /* * Committed Item List structure * * This structure is used to track log items that have been committed but not * yet written into the log. It is used only when the delayed logging mount * option is enabled. * * This structure tracks the list of committing checkpoint contexts so * we can avoid the problem of having to hold out new transactions during a * flush until we have the commit record LSN of the checkpoint. We can * traverse the list of committing contexts in xlog_cil_push_lsn() to find a * sequence match and extract the commit LSN directly from there. If the * checkpoint is still in the process of committing, we can block waiting for * the commit LSN to be determined as well. This should make synchronous * operations almost as efficient as the old logging methods. */ struct xfs_cil { struct xlog *xc_log; unsigned long xc_flags; atomic_t xc_iclog_hdrs; struct workqueue_struct *xc_push_wq; struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp; struct xfs_cil_ctx *xc_ctx; spinlock_t xc_push_lock ____cacheline_aligned_in_smp; xfs_csn_t xc_push_seq; bool xc_push_commit_stable; struct list_head xc_committing; wait_queue_head_t xc_commit_wait; wait_queue_head_t xc_start_wait; xfs_csn_t xc_current_sequence; wait_queue_head_t xc_push_wait; /* background push throttle */ void __percpu *xc_pcp; /* percpu CIL structures */ } ____cacheline_aligned_in_smp; /* xc_flags bit values */ #define XLOG_CIL_EMPTY 1 #define XLOG_CIL_PCP_SPACE 2 /* * The amount of log space we allow the CIL to aggregate is difficult to size. * Whatever we choose, we have to make sure we can get a reservation for the * log space effectively, that it is large enough to capture sufficient * relogging to reduce log buffer IO significantly, but it is not too large for * the log or induces too much latency when writing out through the iclogs. We * track both space consumed and the number of vectors in the checkpoint * context, so we need to decide which to use for limiting. * * Every log buffer we write out during a push needs a header reserved, which * is at least one sector and more for v2 logs. Hence we need a reservation of * at least 512 bytes per 32k of log space just for the LR headers. That means * 16KB of reservation per megabyte of delayed logging space we will consume, * plus various headers. The number of headers will vary based on the number of * io vectors, so limiting on a specific number of vectors is going to result * in transactions of varying size.
IOWs, it is more consistent to track and * limit space consumed in the log rather than by the number of objects being * logged in order to prevent checkpoint ticket overruns. * * Further, use of static reservations through the log grant mechanism is * problematic. It introduces a lot of complexity (e.g. reserve grant vs write * grant) and a significant deadlock potential because regranting write space * can block on log pushes. Hence if we have to regrant log space during a log * push, we can deadlock. * * However, we can avoid this by use of a dynamic "reservation stealing" * technique during transaction commit whereby unused reservation space in the * transaction ticket is transferred to the CIL ctx commit ticket to cover the * space needed by the checkpoint transaction. This means that we never need to * specifically reserve space for the CIL checkpoint transaction, nor do we * need to regrant space once the checkpoint completes. This also means the * checkpoint transaction ticket is specific to the checkpoint context, rather * than the CIL itself. * * With dynamic reservations, we can effectively make up arbitrary limits for * the checkpoint size so long as they don't violate any other size rules. * Recovery imposes a rule that no transaction exceed half the log, so we are * limited by that. Furthermore, the log transaction reservation subsystem * tries to keep 25% of the log free, so we need to keep below that limit or we * risk running out of free log space to start any new transactions. * * In order to keep background CIL push efficient, we only need to ensure the * CIL is large enough to maintain sufficient in-memory relogging to avoid * repeated physical writes of frequently modified metadata. If we allow the CIL * to grow to a substantial fraction of the log, then we may be pinning hundreds * of megabytes of metadata in memory until the CIL flushes. This can cause * issues when we are running low on memory - pinned memory cannot be reclaimed, * and the CIL consumes a lot of memory. Hence we need to set an upper physical * size limit for the CIL that limits the maximum amount of memory pinned by the * CIL but does not limit performance by reducing relogging efficiency * significantly. * * As such, the CIL push threshold ends up being the smaller of two thresholds: * - a threshold large enough that it allows the CIL to be pushed and progress to be * made without excessive blocking of incoming transaction commits. This is * defined to be 12.5% of the log space - half the 25% push threshold of the * AIL. * - small enough that it doesn't pin excessive amounts of memory but maintains * close to peak relogging efficiency. This is defined to be 16x the iclog * buffer window (32MB) as measurements have shown this to be roughly the * point of diminishing performance increases under highly concurrent * modification workloads. * * To prevent the CIL from overflowing upper commit size bounds, we introduce a * new threshold at which we block committing transactions until the background * CIL commit commences and switches to a new context. While this is not a hard * limit, it forces the process committing a transaction to the CIL to block and * yield the CPU, giving the CIL push work a chance to be scheduled and start * work. This prevents a process running lots of transactions from overfilling * the CIL because it is not yielding the CPU. We set the blocking limit at * twice the background push space threshold so we keep in line with the AIL * push thresholds.
* * Note: this is not a -hard- limit as blocking is applied after the transaction * is inserted into the CIL and the push has been triggered. It is largely a * throttling mechanism that allows the CIL push to be scheduled and run. A hard * limit would be difficult to implement without introducing global serialisation * in the CIL commit fast path, and it's not at all clear that we actually need * such hard limits given the ~7 years we've run without a hard limit before * finding the first situation where a checkpoint size overflow actually * occurred. Hence the simple throttle, and an ASSERT check to tell us that * we've overrun the max size. */ #define XLOG_CIL_SPACE_LIMIT(log) \ min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4) #define XLOG_CIL_BLOCKING_SPACE_LIMIT(log) \ (XLOG_CIL_SPACE_LIMIT(log) * 2) /* * ticket grant locks, queues and accounting have their own cachelines * as these are quite hot and can be operated on concurrently. */ struct xlog_grant_head { spinlock_t lock ____cacheline_aligned_in_smp; struct list_head waiters; atomic64_t grant; }; /* * The reservation head lsn is not made up of a cycle number and block number. * Instead, it uses a cycle number and byte number. Logs don't expect to * overflow 31 bits worth of byte offset, so using a byte number will mean * that round off problems won't occur when releasing partial reservations. */ struct xlog { /* The following fields don't need locking */ struct xfs_mount *l_mp; /* mount point */ struct xfs_ail *l_ailp; /* AIL log is working with */ struct xfs_cil *l_cilp; /* CIL log is working with */ struct xfs_buftarg *l_targ; /* buftarg of log */ struct workqueue_struct *l_ioend_workqueue; /* for I/O completions */ struct delayed_work l_work; /* background flush work */ long l_opstate; /* operational state */ uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ struct list_head *l_buf_cancel_table; struct list_head r_dfops; /* recovered log intent items */ int l_iclog_hsize; /* size of iclog header */ uint l_sectBBsize; /* sector size in BBs (2^n) */ int l_iclog_size; /* size of log in bytes */ int l_iclog_bufs; /* number of iclog buffers */ xfs_daddr_t l_logBBstart; /* start block of log */ int l_logsize; /* size of log in bytes */ int l_logBBsize; /* size of log in BB chunks */ /* The following block of fields are changed while holding icloglock */ wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp; /* waiting for iclog flush */ int l_covered_state;/* state of "covering disk * log entries" */ struct xlog_in_core *l_iclog; /* head log queue */ spinlock_t l_icloglock; /* grab to change iclog state */ int l_curr_cycle; /* Cycle number of log writes */ int l_prev_cycle; /* Cycle number before last * block increment */ int l_curr_block; /* current logical log block */ int l_prev_block; /* previous logical log block */ /* * l_tail_lsn is atomic so it can be set and read without needing to * hold specific locks. To avoid operations contending with other hot * objects, it is kept on a separate cacheline.
*/ /* lsn of 1st LR with unflushed * buffers */ atomic64_t l_tail_lsn ____cacheline_aligned_in_smp; struct xlog_grant_head l_reserve_head; struct xlog_grant_head l_write_head; uint64_t l_tail_space; struct xfs_kobj l_kobj; /* log recovery lsn tracking (for buffer submission) */ xfs_lsn_t l_recovery_lsn; uint32_t l_iclog_roundoff;/* padding roundoff */ }; /* * Bits for operational state */ #define XLOG_ACTIVE_RECOVERY 0 /* in the middle of recovery */ #define XLOG_RECOVERY_NEEDED 1 /* log was recovered */ #define XLOG_IO_ERROR 2 /* log hit an I/O error, and being shutdown */ #define XLOG_TAIL_WARN 3 /* log tail verify warning issued */ #define XLOG_SHUTDOWN_STARTED 4 /* xlog_force_shutdown() exclusion */ static inline bool xlog_recovery_needed(struct xlog *log) { return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); } static inline bool xlog_in_recovery(struct xlog *log) { return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); } static inline bool xlog_is_shutdown(struct xlog *log) { return test_bit(XLOG_IO_ERROR, &log->l_opstate); } /* * Wait until xlog_force_shutdown() has marked the log as shut down * so xlog_is_shutdown() will always return true. */ static inline void xlog_shutdown_wait( struct xlog *log) { wait_var_event(&log->l_opstate, xlog_is_shutdown(log)); } /* common routines */ extern int xlog_recover( struct xlog *log); extern int xlog_recover_finish( struct xlog *log); extern void xlog_recover_cancel(struct xlog *); __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead, char *dp, unsigned int hdrsize, unsigned int size); extern struct kmem_cache *xfs_log_ticket_cache; struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes, int count, bool permanent); void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); void xlog_print_trans(struct xfs_trans *); int xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx, struct list_head *lv_chain, struct xlog_ticket *tic, uint32_t len); void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket); void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog, int eventual_size); int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog, struct xlog_ticket *ticket); /* * When we crack an atomic LSN, we sample it first so that the value will not * change while we are cracking it into the component values. This means we * will always get consistent component values to work from. This should always * be used to sample and crack LSNs that are stored and updated in atomic * variables. */ static inline void xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block) { xfs_lsn_t val = atomic64_read(lsn); *cycle = CYCLE_LSN(val); *block = BLOCK_LSN(val); } /* * Calculate and assign a value to an atomic LSN variable from component pieces.
*/ static inline void xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block) { atomic64_set(lsn, xlog_assign_lsn(cycle, block)); } /* * Committed Item List interfaces */ int xlog_cil_init(struct xlog *log); void xlog_cil_init_post_recovery(struct xlog *log); void xlog_cil_destroy(struct xlog *log); bool xlog_cil_empty(struct xlog *log); void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp, xfs_csn_t *commit_seq, bool regrant); void xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx, struct xlog_in_core *iclog); /* * CIL force routines */ void xlog_cil_flush(struct xlog *log); xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence); static inline void xlog_cil_force(struct xlog *log) { xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence); } /* * Wrapper function for waiting on a wait queue serialised against wakeups * by a spinlock. This matches the semantics of all the wait queues used in the * log code. */ static inline void xlog_wait( struct wait_queue_head *wq, struct spinlock *lock) __releases(lock) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(wq, &wait); __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock(lock); schedule(); remove_wait_queue(wq, &wait); } int xlog_wait_on_iclog(struct xlog_in_core *iclog) __releases(iclog->ic_log->l_icloglock); /* Calculate the distance between two LSNs in bytes */ static inline uint64_t xlog_lsn_sub( struct xlog *log, xfs_lsn_t high, xfs_lsn_t low) { uint32_t hi_cycle = CYCLE_LSN(high); uint32_t hi_block = BLOCK_LSN(high); uint32_t lo_cycle = CYCLE_LSN(low); uint32_t lo_block = BLOCK_LSN(low); if (hi_cycle == lo_cycle) return BBTOB(hi_block - lo_block); ASSERT((hi_cycle == lo_cycle + 1) || xlog_is_shutdown(log)); return (uint64_t)log->l_logsize - BBTOB(lo_block - hi_block); } void xlog_grant_return_space(struct xlog *log, xfs_lsn_t old_head, xfs_lsn_t new_head); /* * The LSN is valid so long as it is behind the current LSN. If it isn't, this * means that the next log record that includes this metadata could have a * smaller LSN. In turn, this means that the modification in the log would not * replay. */ static inline bool xlog_valid_lsn( struct xlog *log, xfs_lsn_t lsn) { int cur_cycle; int cur_block; bool valid = true; /* * First, sample the current lsn without locking to avoid added * contention from metadata I/O. The current cycle and block are updated * (in xlog_state_switch_iclogs()) and read here in a particular order * to avoid false negatives (e.g., thinking the metadata LSN is valid * when it is not). * * The current block is always rewound before the cycle is bumped in * xlog_state_switch_iclogs() to ensure the current LSN is never seen in * a transiently forward state. Instead, we can see the LSN in a * transiently behind state if we happen to race with a cycle wrap. */ cur_cycle = READ_ONCE(log->l_curr_cycle); smp_rmb(); cur_block = READ_ONCE(log->l_curr_block); if ((CYCLE_LSN(lsn) > cur_cycle) || (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { /* * If the metadata LSN appears invalid, it's possible the check * above raced with a wrap to the next log cycle. Grab the lock * to check for sure. */ spin_lock(&log->l_icloglock); cur_cycle = log->l_curr_cycle; cur_block = log->l_curr_block; spin_unlock(&log->l_icloglock); if ((CYCLE_LSN(lsn) > cur_cycle) || (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) valid = false; } return valid; } /* * Log vector and shadow buffers can be large, so we need to use kvmalloc() here * to ensure success. 
Unfortunately, kvmalloc() only allows GFP_KERNEL contexts * to fall back to vmalloc, so we can't actually do anything useful with gfp * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc() * will do direct reclaim and compaction in the slow path, both of which are * horrendously expensive. We just want kmalloc to fail fast and fall back to * vmalloc if it can't get something straight away from the free lists or * buddy allocator. Hence we have to open code kvmalloc ourselves here. * * This assumes that the caller uses memalloc_nofs_save task context here, so * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS * allocations. This is actually the only way to make vmalloc() do GFP_NOFS * allocations, so let's just all pretend this is a GFP_KERNEL context * operation.... */ static inline void * xlog_kvmalloc( size_t buf_size) { gfp_t flags = GFP_KERNEL; void *p; flags &= ~__GFP_DIRECT_RECLAIM; flags |= __GFP_NOWARN | __GFP_NORETRY; do { p = kmalloc(buf_size, flags); if (!p) p = vmalloc(buf_size); } while (!p); return p; } /* * Given a count of iovecs and space for a log item, compute the space we need * in the log to store that data plus the log headers. */ static inline unsigned int xlog_item_space( unsigned int niovecs, unsigned int nbytes) { nbytes += niovecs * (sizeof(uint64_t) + sizeof(struct xlog_op_header)); return round_up(nbytes, sizeof(uint64_t)); } /* * Cycles over XLOG_CYCLE_DATA_SIZE overflow into the extended header that was * added for v2 logs. Addressing for the cycles array there is off by one, * because the first batch of cycles is in the original header. */ static inline __be32 *xlog_cycle_data(struct xlog_rec_header *rhead, unsigned i) { if (i >= XLOG_CYCLE_DATA_SIZE) { unsigned j = i / XLOG_CYCLE_DATA_SIZE; unsigned k = i % XLOG_CYCLE_DATA_SIZE; return &rhead->h_ext[j - 1].xh_cycle_data[k]; } return &rhead->h_cycle_data[i]; } #endif /* __XFS_LOG_PRIV_H__ */
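/*
 * Illustration only, not part of the kernel header above: a standalone
 * userspace model of the xlog_cycle_data() addressing, to make the
 * off-by-one concrete. The per-header slot count is assumed to be 64
 * here (32k of cycle data at 512 bytes per basic block); the real value
 * is XLOG_CYCLE_DATA_SIZE from the log record format. Batch 0 lives in
 * h_cycle_data[] in the main header, so the extended headers start one
 * batch late.
 */
#include <stdio.h>

#define CYCLE_DATA_SIZE 64	/* assumed stand-in for XLOG_CYCLE_DATA_SIZE */

static void locate_cycle(unsigned int i)
{
	if (i >= CYCLE_DATA_SIZE) {
		unsigned int j = i / CYCLE_DATA_SIZE;
		unsigned int k = i % CYCLE_DATA_SIZE;

		/* first batch is in the main header, hence j - 1 */
		printf("cycle %u -> h_ext[%u].xh_cycle_data[%u]\n", i, j - 1, k);
	} else {
		printf("cycle %u -> h_cycle_data[%u]\n", i, i);
	}
}

int main(void)
{
	locate_cycle(10);	/* -> h_cycle_data[10] */
	locate_cycle(64);	/* -> h_ext[0].xh_cycle_data[0] */
	locate_cycle(130);	/* -> h_ext[1].xh_cycle_data[2] */
	return 0;
}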
// SPDX-License-Identifier: GPL-2.0 /* * The USB Monitor, inspired by Dave Harding's USBMon. * * This is a binary format reader. * * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it) * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com) */ #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/export.h> #include <linux/usb.h> #include <linux/poll.h> #include <linux/compat.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/time64.h> #include <linux/uaccess.h> #include "usb_mon.h" /* * Defined by USB 2.0 clause 9.3, table 9.2.
*/ #define SETUP_LEN 8 /* ioctl macros */ #define MON_IOC_MAGIC 0x92 #define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1) /* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */ #define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats) #define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4) #define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5) #define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get) #define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch) #define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8) /* #9 was MON_IOCT_SETAPI */ #define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get) #ifdef CONFIG_COMPAT #define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32) #define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32) #define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32) #endif /* * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc). * But it's all right. Just use a simple way to make sure the chunk is never * smaller than a page. * * N.B. An application does not know our chunk size. * * Woops, get_zeroed_page() returns a single page. I guess we're stuck with * page-sized chunks for the time being. */ #define CHUNK_SIZE PAGE_SIZE #define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1)) /* * The magic limit was calculated so that it allows the monitoring * application to pick data once in two ticks. This way, another application, * which presumably drives the bus, gets to hog CPU, yet we collect our data. * * Originally, for a 480 Mbit/s bus this required a buffer of about 1 MB. For * modern 20 Gbps buses, this value increases to over 50 MB. The maximum * buffer size is set to 64 MiB to accommodate this. * * This is still too much for most cases, where we just snoop a few * descriptor fetches for enumeration. So, the default is a "reasonable" * amount for typical, low-throughput use cases. * * XXX What about multi-megabyte URBs which take minutes to transfer? */ #define BUFF_MAX CHUNK_ALIGN(64*1024*1024) #define BUFF_DFL CHUNK_ALIGN(300*1024) #define BUFF_MIN CHUNK_ALIGN(8*1024) /* * The per-event API header (2 per URB). * * This structure is seen in userland as defined by the documentation. */ struct mon_bin_hdr { u64 id; /* URB ID - from submission to callback */ unsigned char type; /* Same as in text API; extensible. */ unsigned char xfer_type; /* ISO, Intr, Control, Bulk */ unsigned char epnum; /* Endpoint number and transfer direction */ unsigned char devnum; /* Device address */ unsigned short busnum; /* Bus number */ char flag_setup; char flag_data; s64 ts_sec; /* ktime_get_real_ts64 */ s32 ts_usec; /* ktime_get_real_ts64 */ int status; unsigned int len_urb; /* Length of data (submitted or actual) */ unsigned int len_cap; /* Delivered length */ union { unsigned char setup[SETUP_LEN]; /* Only for Control S-type */ struct iso_rec { int error_count; int numdesc; } iso; } s; int interval; int start_frame; unsigned int xfer_flags; unsigned int ndesc; /* Actual number of ISO descriptors */ }; /* * ISO vector, packed into the head of data stream. * This has to take 16 bytes to make sure that the end of buffer * wrap is not happening in the middle of a descriptor. */ struct mon_bin_isodesc { int iso_status; unsigned int iso_off; unsigned int iso_len; u32 _pad; }; /* per file statistic */ struct mon_bin_stats { u32 queued; u32 dropped; }; struct mon_bin_get { struct mon_bin_hdr __user *hdr; /* Can be 48 bytes or 64. 
*/ void __user *data; size_t alloc; /* Length of data (can be zero) */ }; struct mon_bin_mfetch { u32 __user *offvec; /* Vector of events fetched */ u32 nfetch; /* Number of events to fetch (out: fetched) */ u32 nflush; /* Number of events to flush */ }; #ifdef CONFIG_COMPAT struct mon_bin_get32 { u32 hdr32; u32 data32; u32 alloc32; }; struct mon_bin_mfetch32 { u32 offvec32; u32 nfetch32; u32 nflush32; }; #endif /* Keeping these two values the same prevents wrapping of the mon_bin_hdr */ #define PKT_ALIGN 64 #define PKT_SIZE 64 #define PKT_SZ_API0 48 /* API 0 (2.6.20) size */ #define PKT_SZ_API1 64 /* API 1 size: extra fields */ #define ISODESC_MAX 128 /* Same number as usbfs allows, 2048 bytes. */ /* max number of USB buses supported */ #define MON_BIN_MAX_MINOR 128 /* * The buffer: map of used pages. */ struct mon_pgmap { struct page *pg; unsigned char *ptr; /* XXX just use page_to_virt everywhere? */ }; /* * This gets associated with an open file struct. */ struct mon_reader_bin { /* The buffer: one per open. */ spinlock_t b_lock; /* Protect b_cnt, b_in */ unsigned int b_size; /* Current size of the buffer - bytes */ unsigned int b_cnt; /* Bytes used */ unsigned int b_in, b_out; /* Offsets into buffer - bytes */ unsigned int b_read; /* Amount of read data in curr. pkt. */ struct mon_pgmap *b_vec; /* The map array */ wait_queue_head_t b_wait; /* Wait for data here */ struct mutex fetch_lock; /* Protect b_read, b_out */ int mmap_active; /* A list of these is needed for "bus 0". Some time later. */ struct mon_reader r; /* Stats */ unsigned int cnt_lost; }; static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp, unsigned int offset) { return (struct mon_bin_hdr *) (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); } #define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0) static unsigned char xfer_to_pipe[4] = { PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT }; static const struct class mon_bin_class = { .name = "usbmon", }; static dev_t mon_bin_dev0; static struct cdev mon_bin_cdev; static void mon_buff_area_fill(const struct mon_reader_bin *rp, unsigned int offset, unsigned int size); static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp); static int mon_alloc_buff(struct mon_pgmap *map, int npages); static void mon_free_buff(struct mon_pgmap *map, int npages); /* * This is a "chunked memcpy". It does not manipulate any counters. */ static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this, unsigned int off, const unsigned char *from, unsigned int length) { unsigned int step_len; unsigned char *buf; unsigned int in_page; while (length) { /* * Determine step_len. */ step_len = length; in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); if (in_page < step_len) step_len = in_page; /* * Copy data and advance pointers. */ buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; memcpy(buf, from, step_len); if ((off += step_len) >= this->b_size) off = 0; from += step_len; length -= step_len; } return off; } /* * This is a little worse than the above because it's "chunked copy_to_user". * The return value is an error code, not an offset. */ static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off, char __user *to, int length) { unsigned int step_len; unsigned char *buf; unsigned int in_page; while (length) { /* * Determine step_len. */ step_len = length; in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); if (in_page < step_len) step_len = in_page; /* * Copy data and advance pointers.
*/ buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; if (copy_to_user(to, buf, step_len)) return -EINVAL; if ((off += step_len) >= this->b_size) off = 0; to += step_len; length -= step_len; } return 0; } /* * Allocate an (aligned) area in the buffer. * This is called under b_lock. * Returns ~0 on failure. */ static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp, unsigned int size) { unsigned int offset; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if (rp->b_cnt + size > rp->b_size) return ~0; offset = rp->b_in; rp->b_cnt += size; if ((rp->b_in += size) >= rp->b_size) rp->b_in -= rp->b_size; return offset; } /* * This is the same thing as mon_buff_area_alloc, only it does not allow * buffers to wrap. This is needed by applications which pass references * into mmap-ed buffers up their stacks (libpcap can do that). * * Currently, we always have the header stuck with the data, although * it is not strictly speaking necessary. * * When a buffer would wrap, we place a filler packet to mark the space. */ static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp, unsigned int size) { unsigned int offset; unsigned int fill_size; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if (rp->b_cnt + size > rp->b_size) return ~0; if (rp->b_in + size > rp->b_size) { /* * This would wrap. Find if we still have space after * skipping to the end of the buffer. If we do, place * a filler packet and allocate a new packet. */ fill_size = rp->b_size - rp->b_in; if (rp->b_cnt + size + fill_size > rp->b_size) return ~0; mon_buff_area_fill(rp, rp->b_in, fill_size); offset = 0; rp->b_in = size; rp->b_cnt += size + fill_size; } else if (rp->b_in + size == rp->b_size) { offset = rp->b_in; rp->b_in = 0; rp->b_cnt += size; } else { offset = rp->b_in; rp->b_in += size; rp->b_cnt += size; } return offset; } /* * Return a few (kilo-)bytes to the head of the buffer. * This is used if a data fetch fails. */ static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size) { /* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */ rp->b_cnt -= size; if (rp->b_in < size) rp->b_in += rp->b_size; rp->b_in -= size; } /* * This has to be called under both b_lock and fetch_lock, because * it accesses both b_cnt and b_out. 
*/ static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size) { size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); rp->b_cnt -= size; if ((rp->b_out += size) >= rp->b_size) rp->b_out -= rp->b_size; } static void mon_buff_area_fill(const struct mon_reader_bin *rp, unsigned int offset, unsigned int size) { struct mon_bin_hdr *ep; ep = MON_OFF2HDR(rp, offset); memset(ep, 0, PKT_SIZE); ep->type = '@'; ep->len_cap = size - PKT_SIZE; } static inline char mon_bin_get_setup(unsigned char *setupb, const struct urb *urb, char ev_type) { if (urb->setup_packet == NULL) return 'Z'; memcpy(setupb, urb->setup_packet, SETUP_LEN); return 0; } static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp, unsigned int offset, struct urb *urb, unsigned int length, char *flag) { int i; struct scatterlist *sg; unsigned int this_len; *flag = 0; if (urb->num_sgs == 0) { if (urb->transfer_buffer == NULL) { *flag = 'Z'; return length; } mon_copy_to_buff(rp, offset, urb->transfer_buffer, length); length = 0; } else { /* If IOMMU coalescing occurred, we cannot trust sg_page */ if (urb->transfer_flags & URB_DMA_SG_COMBINED) { *flag = 'D'; return length; } /* Copy up to the first non-addressable segment */ for_each_sg(urb->sg, sg, urb->num_sgs, i) { if (length == 0 || PageHighMem(sg_page(sg))) break; this_len = min_t(unsigned int, sg->length, length); offset = mon_copy_to_buff(rp, offset, sg_virt(sg), this_len); length -= this_len; } if (i == 0) *flag = 'D'; } return length; } /* * This is the look-ahead pass in case of 'C Zi', when actual_length cannot * be used to determine the length of the whole contiguous buffer. */ static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp, struct urb *urb, unsigned int ndesc) { struct usb_iso_packet_descriptor *fp; unsigned int length; length = 0; fp = urb->iso_frame_desc; while (ndesc-- != 0) { if (fp->actual_length != 0) { if (fp->offset + fp->actual_length > length) length = fp->offset + fp->actual_length; } fp++; } return length; } static void mon_bin_get_isodesc(const struct mon_reader_bin *rp, unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc) { struct mon_bin_isodesc *dp; struct usb_iso_packet_descriptor *fp; fp = urb->iso_frame_desc; while (ndesc-- != 0) { dp = (struct mon_bin_isodesc *) (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); dp->iso_status = fp->status; dp->iso_off = fp->offset; dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length; dp->_pad = 0; if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size) offset = 0; fp++; } } static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, char ev_type, int status) { const struct usb_endpoint_descriptor *epd = &urb->ep->desc; struct timespec64 ts; unsigned long flags; unsigned int urb_length; unsigned int offset; unsigned int length; unsigned int delta; unsigned int ndesc, lendesc; unsigned char dir; struct mon_bin_hdr *ep; char data_tag = 0; ktime_get_real_ts64(&ts); spin_lock_irqsave(&rp->b_lock, flags); /* * Find the maximum allowable length, then allocate space. */ urb_length = (ev_type == 'S') ? 
urb->transfer_buffer_length : urb->actual_length; length = urb_length; if (usb_endpoint_xfer_isoc(epd)) { if (urb->number_of_packets < 0) { ndesc = 0; } else if (urb->number_of_packets >= ISODESC_MAX) { ndesc = ISODESC_MAX; } else { ndesc = urb->number_of_packets; } if (ev_type == 'C' && usb_urb_dir_in(urb)) length = mon_bin_collate_isodesc(rp, urb, ndesc); } else { ndesc = 0; } lendesc = ndesc*sizeof(struct mon_bin_isodesc); /* not an issue unless there's a subtle bug in a HCD somewhere */ if (length >= urb->transfer_buffer_length) length = urb->transfer_buffer_length; if (length >= rp->b_size/5) length = rp->b_size/5; if (usb_urb_dir_in(urb)) { if (ev_type == 'S') { length = 0; data_tag = '<'; } /* Cannot rely on endpoint number in case of control ep.0 */ dir = USB_DIR_IN; } else { if (ev_type == 'C') { length = 0; data_tag = '>'; } dir = 0; } if (rp->mmap_active) { offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE + lendesc); } else { offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc); } if (offset == ~0) { rp->cnt_lost++; spin_unlock_irqrestore(&rp->b_lock, flags); return; } ep = MON_OFF2HDR(rp, offset); if ((offset += PKT_SIZE) >= rp->b_size) offset = 0; /* * Fill the allocated area. */ memset(ep, 0, PKT_SIZE); ep->type = ev_type; ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)]; ep->epnum = dir | usb_endpoint_num(epd); ep->devnum = urb->dev->devnum; ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; ep->ts_sec = ts.tv_sec; ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; ep->status = status; ep->len_urb = urb_length; ep->len_cap = length + lendesc; ep->xfer_flags = urb->transfer_flags; if (usb_endpoint_xfer_int(epd)) { ep->interval = urb->interval; } else if (usb_endpoint_xfer_isoc(epd)) { ep->interval = urb->interval; ep->start_frame = urb->start_frame; ep->s.iso.error_count = urb->error_count; ep->s.iso.numdesc = urb->number_of_packets; } if (usb_endpoint_xfer_control(epd) && ev_type == 'S') { ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type); } else { ep->flag_setup = '-'; } if (ndesc != 0) { ep->ndesc = ndesc; mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc); if ((offset += lendesc) >= rp->b_size) offset -= rp->b_size; } if (length != 0) { length = mon_bin_get_data(rp, offset, urb, length, &ep->flag_data); if (length > 0) { delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); ep->len_cap -= length; delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); mon_buff_area_shrink(rp, delta); } } else { ep->flag_data = data_tag; } spin_unlock_irqrestore(&rp->b_lock, flags); wake_up(&rp->b_wait); } static void mon_bin_submit(void *data, struct urb *urb) { struct mon_reader_bin *rp = data; mon_bin_event(rp, urb, 'S', -EINPROGRESS); } static void mon_bin_complete(void *data, struct urb *urb, int status) { struct mon_reader_bin *rp = data; mon_bin_event(rp, urb, 'C', status); } static void mon_bin_error(void *data, struct urb *urb, int error) { struct mon_reader_bin *rp = data; struct timespec64 ts; unsigned long flags; unsigned int offset; struct mon_bin_hdr *ep; ktime_get_real_ts64(&ts); spin_lock_irqsave(&rp->b_lock, flags); offset = mon_buff_area_alloc(rp, PKT_SIZE); if (offset == ~0) { /* Not incrementing cnt_lost. Just because. */ spin_unlock_irqrestore(&rp->b_lock, flags); return; } ep = MON_OFF2HDR(rp, offset); memset(ep, 0, PKT_SIZE); ep->type = 'E'; ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)]; ep->epnum = usb_urb_dir_in(urb) ? 
USB_DIR_IN : 0; ep->epnum |= usb_endpoint_num(&urb->ep->desc); ep->devnum = urb->dev->devnum; ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; ep->ts_sec = ts.tv_sec; ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; ep->status = error; ep->flag_setup = '-'; ep->flag_data = 'E'; spin_unlock_irqrestore(&rp->b_lock, flags); wake_up(&rp->b_wait); } static int mon_bin_open(struct inode *inode, struct file *file) { struct mon_bus *mbus; struct mon_reader_bin *rp; size_t size; int rc; mutex_lock(&mon_lock); mbus = mon_bus_lookup(iminor(inode)); if (mbus == NULL) { mutex_unlock(&mon_lock); return -ENODEV; } if (mbus != &mon_bus0 && mbus->u_bus == NULL) { printk(KERN_ERR TAG ": consistency error on open\n"); mutex_unlock(&mon_lock); return -ENODEV; } rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL); if (rp == NULL) { rc = -ENOMEM; goto err_alloc; } spin_lock_init(&rp->b_lock); init_waitqueue_head(&rp->b_wait); mutex_init(&rp->fetch_lock); rp->b_size = BUFF_DFL; size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE); if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) { rc = -ENOMEM; goto err_allocvec; } if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0) goto err_allocbuff; rp->r.m_bus = mbus; rp->r.r_data = rp; rp->r.rnf_submit = mon_bin_submit; rp->r.rnf_error = mon_bin_error; rp->r.rnf_complete = mon_bin_complete; mon_reader_add(mbus, &rp->r); file->private_data = rp; mutex_unlock(&mon_lock); return 0; err_allocbuff: kfree(rp->b_vec); err_allocvec: kfree(rp); err_alloc: mutex_unlock(&mon_lock); return rc; } /* * Extract an event from buffer and copy it to user space. * Wait if there is no event ready. * Returns zero or error. */ static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp, struct mon_bin_hdr __user *hdr, unsigned int hdrbytes, void __user *data, unsigned int nbytes) { unsigned long flags; struct mon_bin_hdr *ep; size_t step_len; unsigned int offset; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } ep = MON_OFF2HDR(rp, rp->b_out); if (copy_to_user(hdr, ep, hdrbytes)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } step_len = min(ep->len_cap, nbytes); if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0; if (copy_from_buf(rp, offset, data, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } spin_lock_irqsave(&rp->b_lock, flags); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; mutex_unlock(&rp->fetch_lock); return 0; } static int mon_bin_release(struct inode *inode, struct file *file) { struct mon_reader_bin *rp = file->private_data; struct mon_bus* mbus = rp->r.m_bus; mutex_lock(&mon_lock); if (mbus->nreaders <= 0) { printk(KERN_ERR TAG ": consistency error on close\n"); mutex_unlock(&mon_lock); return 0; } mon_reader_del(mbus, &rp->r); mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); kfree(rp->b_vec); kfree(rp); mutex_unlock(&mon_lock); return 0; } static ssize_t mon_bin_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct mon_reader_bin *rp = file->private_data; unsigned int hdrbytes = PKT_SZ_API0; unsigned long flags; struct mon_bin_hdr *ep; unsigned int offset; size_t step_len; char *ptr; ssize_t done = 0; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } ep = MON_OFF2HDR(rp, rp->b_out); if (rp->b_read < hdrbytes) { step_len = min_t(size_t, nbytes, hdrbytes - 
rp->b_read); ptr = ((char *)ep) + rp->b_read; if (step_len && copy_to_user(buf, ptr, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nbytes -= step_len; buf += step_len; rp->b_read += step_len; done += step_len; } if (rp->b_read >= hdrbytes) { step_len = ep->len_cap; step_len -= rp->b_read - hdrbytes; if (step_len > nbytes) step_len = nbytes; offset = rp->b_out + PKT_SIZE; offset += rp->b_read - hdrbytes; if (offset >= rp->b_size) offset -= rp->b_size; if (copy_from_buf(rp, offset, buf, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nbytes -= step_len; buf += step_len; rp->b_read += step_len; done += step_len; } /* * Check if whole packet was read, and if so, jump to the next one. */ if (rp->b_read >= hdrbytes + ep->len_cap) { spin_lock_irqsave(&rp->b_lock, flags); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; } mutex_unlock(&rp->fetch_lock); return done; } /* * Remove at most nevents from chunked buffer. * Returns the number of removed events. */ static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents) { unsigned long flags; struct mon_bin_hdr *ep; int i; mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); for (i = 0; i < nevents; ++i) { if (MON_RING_EMPTY(rp)) break; ep = MON_OFF2HDR(rp, rp->b_out); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); } spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; mutex_unlock(&rp->fetch_lock); return i; } /* * Fetch at most max event offsets into the buffer and put them into vec. * The events are usually freed later with mon_bin_flush. * Return the effective number of events fetched. */ static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp, u32 __user *vec, unsigned int max) { unsigned int cur_out; unsigned int bytes, avail; unsigned int size; unsigned int nevents; struct mon_bin_hdr *ep; unsigned long flags; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } spin_lock_irqsave(&rp->b_lock, flags); avail = rp->b_cnt; spin_unlock_irqrestore(&rp->b_lock, flags); cur_out = rp->b_out; nevents = 0; bytes = 0; while (bytes < avail) { if (nevents >= max) break; ep = MON_OFF2HDR(rp, cur_out); if (put_user(cur_out, &vec[nevents])) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nevents++; size = ep->len_cap + PKT_SIZE; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if ((cur_out += size) >= rp->b_size) cur_out -= rp->b_size; bytes += size; } mutex_unlock(&rp->fetch_lock); return nevents; } /* * Count events. This is almost the same as the above mon_bin_fetch, * only we do not store offsets into user vector, and we have no limit. 
*/ static int mon_bin_queued(struct mon_reader_bin *rp) { unsigned int cur_out; unsigned int bytes, avail; unsigned int size; unsigned int nevents; struct mon_bin_hdr *ep; unsigned long flags; mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); avail = rp->b_cnt; spin_unlock_irqrestore(&rp->b_lock, flags); cur_out = rp->b_out; nevents = 0; bytes = 0; while (bytes < avail) { ep = MON_OFF2HDR(rp, cur_out); nevents++; size = ep->len_cap + PKT_SIZE; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if ((cur_out += size) >= rp->b_size) cur_out -= rp->b_size; bytes += size; } mutex_unlock(&rp->fetch_lock); return nevents; } /* */ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mon_reader_bin *rp = file->private_data; // struct mon_bus* mbus = rp->r.m_bus; int ret = 0; struct mon_bin_hdr *ep; unsigned long flags; switch (cmd) { case MON_IOCQ_URB_LEN: /* * N.B. This only returns the size of data, without the header. */ spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) { ep = MON_OFF2HDR(rp, rp->b_out); ret = ep->len_cap; } spin_unlock_irqrestore(&rp->b_lock, flags); break; case MON_IOCQ_RING_SIZE: mutex_lock(&rp->fetch_lock); ret = rp->b_size; mutex_unlock(&rp->fetch_lock); break; case MON_IOCT_RING_SIZE: /* * Changing the buffer size will flush its contents; the new * buffer is allocated before releasing the old one to be sure * the device will stay functional even in case of memory * pressure. */ { int size; struct mon_pgmap *vec; if (arg < BUFF_MIN || arg > BUFF_MAX) return -EINVAL; size = CHUNK_ALIGN(arg); vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap), GFP_KERNEL); if (vec == NULL) { ret = -ENOMEM; break; } ret = mon_alloc_buff(vec, size/CHUNK_SIZE); if (ret < 0) { kfree(vec); break; } mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); if (rp->mmap_active) { mon_free_buff(vec, size/CHUNK_SIZE); kfree(vec); ret = -EBUSY; } else { mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); kfree(rp->b_vec); rp->b_vec = vec; rp->b_size = size; rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; rp->cnt_lost = 0; } spin_unlock_irqrestore(&rp->b_lock, flags); mutex_unlock(&rp->fetch_lock); } break; case MON_IOCH_MFLUSH: ret = mon_bin_flush(rp, arg); break; case MON_IOCX_GET: case MON_IOCX_GETX: { struct mon_bin_get getb; if (copy_from_user(&getb, (void __user *)arg, sizeof(struct mon_bin_get))) return -EFAULT; if (getb.alloc > 0x10000000) /* Want to cast to u32 */ return -EINVAL; ret = mon_bin_get_event(file, rp, getb.hdr, (cmd == MON_IOCX_GET)?
PKT_SZ_API0: PKT_SZ_API1, getb.data, (unsigned int)getb.alloc); } break; case MON_IOCX_MFETCH: { struct mon_bin_mfetch mfetch; struct mon_bin_mfetch __user *uptr; uptr = (struct mon_bin_mfetch __user *)arg; if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) return -EFAULT; if (mfetch.nflush) { ret = mon_bin_flush(rp, mfetch.nflush); if (ret < 0) return ret; if (put_user(ret, &uptr->nflush)) return -EFAULT; } ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch); if (ret < 0) return ret; if (put_user(ret, &uptr->nfetch)) return -EFAULT; ret = 0; } break; case MON_IOCG_STATS: { struct mon_bin_stats __user *sp; unsigned int nevents; unsigned int ndropped; spin_lock_irqsave(&rp->b_lock, flags); ndropped = rp->cnt_lost; rp->cnt_lost = 0; spin_unlock_irqrestore(&rp->b_lock, flags); nevents = mon_bin_queued(rp); sp = (struct mon_bin_stats __user *)arg; if (put_user(ndropped, &sp->dropped)) return -EFAULT; if (put_user(nevents, &sp->queued)) return -EFAULT; } break; default: return -ENOTTY; } return ret; } #ifdef CONFIG_COMPAT static long mon_bin_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mon_reader_bin *rp = file->private_data; int ret; switch (cmd) { case MON_IOCX_GET32: case MON_IOCX_GETX32: { struct mon_bin_get32 getb; if (copy_from_user(&getb, (void __user *)arg, sizeof(struct mon_bin_get32))) return -EFAULT; ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32), (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1, compat_ptr(getb.data32), getb.alloc32); if (ret < 0) return ret; } return 0; case MON_IOCX_MFETCH32: { struct mon_bin_mfetch32 mfetch; struct mon_bin_mfetch32 __user *uptr; uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg); if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) return -EFAULT; if (mfetch.nflush32) { ret = mon_bin_flush(rp, mfetch.nflush32); if (ret < 0) return ret; if (put_user(ret, &uptr->nflush32)) return -EFAULT; } ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32), mfetch.nfetch32); if (ret < 0) return ret; if (put_user(ret, &uptr->nfetch32)) return -EFAULT; } return 0; case MON_IOCG_STATS: return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); case MON_IOCQ_URB_LEN: case MON_IOCQ_RING_SIZE: case MON_IOCT_RING_SIZE: case MON_IOCH_MFLUSH: return mon_bin_ioctl(file, cmd, arg); default: ; } return -ENOTTY; } #endif /* CONFIG_COMPAT */ static __poll_t mon_bin_poll(struct file *file, struct poll_table_struct *wait) { struct mon_reader_bin *rp = file->private_data; __poll_t mask = 0; unsigned long flags; if (file->f_mode & FMODE_READ) poll_wait(file, &rp->b_wait, wait); spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) mask |= EPOLLIN | EPOLLRDNORM; /* readable */ spin_unlock_irqrestore(&rp->b_lock, flags); return mask; } /* * open and close: just keep track of how many times the device is * mapped, to use the proper memory allocation function. */ static void mon_bin_vma_open(struct vm_area_struct *vma) { struct mon_reader_bin *rp = vma->vm_private_data; unsigned long flags; spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active++; spin_unlock_irqrestore(&rp->b_lock, flags); } static void mon_bin_vma_close(struct vm_area_struct *vma) { unsigned long flags; struct mon_reader_bin *rp = vma->vm_private_data; spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active--; spin_unlock_irqrestore(&rp->b_lock, flags); } /* * Map ring pages to user space. 
*/ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf) { struct mon_reader_bin *rp = vmf->vma->vm_private_data; unsigned long offset, chunk_idx; struct page *pageptr; unsigned long flags; spin_lock_irqsave(&rp->b_lock, flags); offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rp->b_size) { spin_unlock_irqrestore(&rp->b_lock, flags); return VM_FAULT_SIGBUS; } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); vmf->page = pageptr; spin_unlock_irqrestore(&rp->b_lock, flags); return 0; } static const struct vm_operations_struct mon_bin_vm_ops = { .open = mon_bin_vma_open, .close = mon_bin_vma_close, .fault = mon_bin_vma_fault, }; static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) { /* don't do anything here: "fault" will set up page table entries */ vma->vm_ops = &mon_bin_vm_ops; if (vma->vm_flags & VM_WRITE) return -EPERM; vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE); vma->vm_private_data = filp->private_data; mon_bin_vma_open(vma); return 0; } static const struct file_operations mon_fops_binary = { .owner = THIS_MODULE, .open = mon_bin_open, .read = mon_bin_read, /* .write = mon_text_write, */ .poll = mon_bin_poll, .unlocked_ioctl = mon_bin_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mon_bin_compat_ioctl, #endif .release = mon_bin_release, .mmap = mon_bin_mmap, }; static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp) { DECLARE_WAITQUEUE(waita, current); unsigned long flags; add_wait_queue(&rp->b_wait, &waita); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&rp->b_lock, flags); while (MON_RING_EMPTY(rp)) { spin_unlock_irqrestore(&rp->b_lock, flags); if (file->f_flags & O_NONBLOCK) { set_current_state(TASK_RUNNING); remove_wait_queue(&rp->b_wait, &waita); return -EWOULDBLOCK; /* Same as EAGAIN in Linux */ } schedule(); if (signal_pending(current)) { remove_wait_queue(&rp->b_wait, &waita); return -EINTR; } set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&rp->b_lock, flags); } spin_unlock_irqrestore(&rp->b_lock, flags); set_current_state(TASK_RUNNING); remove_wait_queue(&rp->b_wait, &waita); return 0; } static int mon_alloc_buff(struct mon_pgmap *map, int npages) { int n; unsigned long vaddr; for (n = 0; n < npages; n++) { vaddr = get_zeroed_page(GFP_KERNEL); if (vaddr == 0) { while (n-- != 0) free_page((unsigned long) map[n].ptr); return -ENOMEM; } map[n].ptr = (unsigned char *) vaddr; map[n].pg = virt_to_page((void *) vaddr); } return 0; } static void mon_free_buff(struct mon_pgmap *map, int npages) { int n; for (n = 0; n < npages; n++) free_page((unsigned long) map[n].ptr); } int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus) { struct device *dev; unsigned minor = ubus? ubus->busnum: 0; if (minor >= MON_BIN_MAX_MINOR) return 0; dev = device_create(&mon_bin_class, ubus ? 
ubus->controller : NULL, MKDEV(MAJOR(mon_bin_dev0), minor), NULL, "usbmon%d", minor); if (IS_ERR(dev)) return 0; mbus->classdev = dev; return 1; } void mon_bin_del(struct mon_bus *mbus) { device_destroy(&mon_bin_class, mbus->classdev->devt); } int __init mon_bin_init(void) { int rc; rc = class_register(&mon_bin_class); if (rc) goto err_class; rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon"); if (rc < 0) goto err_dev; cdev_init(&mon_bin_cdev, &mon_fops_binary); mon_bin_cdev.owner = THIS_MODULE; rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR); if (rc < 0) goto err_add; return 0; err_add: unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); err_dev: class_unregister(&mon_bin_class); err_class: return rc; } void mon_bin_exit(void) { cdev_del(&mon_bin_cdev); unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); class_unregister(&mon_bin_class); }
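/*
 * Illustration only, not part of the driver above: a minimal userspace
 * consumer of the binary API, using plain read(), which (per
 * mon_bin_read()) returns one 48-byte API-0 header followed by the
 * captured data. The struct mirrors the leading 48 bytes of mon_bin_hdr
 * (the documented usbmon_packet layout); "/dev/usbmon0" is bus 0, i.e.
 * all buses. This sketch assumes each event's data fits the buffer; a
 * real reader must handle partial reads of one event, or use the
 * MON_IOCX_GETX / mfetch interfaces instead. Usually needs root.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct usbmon_packet {		/* leading 48 bytes of mon_bin_hdr */
	uint64_t id;
	unsigned char type;	/* 'S'ubmit, 'C'allback, 'E'rror */
	unsigned char xfer_type;
	unsigned char epnum;	/* direction bit 0x80 plus endpoint */
	unsigned char devnum;
	uint16_t busnum;
	char flag_setup;
	char flag_data;
	int64_t ts_sec;
	int32_t ts_usec;
	int32_t status;
	uint32_t len_urb;
	uint32_t len_cap;
	unsigned char setup[8];
};

int main(void)
{
	unsigned char buf[48 + 4096];
	const struct usbmon_packet *hdr = (const void *)buf;
	int fd = open("/dev/usbmon0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/usbmon0");
		return 1;
	}
	for (int i = 0; i < 16; i++) {
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n < 48)
			break;
		printf("%c bus=%u dev=%u ep=0x%02x len=%u/%u\n",
		       hdr->type, hdr->busnum, hdr->devnum, hdr->epnum,
		       hdr->len_urb, hdr->len_cap);
	}
	close(fd);
	return 0;
}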
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_INTERNAL_H
#define __X86_KERNEL_FPU_INTERNAL_H

extern struct fpstate init_fpstate;

/* CPU feature check wrappers */
static __always_inline __pure bool use_xsave(void)
{
	return cpu_feature_enabled(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return cpu_feature_enabled(X86_FEATURE_FXSR);
}

#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ BUILD_BUG_ON_INVALID(x); 0; })
#endif

/* Used in init.c */
extern void fpstate_init_user(struct fpstate *fpstate);
extern void fpstate_reset(struct fpu *fpu);

#endif
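/*
 * Editor's illustrative sketch (not part of the original header):
 * fpu_example_save_variant() is a hypothetical caller showing how the
 * wrappers above are meant to be used.  use_xsave()/use_fxsr() reduce to
 * cheap, boot-patched feature tests, and WARN_ON_FPU() emits no code
 * unless CONFIG_X86_DEBUG_FPU=y (the BUILD_BUG_ON_INVALID() variant
 * still type-checks its argument at compile time).
 */
static __always_inline int fpu_example_save_variant(void)
{
	WARN_ON_FPU(!irqs_disabled());	/* debug-build-only sanity check */

	if (use_xsave())
		return 2;		/* XSAVE/XRSTOR family would be used */
	if (use_fxsr())
		return 1;		/* FXSAVE/FXRSTOR would be used */
	return 0;			/* legacy FNSAVE/FRSTOR fallback */
}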
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtbitmap.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_iomap.h"
#include "xfs_health.h"
#include "xfs_bmap_item.h"
#include "xfs_symlink_remote.h"
#include "xfs_inode_util.h"
#include "xfs_rtgroup.h"
#include "xfs_zone_alloc.h"

struct kmem_cache *xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem. Done once, during mount.
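 *
 * Illustrative walk-through with made-up numbers (editor's addition, not
 * from the source): suppose maxleafents = 10,000, minleafrecs =
 * minnoderecs = 16, and maxrootrecs = 9.  maxblocks starts at
 * howmany(10000, 16) = 625 leaf blocks; pass 1 reduces that to
 * howmany(625, 16) = 40 node blocks, pass 2 to howmany(40, 16) = 3, and
 * since 3 <= maxrootrecs the next pass folds everything into the root,
 * leaving m_bm_maxlevels = 4.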
*/ void xfs_bmap_compute_maxlevels( xfs_mount_t *mp, /* file system mount structure */ int whichfork) /* data or attr fork */ { uint64_t maxblocks; /* max blocks at this level */ xfs_extnum_t maxleafents; /* max leaf entries possible */ int level; /* btree level */ int maxrootrecs; /* max records in root block */ int minleafrecs; /* min records in leaf block */ int minnoderecs; /* min records in node block */ int sz; /* root block size */ /* * The maximum number of extents in a fork, hence the maximum number of * leaf entries, is controlled by the size of the on-disk extent count. * * Note that we can no longer assume that if we are in ATTR1 that the * fork offset of all the inodes will be * (xfs_default_attroffset(ip) >> 3) because we could have mounted with * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed * but probably at various positions. Therefore, for both ATTR1 and * ATTR2 we have to assume the worst case scenario of a minimum size * available. */ maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp), whichfork); if (whichfork == XFS_DATA_FORK) sz = xfs_bmdr_space_calc(MINDBTPTRS); else sz = xfs_bmdr_space_calc(MINABTPTRS); maxrootrecs = xfs_bmdr_maxrecs(sz, 0); minleafrecs = mp->m_bmap_dmnr[0]; minnoderecs = mp->m_bmap_dmnr[1]; maxblocks = howmany_64(maxleafents, minleafrecs); for (level = 1; maxblocks > 1; level++) { if (maxblocks <= maxrootrecs) maxblocks = 1; else maxblocks = howmany_64(maxblocks, minnoderecs); } mp->m_bm_maxlevels[whichfork] = level; ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk()); } unsigned int xfs_bmap_compute_attr_offset( struct xfs_mount *mp) { if (mp->m_sb.sb_inodesize == 256) return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS); return xfs_bmdr_space_calc(6 * MINABTPTRS); } STATIC int /* error */ xfs_bmbt_lookup_eq( struct xfs_btree_cur *cur, struct xfs_bmbt_irec *irec, int *stat) /* success/failure */ { cur->bc_rec.b = *irec; return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); } STATIC int /* error */ xfs_bmbt_lookup_first( struct xfs_btree_cur *cur, int *stat) /* success/failure */ { cur->bc_rec.b.br_startoff = 0; cur->bc_rec.b.br_startblock = 0; cur->bc_rec.b.br_blockcount = 0; return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); } /* * Check if the inode needs to be converted to btree format. */ static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); return whichfork != XFS_COW_FORK && ifp->if_format == XFS_DINODE_FMT_EXTENTS && ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork); } /* * Check if the inode should be converted to extent format. */ static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); return whichfork != XFS_COW_FORK && ifp->if_format == XFS_DINODE_FMT_BTREE && ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork); } /* * Update the record referred to by cur to the value given by irec * This either works (return 0) or gets an EFSCORRUPTED error. */ STATIC int xfs_bmbt_update( struct xfs_btree_cur *cur, struct xfs_bmbt_irec *irec) { union xfs_btree_rec rec; xfs_bmbt_disk_set_all(&rec.bmbt, irec); return xfs_btree_update(cur, &rec); } /* * Compute the worst-case number of indirect blocks that will be used * for ip's delayed extent of length "len". 
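 *
 * Illustrative walk-through with made-up numbers (editor's addition, not
 * from the source): for a delayed extent of len = 1000 blocks with
 * m_bmap_dmxr[0] = m_bmap_dmxr[1] = 125 records per block, level 0 needs
 * howmany(1000, 125) = 8 leaf blocks and level 1 needs howmany(8, 125) =
 * 1 node block; once a level needs only one block, each remaining level
 * costs one block, so with XFS_BM_MAXLEVELS = 5 the worst case is
 * 8 + 1 + 3 = 12 blocks.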
*/ xfs_filblks_t xfs_bmap_worst_indlen( struct xfs_inode *ip, /* incore inode pointer */ xfs_filblks_t len) /* delayed extent length */ { struct xfs_mount *mp = ip->i_mount; int maxrecs = mp->m_bmap_dmxr[0]; int level; xfs_filblks_t rval; for (level = 0, rval = 0; level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); level++) { len += maxrecs - 1; do_div(len, maxrecs); rval += len; if (len == 1) return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - level - 1; if (level == 0) maxrecs = mp->m_bmap_dmxr[1]; } return rval; } /* * Calculate the default attribute fork offset for newly created inodes. */ uint xfs_default_attroffset( struct xfs_inode *ip) { if (ip->i_df.if_format == XFS_DINODE_FMT_DEV) return roundup(sizeof(xfs_dev_t), 8); return M_IGEO(ip->i_mount)->attr_fork_offset; } /* * Helper routine to reset inode i_forkoff field when switching attribute fork * from local to extent format - we reset it where possible to make space * available for inline data fork extents. */ STATIC void xfs_bmap_forkoff_reset( xfs_inode_t *ip, int whichfork) { if (whichfork == XFS_ATTR_FORK && ip->i_df.if_format != XFS_DINODE_FMT_DEV && ip->i_df.if_format != XFS_DINODE_FMT_BTREE) { uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; if (dfl_forkoff > ip->i_forkoff) ip->i_forkoff = dfl_forkoff; } } static int xfs_bmap_read_buf( struct xfs_mount *mp, /* file system mount point */ struct xfs_trans *tp, /* transaction pointer */ xfs_fsblock_t fsbno, /* file system block number */ struct xfs_buf **bpp) /* buffer for fsbno */ { struct xfs_buf *bp; /* return value */ int error; if (!xfs_verify_fsbno(mp, fsbno)) return -EFSCORRUPTED; error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp, &xfs_bmbt_buf_ops); if (!error) { xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF); *bpp = bp; } return error; } #ifdef DEBUG STATIC struct xfs_buf * xfs_bmap_get_bp( struct xfs_btree_cur *cur, xfs_fsblock_t bno) { struct xfs_log_item *lip; int i; if (!cur) return NULL; for (i = 0; i < cur->bc_maxlevels; i++) { if (!cur->bc_levels[i].bp) break; if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno) return cur->bc_levels[i].bp; } /* Chase down all the log items to see if the bp is there */ list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) { struct xfs_buf_log_item *bip = (struct xfs_buf_log_item *)lip; if (bip->bli_item.li_type == XFS_LI_BUF && xfs_buf_daddr(bip->bli_buf) == bno) return bip->bli_buf; } return NULL; } STATIC void xfs_check_block( struct xfs_btree_block *block, xfs_mount_t *mp, int root, short sz) { int i, j, dmxr; __be64 *pp, *thispa; /* pointer to block address */ xfs_bmbt_key_t *prevp, *keyp; ASSERT(be16_to_cpu(block->bb_level) > 0); prevp = NULL; for( i = 1; i <= xfs_btree_get_numrecs(block); i++) { dmxr = mp->m_bmap_dmxr[0]; keyp = xfs_bmbt_key_addr(mp, block, i); if (prevp) { ASSERT(be64_to_cpu(prevp->br_startoff) < be64_to_cpu(keyp->br_startoff)); } prevp = keyp; /* * Compare the block numbers to see if there are dups. 
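 * (Editor's note: this is a DEBUG-only O(n^2) scan; every sibling
 * pointer in the block is compared against every later one, and any
 * match forces a SHUTDOWN_CORRUPT_INCORE filesystem shutdown.)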
*/ if (root) pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz); else pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr); for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) { if (root) thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz); else thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr); if (*thispa == *pp) { xfs_warn(mp, "%s: thispa(%d) == pp(%d) %llu", __func__, j, i, (unsigned long long)be64_to_cpu(*thispa)); xfs_err(mp, "%s: ptrs are equal in node\n", __func__); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); } } } } /* * Check that the extents for the inode ip are in the right order in all * btree leaves. This becomes prohibitively expensive for large extent count * files, so don't bother with inodes that have more than 10,000 extents in * them. The btree record ordering checks will still be done, and for such * large bmapbt constructs those checks will catch most corruptions. */ STATIC void xfs_bmap_check_leaf_extents( struct xfs_btree_cur *cur, /* btree cursor or null */ xfs_inode_t *ip, /* incore inode pointer */ int whichfork) /* data or attr fork */ { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_btree_block *block; /* current btree block */ xfs_fsblock_t bno; /* block # of "block" */ struct xfs_buf *bp; /* buffer for "block" */ int error; /* error return value */ xfs_extnum_t i = 0, j; /* index into the extents list */ int level; /* btree level, for checking */ __be64 *pp; /* pointer to block address */ xfs_bmbt_rec_t *ep; /* pointer to current extent */ xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */ xfs_bmbt_rec_t *nextp; /* pointer to next extent */ int bp_release = 0; if (ifp->if_format != XFS_DINODE_FMT_BTREE) return; /* skip large extent count inodes */ if (ip->i_df.if_nextents > 10000) return; bno = NULLFSBLOCK; block = ifp->if_broot; /* * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. */ level = be16_to_cpu(block->bb_level); ASSERT(level > 0); xfs_check_block(block, mp, 1, ifp->if_broot_bytes); pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes); bno = be64_to_cpu(*pp); ASSERT(bno != NULLFSBLOCK); ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); /* * Go down the tree until leaf level is reached, following the first * pointer (leftmost) at each level. */ while (level-- > 0) { /* See if buf is in cur first */ bp_release = 0; bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); if (!bp) { bp_release = 1; error = xfs_bmap_read_buf(mp, NULL, bno, &bp); if (xfs_metadata_is_sick(error)) xfs_btree_mark_sick(cur); if (error) goto error_norelse; } block = XFS_BUF_TO_BLOCK(bp); if (level == 0) break; /* * Check this block for basic sanity (increasing keys and * no duplicate blocks). */ xfs_check_block(block, mp, 0, 0); pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]); bno = be64_to_cpu(*pp); if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto error0; } if (bp_release) { bp_release = 0; xfs_trans_brelse(NULL, bp); } } /* * Here with bp and block set to the leftmost leaf node in the tree. */ i = 0; /* * Loop over all leaf nodes checking that all extents are in the right order. */ for (;;) { xfs_fsblock_t nextbno; xfs_extnum_t num_recs; num_recs = xfs_btree_get_numrecs(block); /* * Read-ahead the next leaf block, if any. */ nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); /* * Check all the extents to make sure they are OK.
* If we had a previous block, the last entry should * conform with the first entry in this one. */ ep = xfs_bmbt_rec_addr(mp, block, 1); if (i) { ASSERT(xfs_bmbt_disk_get_startoff(&last) + xfs_bmbt_disk_get_blockcount(&last) <= xfs_bmbt_disk_get_startoff(ep)); } for (j = 1; j < num_recs; j++) { nextp = xfs_bmbt_rec_addr(mp, block, j + 1); ASSERT(xfs_bmbt_disk_get_startoff(ep) + xfs_bmbt_disk_get_blockcount(ep) <= xfs_bmbt_disk_get_startoff(nextp)); ep = nextp; } last = *ep; i += num_recs; if (bp_release) { bp_release = 0; xfs_trans_brelse(NULL, bp); } bno = nextbno; /* * If we've reached the end, stop. */ if (bno == NULLFSBLOCK) break; bp_release = 0; bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); if (!bp) { bp_release = 1; error = xfs_bmap_read_buf(mp, NULL, bno, &bp); if (xfs_metadata_is_sick(error)) xfs_btree_mark_sick(cur); if (error) goto error_norelse; } block = XFS_BUF_TO_BLOCK(bp); } return; error0: xfs_warn(mp, "%s: at error0", __func__); if (bp_release) xfs_trans_brelse(NULL, bp); error_norelse: xfs_warn(mp, "%s: BAD after btree leaves for %llu extents", __func__, i); xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); return; } /* * Validate that the bmbt_irecs being returned from bmapi are valid * given the caller's original parameters. Specifically check the * ranges of the returned irecs to ensure that they only extend beyond * the given parameters if the XFS_BMAPI_ENTIRE flag was set. */ STATIC void xfs_bmap_validate_ret( xfs_fileoff_t bno, xfs_filblks_t len, uint32_t flags, xfs_bmbt_irec_t *mval, int nmap, int ret_nmap) { int i; /* index to map values */ ASSERT(ret_nmap <= nmap); for (i = 0; i < ret_nmap; i++) { ASSERT(mval[i].br_blockcount > 0); if (!(flags & XFS_BMAPI_ENTIRE)) { ASSERT(mval[i].br_startoff >= bno); ASSERT(mval[i].br_blockcount <= len); ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= bno + len); } else { ASSERT(mval[i].br_startoff < bno + len); ASSERT(mval[i].br_startoff + mval[i].br_blockcount > bno); } ASSERT(i == 0 || mval[i - 1].br_startoff + mval[i - 1].br_blockcount == mval[i].br_startoff); ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && mval[i].br_startblock != HOLESTARTBLOCK); ASSERT(mval[i].br_state == XFS_EXT_NORM || mval[i].br_state == XFS_EXT_UNWRITTEN); } } #else #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0) #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0) #endif /* DEBUG */ /* * Inode fork format manipulation functions */ /* * Convert the inode format to extent format if it currently is in btree format, * but the extent list is small enough that it fits into the extent format. * * Since the extents are already in-core, all we have to do is give up the space * for the btree root and pitch the leaf block. 
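 *
 * Editor's note: this is only possible once the tree has shrunk to a
 * single leaf; the asserts below require a level-1 root with exactly one
 * record, i.e. the root points at one child leaf whose extent count
 * already satisfies the xfs_bmap_wants_extents() limit.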
*/ STATIC int /* error */ xfs_bmap_btree_to_extents( struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* incore inode pointer */ struct xfs_btree_cur *cur, /* btree cursor */ int *logflagsp, /* inode logging flags */ int whichfork) /* data or attr fork */ { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_mount *mp = ip->i_mount; struct xfs_btree_block *rblock = ifp->if_broot; struct xfs_btree_block *cblock;/* child btree block */ xfs_fsblock_t cbno; /* child block number */ struct xfs_buf *cbp; /* child block's buffer */ int error; /* error return value */ __be64 *pp; /* ptr to block address */ struct xfs_owner_info oinfo; /* check if we actually need the extent format first: */ if (!xfs_bmap_wants_extents(ip, whichfork)) return 0; ASSERT(cur); ASSERT(whichfork != XFS_COW_FORK); ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); ASSERT(be16_to_cpu(rblock->bb_level) == 1); ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1); pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes); cbno = be64_to_cpu(*pp); #ifdef DEBUG if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) { xfs_btree_mark_sick(cur); return -EFSCORRUPTED; } #endif error = xfs_bmap_read_buf(mp, tp, cbno, &cbp); if (xfs_metadata_is_sick(error)) xfs_btree_mark_sick(cur); if (error) return error; cblock = XFS_BUF_TO_BLOCK(cbp); if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) return error; xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo, XFS_AG_RESV_NONE, 0); if (error) return error; ip->i_nblocks--; xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); xfs_trans_binval(tp, cbp); if (cur->bc_levels[0].bp == cbp) cur->bc_levels[0].bp = NULL; xfs_bmap_broot_realloc(ip, whichfork, 0); ASSERT(ifp->if_broot == NULL); ifp->if_format = XFS_DINODE_FMT_EXTENTS; *logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork); return 0; } /* * Convert an extents-format file into a btree-format file. * The new file will have a root block (in the inode) and a single child block. */ STATIC int /* error */ xfs_bmap_extents_to_btree( struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* incore inode pointer */ struct xfs_btree_cur **curp, /* cursor returned to caller */ int wasdel, /* converting a delayed alloc */ int *logflagsp, /* inode logging flags */ int whichfork) /* data or attr fork */ { struct xfs_btree_block *ablock; /* allocated (child) bt block */ struct xfs_buf *abp; /* buffer for ablock */ struct xfs_alloc_arg args; /* allocation arguments */ struct xfs_bmbt_rec *arp; /* child record pointer */ struct xfs_btree_block *block; /* btree root block */ struct xfs_btree_cur *cur; /* bmap btree cursor */ int error; /* error return value */ struct xfs_ifork *ifp; /* inode fork pointer */ struct xfs_bmbt_key *kp; /* root block key pointer */ struct xfs_mount *mp; /* mount structure */ xfs_bmbt_ptr_t *pp; /* root block address pointer */ struct xfs_iext_cursor icur; struct xfs_bmbt_irec rec; xfs_extnum_t cnt = 0; mp = ip->i_mount; ASSERT(whichfork != XFS_COW_FORK); ifp = xfs_ifork_ptr(ip, whichfork); ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS); /* * Make space in the inode incore. This needs to be undone if we fail * to expand the root. */ block = xfs_bmap_broot_realloc(ip, whichfork, 1); /* * Fill in the root. */ xfs_bmbt_init_block(ip, block, NULL, 1, 1); /* * Need a cursor. Can't allocate until bb_level is filled in. 
*/ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); if (wasdel) cur->bc_flags |= XFS_BTREE_BMBT_WASDEL; /* * Convert to a btree with two levels, one record in root. */ ifp->if_format = XFS_DINODE_FMT_BTREE; memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = mp; xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork); args.minlen = args.maxlen = args.prod = 1; args.wasdel = wasdel; *logflagsp = 0; error = xfs_alloc_vextent_start_ag(&args, XFS_INO_TO_FSB(mp, ip->i_ino)); if (error) goto out_root_realloc; /* * Allocation can't fail, the space was reserved. */ if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { error = -ENOSPC; goto out_root_realloc; } cur->bc_bmap.allocated++; ip->i_nblocks++; xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); error = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, args.fsbno), mp->m_bsize, 0, &abp); if (error) goto out_unreserve_dquot; /* * Fill in the child block. */ ablock = XFS_BUF_TO_BLOCK(abp); xfs_bmbt_init_block(ip, ablock, abp, 0, 0); for_each_xfs_iext(ifp, &icur, &rec) { if (isnullstartblock(rec.br_startblock)) continue; arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt); xfs_bmbt_disk_set_all(arp, &rec); cnt++; } ASSERT(cnt == ifp->if_nextents); xfs_btree_set_numrecs(ablock, cnt); /* * Fill in the root key and pointer. */ kp = xfs_bmbt_key_addr(mp, block, 1); arp = xfs_bmbt_rec_addr(mp, ablock, 1); kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp)); pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur, be16_to_cpu(block->bb_level))); *pp = cpu_to_be64(args.fsbno); /* * Do all this logging at the end so that * the root is at the right level. */ xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS); xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); ASSERT(*curp == NULL); *curp = cur; *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); return 0; out_unreserve_dquot: xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); out_root_realloc: xfs_bmap_broot_realloc(ip, whichfork, 0); ifp->if_format = XFS_DINODE_FMT_EXTENTS; ASSERT(ifp->if_broot == NULL); xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } /* * Convert a local file to an extents file. * This code is out of bounds for data forks of regular files, * since the file data needs to get logged so things will stay consistent. * (The bmap-level manipulations are ok, though). */ void xfs_bmap_local_to_extents_empty( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); ASSERT(whichfork != XFS_COW_FORK); ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); ASSERT(ifp->if_bytes == 0); ASSERT(ifp->if_nextents == 0); xfs_bmap_forkoff_reset(ip, whichfork); ifp->if_data = NULL; ifp->if_height = 0; ifp->if_format = XFS_DINODE_FMT_EXTENTS; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } int /* error */ xfs_bmap_local_to_extents( xfs_trans_t *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extlen_t total, /* total blocks needed by transaction */ int *logflagsp, /* inode logging flags */ int whichfork, void (*init_fn)(struct xfs_trans *tp, struct xfs_buf *bp, struct xfs_inode *ip, struct xfs_ifork *ifp, void *priv), void *priv) { int error = 0; int flags; /* logging flags returned */ struct xfs_ifork *ifp; /* inode fork pointer */ xfs_alloc_arg_t args; /* allocation arguments */ struct xfs_buf *bp; /* buffer for extent block */ struct xfs_bmbt_irec rec; struct xfs_iext_cursor icur; /* * We don't want to deal with the case of keeping inode data inline yet. 
* So sending the data fork of a regular inode is invalid. */ ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK)); ifp = xfs_ifork_ptr(ip, whichfork); ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); if (!ifp->if_bytes) { xfs_bmap_local_to_extents_empty(tp, ip, whichfork); flags = XFS_ILOG_CORE; goto done; } flags = 0; error = 0; memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = ip->i_mount; args.total = total; args.minlen = args.maxlen = args.prod = 1; xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0); /* * Allocate a block. We know we need only one, since the * file currently fits in an inode. */ args.total = total; args.minlen = args.maxlen = args.prod = 1; error = xfs_alloc_vextent_start_ag(&args, XFS_INO_TO_FSB(args.mp, ip->i_ino)); if (error) goto done; /* Can't fail, the space was reserved. */ ASSERT(args.fsbno != NULLFSBLOCK); ASSERT(args.len == 1); error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, XFS_FSB_TO_DADDR(args.mp, args.fsbno), args.mp->m_bsize, 0, &bp); if (error) goto done; /* * Initialize the block, copy the data and log the remote buffer. * * The callout is responsible for logging because the remote format * might differ from the local format and thus we don't know how much to * log here. Note that init_fn must also set the buffer log item type * correctly. */ init_fn(tp, bp, ip, ifp, priv); /* account for the change in fork size */ xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); xfs_bmap_local_to_extents_empty(tp, ip, whichfork); flags |= XFS_ILOG_CORE; ifp->if_data = NULL; ifp->if_height = 0; rec.br_startoff = 0; rec.br_startblock = args.fsbno; rec.br_blockcount = 1; rec.br_state = XFS_EXT_NORM; xfs_iext_first(ifp, &icur); xfs_iext_insert(ip, &icur, &rec, 0); ifp->if_nextents = 1; ip->i_nblocks = 1; xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); flags |= xfs_ilog_fext(whichfork); done: *logflagsp = flags; return error; } /* * Called from xfs_bmap_add_attrfork to handle btree format files. */ STATIC int /* error */ xfs_bmap_add_attrfork_btree( xfs_trans_t *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ int *flags) /* inode logging flags */ { struct xfs_btree_block *block = ip->i_df.if_broot; struct xfs_btree_cur *cur; /* btree cursor */ int error; /* error return value */ xfs_mount_t *mp; /* file system mount struct */ int stat; /* newroot status */ mp = ip->i_mount; if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip)) *flags |= XFS_ILOG_DBROOT; else { cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); error = xfs_bmbt_lookup_first(cur, &stat); if (error) goto error0; /* must be at least one entry */ if (XFS_IS_CORRUPT(mp, stat != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto error0; } if ((error = xfs_btree_new_iroot(cur, flags, &stat))) goto error0; if (stat == 0) { xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); return -ENOSPC; } cur->bc_bmap.allocated = 0; xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); } return 0; error0: xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } /* * Called from xfs_bmap_add_attrfork to handle extents format files. 
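 * Editor's note with illustrative arithmetic (sizes are assumptions):
 * each on-disk xfs_bmbt_rec is 16 bytes, so a 512-byte data fork area
 * would hold at most 32 records inline; once a 33rd extent no longer
 * fits, the check below falls through to xfs_bmap_extents_to_btree().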
*/ STATIC int /* error */ xfs_bmap_add_attrfork_extents( struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* incore inode pointer */ int *flags) /* inode logging flags */ { struct xfs_btree_cur *cur; /* bmap btree cursor */ int error; /* error return value */ if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <= xfs_inode_data_fork_size(ip)) return 0; cur = NULL; error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags, XFS_DATA_FORK); if (cur) { cur->bc_bmap.allocated = 0; xfs_btree_del_cursor(cur, error); } return error; } /* * Called from xfs_bmap_add_attrfork to handle local format files. Each * different data fork content type needs a different callout to do the * conversion. Some are basic and only require special block initialisation * callouts for the data formatting, others (directories) are so specialised * they handle everything themselves. * * XXX (dgc): investigate whether directory conversion can use the generic * formatting callout. It should be possible - it's just a very complex * formatter. */ STATIC int /* error */ xfs_bmap_add_attrfork_local( struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* incore inode pointer */ int *flags) /* inode logging flags */ { struct xfs_da_args dargs; /* args for dir/attr code */ if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip)) return 0; if (S_ISDIR(VFS_I(ip)->i_mode)) { memset(&dargs, 0, sizeof(dargs)); dargs.geo = ip->i_mount->m_dir_geo; dargs.dp = ip; dargs.total = dargs.geo->fsbcount; dargs.whichfork = XFS_DATA_FORK; dargs.trans = tp; dargs.owner = ip->i_ino; return xfs_dir2_sf_to_block(&dargs); } if (S_ISLNK(VFS_I(ip)->i_mode)) return xfs_bmap_local_to_extents(tp, ip, 1, flags, XFS_DATA_FORK, xfs_symlink_local_to_remote, NULL); /* should only be called for types that support local format data */ ASSERT(0); xfs_bmap_mark_sick(ip, XFS_ATTR_FORK); return -EFSCORRUPTED; } /* * Set an inode attr fork offset based on the format of the data fork. */ static int xfs_bmap_set_attrforkoff( struct xfs_inode *ip, int size) { int default_size = xfs_default_attroffset(ip) >> 3; switch (ip->i_df.if_format) { case XFS_DINODE_FMT_DEV: ip->i_forkoff = default_size; break; case XFS_DINODE_FMT_LOCAL: case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size); if (!ip->i_forkoff) ip->i_forkoff = default_size; break; default: ASSERT(0); return -EINVAL; } return 0; } /* * Convert inode from non-attributed to attributed. Caller must hold the * ILOCK_EXCL and the file cannot have an attr fork.
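 *
 * Editor's note: the sequence below computes the fork offset first,
 * initialises an empty attr fork in extents format, then migrates the
 * data fork (via the local/extents/btree callouts above) to make room.
 * The superblock ATTR feature bits are set under m_sb_lock the first
 * time any inode on the filesystem grows an attr fork.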
*/ int /* error code */ xfs_bmap_add_attrfork( struct xfs_trans *tp, struct xfs_inode *ip, /* incore inode pointer */ int size, /* space new attribute needs */ int rsvd) /* xact may use reserved blks */ { struct xfs_mount *mp = tp->t_mountp; int logflags; /* logging flags */ int error; /* error return value */ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); if (!xfs_is_metadir_inode(ip)) ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); ASSERT(!xfs_inode_has_attr_fork(ip)); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_bmap_set_attrforkoff(ip, size); if (error) return error; xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0); logflags = 0; switch (ip->i_df.if_format) { case XFS_DINODE_FMT_LOCAL: error = xfs_bmap_add_attrfork_local(tp, ip, &logflags); break; case XFS_DINODE_FMT_EXTENTS: error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags); break; case XFS_DINODE_FMT_BTREE: error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags); break; default: error = 0; break; } if (logflags) xfs_trans_log_inode(tp, ip, logflags); if (error) return error; if (!xfs_has_attr(mp)) { bool log_sb = false; spin_lock(&mp->m_sb_lock); if (!xfs_has_attr(mp)) { xfs_add_attr(mp); xfs_add_attr2(mp); log_sb = true; } spin_unlock(&mp->m_sb_lock); if (log_sb) xfs_log_sb(tp); } return 0; } /* * Internal and external extent tree search functions. */ struct xfs_iread_state { struct xfs_iext_cursor icur; xfs_extnum_t loaded; }; int xfs_bmap_complain_bad_rec( struct xfs_inode *ip, int whichfork, xfs_failaddr_t fa, const struct xfs_bmbt_irec *irec) { struct xfs_mount *mp = ip->i_mount; const char *forkname; switch (whichfork) { case XFS_DATA_FORK: forkname = "data"; break; case XFS_ATTR_FORK: forkname = "attr"; break; case XFS_COW_FORK: forkname = "CoW"; break; default: forkname = "???"; break; } xfs_warn(mp, "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!", ip->i_ino, forkname, fa); xfs_warn(mp, "Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x", irec->br_startoff, irec->br_startblock, irec->br_blockcount, irec->br_state); return -EFSCORRUPTED; } /* Stuff every bmbt record from this block into the incore extent map. */ static int xfs_iread_bmbt_block( struct xfs_btree_cur *cur, int level, void *priv) { struct xfs_iread_state *ir = priv; struct xfs_mount *mp = cur->bc_mp; struct xfs_inode *ip = cur->bc_ino.ip; struct xfs_btree_block *block; struct xfs_buf *bp; struct xfs_bmbt_rec *frp; xfs_extnum_t num_recs; xfs_extnum_t j; int whichfork = cur->bc_ino.whichfork; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); block = xfs_btree_get_block(cur, level, &bp); /* Abort if we find more records than nextents. */ num_recs = xfs_btree_get_numrecs(block); if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) { xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).", (unsigned long long)ip->i_ino); xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block, sizeof(*block), __this_address); xfs_bmap_mark_sick(ip, whichfork); return -EFSCORRUPTED; } /* Copy records into the incore cache. 
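* * Each on-disk record is validated before it is inserted; a record that * fails validation marks the fork sick and aborts the load with * -EFSCORRUPTED instead of letting a bad mapping into the incore tree.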
*/ frp = xfs_bmbt_rec_addr(mp, block, 1); for (j = 0; j < num_recs; j++, frp++, ir->loaded++) { struct xfs_bmbt_irec new; xfs_failaddr_t fa; xfs_bmbt_disk_get_all(frp, &new); fa = xfs_bmap_validate_extent(ip, whichfork, &new); if (fa) { xfs_inode_verifier_error(ip, -EFSCORRUPTED, "xfs_iread_extents(2)", frp, sizeof(*frp), fa); xfs_bmap_mark_sick(ip, whichfork); return xfs_bmap_complain_bad_rec(ip, whichfork, fa, &new); } xfs_iext_insert(ip, &ir->icur, &new, xfs_bmap_fork_to_state(whichfork)); trace_xfs_read_extent(ip, &ir->icur, xfs_bmap_fork_to_state(whichfork), _THIS_IP_); xfs_iext_next(ifp, &ir->icur); } return 0; } /* * Read in extents from a btree-format inode. */ int xfs_iread_extents( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork) { struct xfs_iread_state ir; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_mount *mp = ip->i_mount; struct xfs_btree_cur *cur; int error; if (!xfs_need_iread_extents(ifp)) return 0; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); ir.loaded = 0; xfs_iext_first(ifp, &ir.icur); cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block, XFS_BTREE_VISIT_RECORDS, &ir); xfs_btree_del_cursor(cur, error); if (error) goto out; if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) { xfs_bmap_mark_sick(ip, whichfork); error = -EFSCORRUPTED; goto out; } ASSERT(ir.loaded == xfs_iext_count(ifp)); /* * Use release semantics so that we can use acquire semantics in * xfs_need_iread_extents and be guaranteed to see a valid mapping tree * after that load. */ smp_store_release(&ifp->if_needextents, 0); return 0; out: if (xfs_metadata_is_sick(error)) xfs_bmap_mark_sick(ip, whichfork); xfs_iext_destroy(ifp); return error; } /* * Returns the relative block number of the first unused block(s) in the given * fork with at least "len" logically contiguous blocks free. This is the * lowest-address hole if the fork has holes, else the first block past the end * of fork. Return 0 if the fork is currently local (in-inode). */ int /* error */ xfs_bmap_first_unused( struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* incore inode */ xfs_extlen_t len, /* size of hole to find */ xfs_fileoff_t *first_unused, /* unused block */ int whichfork) /* data or attr fork */ { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_bmbt_irec got; struct xfs_iext_cursor icur; xfs_fileoff_t lastaddr = 0; xfs_fileoff_t lowest, max; int error; if (ifp->if_format == XFS_DINODE_FMT_LOCAL) { *first_unused = 0; return 0; } ASSERT(xfs_ifork_has_extents(ifp)); error = xfs_iread_extents(tp, ip, whichfork); if (error) return error; lowest = max = *first_unused; for_each_xfs_iext(ifp, &icur, &got) { /* * See if the hole before this extent will work. */ if (got.br_startoff >= lowest + len && got.br_startoff - max >= len) break; lastaddr = got.br_startoff + got.br_blockcount; max = XFS_FILEOFF_MAX(lastaddr, lowest); } *first_unused = max; return 0; } /* * Returns the file-relative block number of the last block - 1 before * last_block (input value) in the file. * This is not based on i_size, it is based on the extent records. * Returns 0 for local files, as they do not have extent records. 
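* * The extent lookup below walks backwards from *last_block; if no extent * record starts before it, *last_block is simply reset to zero.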
*/ int /* error */ xfs_bmap_last_before( struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* incore inode */ xfs_fileoff_t *last_block, /* last block */ int whichfork) /* data or attr fork */ { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_bmbt_irec got; struct xfs_iext_cursor icur; int error; switch (ifp->if_format) { case XFS_DINODE_FMT_LOCAL: *last_block = 0; return 0; case XFS_DINODE_FMT_BTREE: case XFS_DINODE_FMT_EXTENTS: break; default: ASSERT(0); xfs_bmap_mark_sick(ip, whichfork); return -EFSCORRUPTED; } error = xfs_iread_extents(tp, ip, whichfork); if (error) return error; if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got)) *last_block = 0; return 0; } int xfs_bmap_last_extent( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *rec, int *is_empty) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_iext_cursor icur; int error; error = xfs_iread_extents(tp, ip, whichfork); if (error) return error; xfs_iext_last(ifp, &icur); if (!xfs_iext_get_extent(ifp, &icur, rec)) *is_empty = 1; else *is_empty = 0; return 0; } /* * Check the last inode extent to determine whether this allocation will result * in blocks being allocated at the end of the file. When we allocate new data * blocks at the end of the file which do not start at the previous data block, * we will try to align the new blocks at stripe unit boundaries. * * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be * at, or past the EOF. */ STATIC int xfs_bmap_isaeof( struct xfs_bmalloca *bma, int whichfork) { struct xfs_bmbt_irec rec; int is_empty; int error; bma->aeof = false; error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, &is_empty); if (error) return error; if (is_empty) { bma->aeof = true; return 0; } /* * Check if we are allocating at or past the last extent, or at least * into the last delayed allocated extent. */ bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount || (bma->offset >= rec.br_startoff && isnullstartblock(rec.br_startblock)); return 0; } /* * Returns the file-relative block number of the first block past eof in * the file. This is not based on i_size, it is based on the extent records. * Returns 0 for local files, as they do not have extent records. */ int xfs_bmap_last_offset( struct xfs_inode *ip, xfs_fileoff_t *last_block, int whichfork) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_bmbt_irec rec; int is_empty; int error; *last_block = 0; if (ifp->if_format == XFS_DINODE_FMT_LOCAL) return 0; if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) { xfs_bmap_mark_sick(ip, whichfork); return -EFSCORRUPTED; } error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); if (error || is_empty) return error; *last_block = rec.br_startoff + rec.br_blockcount; return 0; } /* * Extent tree manipulation functions used during allocation. */ static inline bool xfs_bmap_same_rtgroup( struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *left, struct xfs_bmbt_irec *right) { struct xfs_mount *mp = ip->i_mount; if (xfs_ifork_is_realtime(ip, whichfork) && xfs_has_rtgroups(mp)) { if (xfs_rtb_to_rgno(mp, left->br_startblock) != xfs_rtb_to_rgno(mp, right->br_startblock)) return false; } return true; } /* * Convert a delayed allocation to a real allocation.
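* * The delalloc record (PREV) may be filled from the left, from the right, * in the middle, or completely, and the result may also merge with a real * LEFT and/or RIGHT neighbour. The BMAP_*_FILLING and BMAP_*_CONTIG state * bits computed below encode which of these cases applies, and the switch * statement handles each combination separately.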
*/ STATIC int /* error */ xfs_bmap_add_extent_delay_real( struct xfs_bmalloca *bma, int whichfork) { struct xfs_mount *mp = bma->ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); struct xfs_bmbt_irec *new = &bma->got; int error; /* error return value */ int i; /* temp state */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ int rval=0; /* return value (logging flags) */ uint32_t state = xfs_bmap_fork_to_state(whichfork); xfs_filblks_t da_new; /* new count del alloc blocks used */ xfs_filblks_t da_old; /* old count del alloc blocks used */ xfs_filblks_t temp=0; /* value for da_new calculations */ int tmp_rval; /* partial logging flags */ struct xfs_bmbt_irec old; ASSERT(whichfork != XFS_ATTR_FORK); ASSERT(!isnullstartblock(new->br_startblock)); ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL)); XFS_STATS_INC(mp, xs_add_exlist); #define LEFT r[0] #define RIGHT r[1] #define PREV r[2] /* * Set up a bunch of variables to make the tests simpler. */ xfs_iext_get_extent(ifp, &bma->icur, &PREV); new_endoff = new->br_startoff + new->br_blockcount; ASSERT(isnullstartblock(PREV.br_startblock)); ASSERT(PREV.br_startoff <= new->br_startoff); ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); da_old = startblockval(PREV.br_startblock); da_new = 0; /* * Set flags determining what part of the previous delayed allocation * extent is being replaced by a real allocation. */ if (PREV.br_startoff == new->br_startoff) state |= BMAP_LEFT_FILLING; if (PREV.br_startoff + PREV.br_blockcount == new_endoff) state |= BMAP_RIGHT_FILLING; /* * Check and set flags if this segment has a left neighbor. * Don't set contiguous if the combined extent would be too large. */ if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) { state |= BMAP_LEFT_VALID; if (isnullstartblock(LEFT.br_startblock)) state |= BMAP_LEFT_DELAY; } if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && LEFT.br_state == new->br_state && LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN && xfs_bmap_same_rtgroup(bma->ip, whichfork, &LEFT, new)) state |= BMAP_LEFT_CONTIG; /* * Check and set flags if this segment has a right neighbor. * Don't set contiguous if the combined extent would be too large. * Also check for all-three-contiguous being too large. */ if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) { state |= BMAP_RIGHT_VALID; if (isnullstartblock(RIGHT.br_startblock)) state |= BMAP_RIGHT_DELAY; } if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && new_endoff == RIGHT.br_startoff && new->br_startblock + new->br_blockcount == RIGHT.br_startblock && new->br_state == RIGHT.br_state && new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN && ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) != (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING) || LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN) && xfs_bmap_same_rtgroup(bma->ip, whichfork, new, &RIGHT)) state |= BMAP_RIGHT_CONTIG; error = 0; /* * Switch out based on the FILLING and CONTIG state bits. 
*/ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Filling in all of a previously delayed allocation extent. * The left and right neighbors are both contiguous with new. */ LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; xfs_iext_remove(bma->ip, &bma->icur, state); xfs_iext_remove(bma->ip, &bma->icur, state); xfs_iext_prev(ifp, &bma->icur); xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT); ifp->if_nextents--; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_delete(bma->cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_decrement(bma->cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(bma->cur, &LEFT); if (error) goto done; } ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: /* * Filling in all of a previously delayed allocation extent. * The left neighbor is contiguous, the right is not. */ old = LEFT; LEFT.br_blockcount += PREV.br_blockcount; xfs_iext_remove(bma->ip, &bma->icur, state); xfs_iext_prev(ifp, &bma->icur); xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT); if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(bma->cur, &LEFT); if (error) goto done; } ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Filling in all of a previously delayed allocation extent. * The right neighbor is contiguous, the left is not. Take care * with delay -> unwritten extent allocation here because the * delalloc record we are overwriting is always written. */ PREV.br_startblock = new->br_startblock; PREV.br_blockcount += RIGHT.br_blockcount; PREV.br_state = new->br_state; xfs_iext_next(ifp, &bma->icur); xfs_iext_remove(bma->ip, &bma->icur, state); xfs_iext_prev(ifp, &bma->icur); xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(bma->cur, &PREV); if (error) goto done; } ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: /* * Filling in all of a previously delayed allocation extent. * Neither the left nor right neighbors are contiguous with * the new one. 
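* The delalloc record is rewritten in place with the real startblock and * state, and the new extent is inserted into the bmap btree if one * exists, so the extent count grows by one.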
*/ PREV.br_startblock = new->br_startblock; PREV.br_state = new->br_state; xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); ifp->if_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_insert(bma->cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } } ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: /* * Filling in the first part of a previous delayed allocation. * The left neighbor is contiguous. */ old = LEFT; temp = PREV.br_blockcount - new->br_blockcount; da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock)); LEFT.br_blockcount += new->br_blockcount; PREV.br_blockcount = temp; PREV.br_startoff += new->br_blockcount; PREV.br_startblock = nullstartblock(da_new); xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); xfs_iext_prev(ifp, &bma->icur); xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT); if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(bma->cur, &LEFT); if (error) goto done; } ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING: /* * Filling in the first part of a previous delayed allocation. * The left neighbor is not contiguous. */ xfs_iext_update_extent(bma->ip, state, &bma->icur, new); ifp->if_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_insert(bma->cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } } if (xfs_bmap_needs_btree(bma->ip, whichfork)) { error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, &bma->cur, 1, &tmp_rval, whichfork); rval |= tmp_rval; if (error) goto done; } temp = PREV.br_blockcount - new->br_blockcount; da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock) - (bma->cur ? bma->cur->bc_bmap.allocated : 0)); PREV.br_startoff = new_endoff; PREV.br_blockcount = temp; PREV.br_startblock = nullstartblock(da_new); xfs_iext_next(ifp, &bma->icur); xfs_iext_insert(bma->ip, &bma->icur, &PREV, state); xfs_iext_prev(ifp, &bma->icur); break; case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Filling in the last part of a previous delayed allocation. * The right neighbor is contiguous with the new allocation. 
*/ old = RIGHT; RIGHT.br_startoff = new->br_startoff; RIGHT.br_startblock = new->br_startblock; RIGHT.br_blockcount += new->br_blockcount; if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(bma->cur, &RIGHT); if (error) goto done; } temp = PREV.br_blockcount - new->br_blockcount; da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock)); PREV.br_blockcount = temp; PREV.br_startblock = nullstartblock(da_new); xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); xfs_iext_next(ifp, &bma->icur); xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT); ASSERT(da_new <= da_old); break; case BMAP_RIGHT_FILLING: /* * Filling in the last part of a previous delayed allocation. * The right neighbor is not contiguous. */ xfs_iext_update_extent(bma->ip, state, &bma->icur, new); ifp->if_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_insert(bma->cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } } if (xfs_bmap_needs_btree(bma->ip, whichfork)) { error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, &bma->cur, 1, &tmp_rval, whichfork); rval |= tmp_rval; if (error) goto done; } temp = PREV.br_blockcount - new->br_blockcount; da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock) - (bma->cur ? bma->cur->bc_bmap.allocated : 0)); PREV.br_startblock = nullstartblock(da_new); PREV.br_blockcount = temp; xfs_iext_insert(bma->ip, &bma->icur, &PREV, state); xfs_iext_next(ifp, &bma->icur); ASSERT(da_new <= da_old); break; case 0: /* * Filling in the middle part of a previous delayed allocation. * Contiguity is impossible here. * This case is avoided almost all the time. 
* * We start with a delayed allocation: * * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ * PREV @ idx * * and we are allocating: * +rrrrrrrrrrrrrrrrr+ * new * * and we set it up for insertion as: * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ * new * PREV @ idx LEFT RIGHT * inserted at idx + 1 */ old = PREV; /* LEFT is the new middle */ LEFT = *new; /* RIGHT is the new right */ RIGHT.br_state = PREV.br_state; RIGHT.br_startoff = new_endoff; RIGHT.br_blockcount = PREV.br_startoff + PREV.br_blockcount - new_endoff; RIGHT.br_startblock = nullstartblock(xfs_bmap_worst_indlen(bma->ip, RIGHT.br_blockcount)); /* truncate PREV */ PREV.br_blockcount = new->br_startoff - PREV.br_startoff; PREV.br_startblock = nullstartblock(xfs_bmap_worst_indlen(bma->ip, PREV.br_blockcount)); xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); xfs_iext_next(ifp, &bma->icur); xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); ifp->if_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_insert(bma->cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bma->cur); error = -EFSCORRUPTED; goto done; } } if (xfs_bmap_needs_btree(bma->ip, whichfork)) { error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, &bma->cur, 1, &tmp_rval, whichfork); rval |= tmp_rval; if (error) goto done; } da_new = startblockval(PREV.br_startblock) + startblockval(RIGHT.br_startblock); break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_CONTIG: case BMAP_RIGHT_CONTIG: /* * These cases are all impossible. */ ASSERT(0); } /* add reverse mapping unless caller opted out */ if (!(bma->flags & XFS_BMAPI_NORMAP)) xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); /* convert to a btree if necessary */ if (xfs_bmap_needs_btree(bma->ip, whichfork)) { int tmp_logflags; /* partial log flag return val */ ASSERT(bma->cur == NULL); error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, &bma->cur, da_old > 0, &tmp_logflags, whichfork); bma->logflags |= tmp_logflags; if (error) goto done; } if (da_new != da_old) xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old); if (bma->cur) { da_new += bma->cur->bc_bmap.allocated; bma->cur->bc_bmap.allocated = 0; } /* adjust for changes in reserved delayed indirect blocks */ if (da_new < da_old) xfs_add_fdblocks(mp, da_old - da_new); else if (da_new > da_old) error = xfs_dec_fdblocks(mp, da_new - da_old, true); xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); done: if (whichfork != XFS_COW_FORK) bma->logflags |= rval; return error; #undef LEFT #undef RIGHT #undef PREV } /* * Convert an unwritten allocation to a real allocation or vice versa. 
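* * The extent described by @new must already lie within PREV and have the * opposite br_state; the cases below mirror the delalloc conversion above, * merging with contiguous neighbours of the new state where possible.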
*/ int /* error */ xfs_bmap_add_extent_unwritten_real( struct xfs_trans *tp, xfs_inode_t *ip, /* incore inode pointer */ int whichfork, struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp, /* if *curp is null, not a btree */ xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp) /* inode logging flags */ { struct xfs_btree_cur *cur; /* btree cursor */ int error; /* error return value */ int i; /* temp state */ struct xfs_ifork *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ int rval=0; /* return value (logging flags) */ uint32_t state = xfs_bmap_fork_to_state(whichfork); struct xfs_mount *mp = ip->i_mount; struct xfs_bmbt_irec old; *logflagsp = 0; cur = *curp; ifp = xfs_ifork_ptr(ip, whichfork); ASSERT(!isnullstartblock(new->br_startblock)); XFS_STATS_INC(mp, xs_add_exlist); #define LEFT r[0] #define RIGHT r[1] #define PREV r[2] /* * Set up a bunch of variables to make the tests simpler. */ error = 0; xfs_iext_get_extent(ifp, icur, &PREV); ASSERT(new->br_state != PREV.br_state); new_endoff = new->br_startoff + new->br_blockcount; ASSERT(PREV.br_startoff <= new->br_startoff); ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); /* * Set flags determining what part of the previous oldext allocation * extent is being replaced by a newext allocation. */ if (PREV.br_startoff == new->br_startoff) state |= BMAP_LEFT_FILLING; if (PREV.br_startoff + PREV.br_blockcount == new_endoff) state |= BMAP_RIGHT_FILLING; /* * Check and set flags if this segment has a left neighbor. * Don't set contiguous if the combined extent would be too large. */ if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { state |= BMAP_LEFT_VALID; if (isnullstartblock(LEFT.br_startblock)) state |= BMAP_LEFT_DELAY; } if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && LEFT.br_state == new->br_state && LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN && xfs_bmap_same_rtgroup(ip, whichfork, &LEFT, new)) state |= BMAP_LEFT_CONTIG; /* * Check and set flags if this segment has a right neighbor. * Don't set contiguous if the combined extent would be too large. * Also check for all-three-contiguous being too large. */ if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { state |= BMAP_RIGHT_VALID; if (isnullstartblock(RIGHT.br_startblock)) state |= BMAP_RIGHT_DELAY; } if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && new_endoff == RIGHT.br_startoff && new->br_startblock + new->br_blockcount == RIGHT.br_startblock && new->br_state == RIGHT.br_state && new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN && ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) != (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING) || LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN) && xfs_bmap_same_rtgroup(ip, whichfork, new, &RIGHT)) state |= BMAP_RIGHT_CONTIG; /* * Switch out based on the FILLING and CONTIG state bits. */ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The left and right neighbors are both contiguous with new. 
*/ LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; xfs_iext_remove(ip, icur, state); xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &LEFT); ifp->if_nextents -= 2; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_delete(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_delete(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &LEFT); if (error) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The left neighbor is contiguous, the right is not. */ LEFT.br_blockcount += PREV.br_blockcount; xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &LEFT); ifp->if_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, &PREV, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_delete(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &LEFT); if (error) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The right neighbor is contiguous, the left is not. */ PREV.br_blockcount += RIGHT.br_blockcount; PREV.br_state = new->br_state; xfs_iext_next(ifp, icur); xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &PREV); ifp->if_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_delete(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &PREV); if (error) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: /* * Setting all of a previous oldext extent to newext. * Neither the left nor right neighbors are contiguous with * the new one. 
*/ PREV.br_state = new->br_state; xfs_iext_update_extent(ip, state, icur, &PREV); if (cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &PREV); if (error) goto done; } break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is contiguous. */ LEFT.br_blockcount += new->br_blockcount; old = PREV; PREV.br_startoff += new->br_blockcount; PREV.br_startblock += new->br_blockcount; PREV.br_blockcount -= new->br_blockcount; xfs_iext_update_extent(ip, state, icur, &PREV); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &LEFT); if (cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &PREV); if (error) goto done; error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; error = xfs_bmbt_update(cur, &LEFT); if (error) goto done; } break; case BMAP_LEFT_FILLING: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is not contiguous. */ old = PREV; PREV.br_startoff += new->br_blockcount; PREV.br_startblock += new->br_blockcount; PREV.br_blockcount -= new->br_blockcount; xfs_iext_update_extent(ip, state, icur, &PREV); xfs_iext_insert(ip, icur, new, state); ifp->if_nextents++; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &PREV); if (error) goto done; cur->bc_rec.b = *new; if ((error = xfs_btree_insert(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } } break; case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is contiguous with the new allocation. */ old = PREV; PREV.br_blockcount -= new->br_blockcount; RIGHT.br_startoff = new->br_startoff; RIGHT.br_startblock = new->br_startblock; RIGHT.br_blockcount += new->br_blockcount; xfs_iext_update_extent(ip, state, icur, &PREV); xfs_iext_next(ifp, icur); xfs_iext_update_extent(ip, state, icur, &RIGHT); if (cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &PREV); if (error) goto done; error = xfs_btree_increment(cur, 0, &i); if (error) goto done; error = xfs_bmbt_update(cur, &RIGHT); if (error) goto done; } break; case BMAP_RIGHT_FILLING: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is not contiguous. 
*/ old = PREV; PREV.br_blockcount -= new->br_blockcount; xfs_iext_update_extent(ip, state, icur, &PREV); xfs_iext_next(ifp, icur); xfs_iext_insert(ip, icur, new, state); ifp->if_nextents++; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &PREV); if (error) goto done; error = xfs_bmbt_lookup_eq(cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } if ((error = xfs_btree_insert(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } } break; case 0: /* * Setting the middle part of a previous oldext extent to * newext. Contiguity is impossible here. * One extent becomes three extents. */ old = PREV; PREV.br_blockcount = new->br_startoff - PREV.br_startoff; r[0] = *new; r[1].br_startoff = new_endoff; r[1].br_blockcount = old.br_startoff + old.br_blockcount - new_endoff; r[1].br_startblock = new->br_startblock + new->br_blockcount; r[1].br_state = PREV.br_state; xfs_iext_update_extent(ip, state, icur, &PREV); xfs_iext_next(ifp, icur); xfs_iext_insert(ip, icur, &r[1], state); xfs_iext_insert(ip, icur, &r[0], state); ifp->if_nextents += 2; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } /* new right extent - oldext */ error = xfs_bmbt_update(cur, &r[1]); if (error) goto done; /* new left extent - oldext */ cur->bc_rec.b = PREV; if ((error = xfs_btree_insert(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } /* * Reset the cursor to the position of the new extent * we are about to insert as we can't trust it after * the previous insert. */ error = xfs_bmbt_lookup_eq(cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } /* new middle extent - newext */ if ((error = xfs_btree_insert(cur, &i))) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } } break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_CONTIG: case BMAP_RIGHT_CONTIG: /* * These cases are all impossible. */ ASSERT(0); } /* update reverse mappings */ xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); /* convert to a btree if necessary */ if (xfs_bmap_needs_btree(ip, whichfork)) { int tmp_logflags; /* partial log flag return val */ ASSERT(cur == NULL); error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, &tmp_logflags, whichfork); *logflagsp |= tmp_logflags; if (error) goto done; } /* clear out the allocated field, done with it now in any case. */ if (cur) { cur->bc_bmap.allocated = 0; *curp = cur; } xfs_bmap_check_leaf_extents(*curp, ip, whichfork); done: *logflagsp |= rval; return error; #undef LEFT #undef RIGHT #undef PREV } /* * Convert a hole to a real allocation. 
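* * Only four cases exist here: the new extent can merge with a real * neighbour on the left, on the right, on both sides, or stand alone as a * freshly inserted record. Delalloc neighbours never qualify as contiguous.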
*/ STATIC int /* error */ xfs_bmap_add_extent_hole_real( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork, struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp, struct xfs_bmbt_irec *new, int *logflagsp, uint32_t flags) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_mount *mp = ip->i_mount; struct xfs_btree_cur *cur = *curp; int error; /* error return value */ int i; /* temp state */ xfs_bmbt_irec_t left; /* left neighbor extent entry */ xfs_bmbt_irec_t right; /* right neighbor extent entry */ int rval=0; /* return value (logging flags) */ uint32_t state = xfs_bmap_fork_to_state(whichfork); struct xfs_bmbt_irec old; ASSERT(!isnullstartblock(new->br_startblock)); ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL)); XFS_STATS_INC(mp, xs_add_exlist); /* * Check and set flags if this segment has a left neighbor. */ if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { state |= BMAP_LEFT_VALID; if (isnullstartblock(left.br_startblock)) state |= BMAP_LEFT_DELAY; } /* * Check and set flags if this segment has a current value. * Not true if we're inserting into the "hole" at eof. */ if (xfs_iext_get_extent(ifp, icur, &right)) { state |= BMAP_RIGHT_VALID; if (isnullstartblock(right.br_startblock)) state |= BMAP_RIGHT_DELAY; } /* * We're inserting a real allocation between "left" and "right". * Set the contiguity flags. Don't let extents get too large. */ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && left.br_startoff + left.br_blockcount == new->br_startoff && left.br_startblock + left.br_blockcount == new->br_startblock && left.br_state == new->br_state && left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN && xfs_bmap_same_rtgroup(ip, whichfork, &left, new)) state |= BMAP_LEFT_CONTIG; if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && new->br_startoff + new->br_blockcount == right.br_startoff && new->br_startblock + new->br_blockcount == right.br_startblock && new->br_state == right.br_state && new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && (!(state & BMAP_LEFT_CONTIG) || left.br_blockcount + new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN) && xfs_bmap_same_rtgroup(ip, whichfork, new, &right)) state |= BMAP_RIGHT_CONTIG; error = 0; /* * Select which case we're in here, and implement it. */ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: /* * New allocation is contiguous with real allocations on the * left and on the right. * Merge all three into a single extent record. */ left.br_blockcount += new->br_blockcount + right.br_blockcount; xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &left); ifp->if_nextents--; if (cur == NULL) { rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); } else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, &right, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_delete(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &left); if (error) goto done; } break; case BMAP_LEFT_CONTIG: /* * New allocation is contiguous with a real allocation * on the left. * Merge the new allocation with the left neighbor. 
*/ old = left; left.br_blockcount += new->br_blockcount; xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &left); if (cur == NULL) { rval = xfs_ilog_fext(whichfork); } else { rval = 0; error = xfs_bmbt_lookup_eq(cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &left); if (error) goto done; } break; case BMAP_RIGHT_CONTIG: /* * New allocation is contiguous with a real allocation * on the right. * Merge the new allocation with the right neighbor. */ old = right; right.br_startoff = new->br_startoff; right.br_startblock = new->br_startblock; right.br_blockcount += new->br_blockcount; xfs_iext_update_extent(ip, state, icur, &right); if (cur == NULL) { rval = xfs_ilog_fext(whichfork); } else { rval = 0; error = xfs_bmbt_lookup_eq(cur, &old, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_bmbt_update(cur, &right); if (error) goto done; } break; case 0: /* * New allocation is not contiguous with another * real allocation. * Insert a new entry. */ xfs_iext_insert(ip, icur, new, state); ifp->if_nextents++; if (cur == NULL) { rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); } else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(cur, new, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } error = xfs_btree_insert(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto done; } } break; } /* add reverse mapping unless caller opted out */ if (!(flags & XFS_BMAPI_NORMAP)) xfs_rmap_map_extent(tp, ip, whichfork, new); /* convert to a btree if necessary */ if (xfs_bmap_needs_btree(ip, whichfork)) { int tmp_logflags; /* partial log flag return val */ ASSERT(cur == NULL); error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, &tmp_logflags, whichfork); *logflagsp |= tmp_logflags; cur = *curp; if (error) goto done; } /* clear out the allocated field, done with it now in any case. */ if (cur) cur->bc_bmap.allocated = 0; xfs_bmap_check_leaf_extents(cur, ip, whichfork); done: *logflagsp |= rval; return error; } /* * Functions used in the extent read, allocate and remove paths */ /* * Adjust the size of the new extent based on i_extsize and rt extsize. */ int xfs_bmap_extsize_align( xfs_mount_t *mp, xfs_bmbt_irec_t *gotp, /* next extent pointer */ xfs_bmbt_irec_t *prevp, /* previous extent pointer */ xfs_extlen_t extsz, /* align to this extent size */ int rt, /* is this a realtime inode? */ int eof, /* is extent at end-of-file? */ int delay, /* creating delalloc extent? */ int convert, /* overwriting unwritten extent? */ xfs_fileoff_t *offp, /* in/out: aligned offset */ xfs_extlen_t *lenp) /* in/out: aligned length */ { xfs_fileoff_t orig_off; /* original offset */ xfs_extlen_t orig_alen; /* original length */ xfs_fileoff_t orig_end; /* original off+len */ xfs_fileoff_t nexto; /* next file offset */ xfs_fileoff_t prevo; /* previous file offset */ xfs_fileoff_t align_off; /* temp for offset */ xfs_extlen_t align_alen; /* temp for length */ xfs_extlen_t temp; /* temp for calculations */ if (convert) return 0; orig_off = align_off = *offp; orig_alen = align_alen = *lenp; orig_end = orig_off + orig_alen; /* * If this request overlaps an existing extent, then don't * attempt to perform any additional alignment. 
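* * Otherwise both ends of the request are rounded outwards to the extent * size: e.g. with extsz 16, a request for blocks [23, 27) is first widened * to [16, 32), and then the neighbour and length checks below trim it back * as needed.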
*/ if (!delay && !eof && (orig_off >= gotp->br_startoff) && (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { return 0; } /* * If the file offset is unaligned vs. the extent size * we need to align it. This will be possible unless * the file was previously written with a kernel that didn't * perform this alignment, or if a truncate shot us in the * foot. */ div_u64_rem(orig_off, extsz, &temp); if (temp) { align_alen += temp; align_off -= temp; } /* Same adjustment for the end of the requested area. */ temp = (align_alen % extsz); if (temp) align_alen += extsz - temp; /* * For large extent hint sizes, the aligned extent might be larger than * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer * allocation loops handle short allocation just fine, so it is safe to * do this. We only want to do it when we are forced to, though, because * it means more allocation operations are required. */ while (align_alen > XFS_MAX_BMBT_EXTLEN) align_alen -= extsz; ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN); /* * If the previous block overlaps with this proposed allocation * then move the start forward without adjusting the length. */ if (prevp->br_startoff != NULLFILEOFF) { if (prevp->br_startblock == HOLESTARTBLOCK) prevo = prevp->br_startoff; else prevo = prevp->br_startoff + prevp->br_blockcount; } else prevo = 0; if (align_off != orig_off && align_off < prevo) align_off = prevo; /* * If the next block overlaps with this proposed allocation * then move the start back without adjusting the length, * but not before offset 0. * This may of course make the start overlap previous block, * and if we hit the offset 0 limit then the next block * can still overlap too. */ if (!eof && gotp->br_startoff != NULLFILEOFF) { if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) nexto = gotp->br_startoff + gotp->br_blockcount; else nexto = gotp->br_startoff; } else nexto = NULLFILEOFF; if (!eof && align_off + align_alen != orig_end && align_off + align_alen > nexto) align_off = nexto > align_alen ? nexto - align_alen : 0; /* * If we're now overlapping the next or previous extent that * means we can't fit an extsz piece in this hole. Just move * the start forward to the first valid spot and set * the length so we hit the end. */ if (align_off != orig_off && align_off < prevo) align_off = prevo; if (align_off + align_alen != orig_end && align_off + align_alen > nexto && nexto != NULLFILEOFF) { ASSERT(nexto > prevo); align_alen = nexto - align_off; } /* * If realtime, and the result isn't a multiple of the realtime * extent size we need to remove blocks until it is. */ if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) { /* * We're not covering the original request, or * we won't be able to once we fix the length. */ if (orig_off < align_off || orig_end > align_off + align_alen || align_alen - temp < orig_alen) return -EINVAL; /* * Try to fix it by moving the start up. */ if (align_off + temp <= orig_off) { align_alen -= temp; align_off += temp; } /* * Try to fix it by moving the end in. */ else if (align_off + align_alen - temp >= orig_end) align_alen -= temp; /* * Set the start to the minimum then trim the length. */ else { align_alen -= orig_off - align_off; align_off = orig_off; align_alen -= xfs_extlen_to_rtxmod(mp, align_alen); } /* * Result doesn't cover the request, fail it.
*/ if (orig_off < align_off || orig_end > align_off + align_alen) return -EINVAL; } else { ASSERT(orig_off >= align_off); /* see XFS_MAX_BMBT_EXTLEN handling above */ ASSERT(orig_end <= align_off + align_alen || align_alen + extsz > XFS_MAX_BMBT_EXTLEN); } #ifdef DEBUG if (!eof && gotp->br_startoff != NULLFILEOFF) ASSERT(align_off + align_alen <= gotp->br_startoff); if (prevp->br_startoff != NULLFILEOFF) ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); #endif *lenp = align_alen; *offp = align_off; return 0; } static inline bool xfs_bmap_adjacent_valid( struct xfs_bmalloca *ap, xfs_fsblock_t x, xfs_fsblock_t y) { struct xfs_mount *mp = ap->ip->i_mount; if (XFS_IS_REALTIME_INODE(ap->ip) && (ap->datatype & XFS_ALLOC_USERDATA)) { if (!xfs_has_rtgroups(mp)) return x < mp->m_sb.sb_rblocks; return xfs_rtb_to_rgno(mp, x) == xfs_rtb_to_rgno(mp, y) && xfs_rtb_to_rgno(mp, x) < mp->m_sb.sb_rgcount && xfs_rtb_to_rtx(mp, x) < mp->m_sb.sb_rgextents; } return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks; } #define XFS_ALLOC_GAP_UNITS 4 /* returns true if ap->blkno was modified */ bool xfs_bmap_adjacent( struct xfs_bmalloca *ap) /* bmap alloc argument struct */ { xfs_fsblock_t adjust; /* adjustment to block numbers */ /* * If allocating at eof, and there's a previous real block, * try to use its last block as our starting point. */ if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && !isnullstartblock(ap->prev.br_startblock) && xfs_bmap_adjacent_valid(ap, ap->prev.br_startblock + ap->prev.br_blockcount, ap->prev.br_startblock)) { ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; /* * Adjust for the gap between prevp and us. */ adjust = ap->offset - (ap->prev.br_startoff + ap->prev.br_blockcount); if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust, ap->prev.br_startblock)) ap->blkno += adjust; return true; } /* * If not at eof, then compare the two neighbor blocks. * Figure out whether either one gives us a good starting point, * and pick the better one. */ if (!ap->eof) { xfs_fsblock_t gotbno; /* right side block number */ xfs_fsblock_t gotdiff=0; /* right side difference */ xfs_fsblock_t prevbno; /* left side block number */ xfs_fsblock_t prevdiff=0; /* left side difference */ /* * If there's a previous (left) block, select a requested * start block based on it. */ if (ap->prev.br_startoff != NULLFILEOFF && !isnullstartblock(ap->prev.br_startblock) && (prevbno = ap->prev.br_startblock + ap->prev.br_blockcount) && xfs_bmap_adjacent_valid(ap, prevbno, ap->prev.br_startblock)) { /* * Calculate gap to end of previous block. */ adjust = prevdiff = ap->offset - (ap->prev.br_startoff + ap->prev.br_blockcount); /* * Figure the startblock based on the previous block's * end and the gap size. * Heuristic! * If the gap is large relative to the piece we're * allocating, or using it gives us an invalid block * number, then just use the end of the previous block. */ if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && xfs_bmap_adjacent_valid(ap, prevbno + prevdiff, ap->prev.br_startblock)) prevbno += adjust; else prevdiff += adjust; } /* * No previous block or can't follow it, just default. */ else prevbno = NULLFSBLOCK; /* * If there's a following (right) block, select a requested * start block based on it. */ if (!isnullstartblock(ap->got.br_startblock)) { /* * Calculate gap to start of next block.
*/ adjust = gotdiff = ap->got.br_startoff - ap->offset; /* * Figure the startblock based on the next block's * start and the gap size. */ gotbno = ap->got.br_startblock; /* * Heuristic! * If the gap is large relative to the piece we're * allocating, or using it gives us an invalid block * number, then just use the start of the next block * offset by our length. */ if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && xfs_bmap_adjacent_valid(ap, gotbno - gotdiff, gotbno)) gotbno -= adjust; else if (xfs_bmap_adjacent_valid(ap, gotbno - ap->length, gotbno)) { gotbno -= ap->length; gotdiff += adjust - ap->length; } else gotdiff += adjust; } /* * No next block, just default. */ else gotbno = NULLFSBLOCK; /* * If both valid, pick the better one, else the only good * one, else ap->blkno is already set (to 0 or the inode block). */ if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) { ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno; return true; } if (prevbno != NULLFSBLOCK) { ap->blkno = prevbno; return true; } if (gotbno != NULLFSBLOCK) { ap->blkno = gotbno; return true; } } return false; } int xfs_bmap_longest_free_extent( struct xfs_perag *pag, struct xfs_trans *tp, xfs_extlen_t *blen) { xfs_extlen_t longest; int error = 0; if (!xfs_perag_initialised_agf(pag)) { error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK, NULL); if (error) return error; } longest = xfs_alloc_longest_free_extent(pag, xfs_alloc_min_freelist(pag_mount(pag), pag), xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); if (*blen < longest) *blen = longest; return 0; } static xfs_extlen_t xfs_bmap_select_minlen( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args, xfs_extlen_t blen) { /* * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), it is * possible that there is enough contiguous free space for this request. */ if (blen < ap->minlen) return ap->minlen; /* * If the best seen length is less than the request length, * use the best as the minimum, otherwise we've got the maxlen we * were asked for. */ if (blen < args->maxlen) return blen; return args->maxlen; } static int xfs_bmap_btalloc_select_lengths( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args, xfs_extlen_t *blen) { struct xfs_mount *mp = args->mp; struct xfs_perag *pag; xfs_agnumber_t agno, startag; int error = 0; if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { args->total = ap->minlen; args->minlen = ap->minlen; return 0; } args->total = ap->total; startag = XFS_FSB_TO_AGNO(mp, ap->blkno); if (startag == NULLAGNUMBER) startag = 0; *blen = 0; for_each_perag_wrap(mp, startag, agno, pag) { error = xfs_bmap_longest_free_extent(pag, args->tp, blen); if (error && error != -EAGAIN) break; error = 0; if (*blen >= args->maxlen) break; } if (pag) xfs_perag_rele(pag); args->minlen = xfs_bmap_select_minlen(ap, args, *blen); return error; } /* Update all inode and quota accounting for the allocation we just did. */ void xfs_bmap_alloc_account( struct xfs_bmalloca *ap) { bool isrt = XFS_IS_REALTIME_INODE(ap->ip) && !(ap->flags & XFS_BMAPI_ATTRFORK); uint fld; if (ap->flags & XFS_BMAPI_COWFORK) { /* * COW fork blocks are in-core only and thus are treated as * in-core quota reservation (like delalloc blocks) even when * converted to real blocks. The quota reservation is not * accounted to disk until blocks are remapped to the data * fork. So if these blocks were previously delalloc, we * already have quota reservation and there's nothing to do * yet. 
*/ if (ap->wasdel) { xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0); return; } /* * Otherwise, we've allocated blocks in a hole. The transaction * has acquired in-core quota reservation for this extent. * Rather than account these as real blocks, however, we reduce * the transaction quota reservation based on the allocation. * This essentially transfers the transaction quota reservation * to that of a delalloc extent. */ ap->ip->i_delayed_blks += ap->length; xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS, -(long)ap->length); return; } /* data/attr fork only */ ap->ip->i_nblocks += ap->length; xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); if (ap->wasdel) { ap->ip->i_delayed_blks -= ap->length; xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0); fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT; } else { fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; } xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length); } static int xfs_bmap_compute_alignments( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args) { struct xfs_mount *mp = args->mp; xfs_extlen_t align = 0; /* minimum allocation alignment */ int stripe_align = 0; /* stripe alignment for allocation is determined by mount parameters */ if (mp->m_swidth && xfs_has_swalloc(mp)) stripe_align = mp->m_swidth; else if (mp->m_dalign) stripe_align = mp->m_dalign; if (ap->flags & XFS_BMAPI_COWFORK) align = xfs_get_cowextsz_hint(ap->ip); else if (ap->datatype & XFS_ALLOC_USERDATA) align = xfs_get_extsz_hint(ap->ip); /* Try to align start block to any minimum allocation alignment */ if (align > 1 && (ap->flags & XFS_BMAPI_EXTSZALIGN)) args->alignment = align; if (align) { if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0, ap->eof, 0, ap->conv, &ap->offset, &ap->length)) ASSERT(0); ASSERT(ap->length); } /* apply extent size hints if obtained earlier */ if (align) { args->prod = align; div_u64_rem(ap->offset, args->prod, &args->mod); if (args->mod) args->mod = args->prod - args->mod; } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { args->prod = 1; args->mod = 0; } else { args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; div_u64_rem(ap->offset, args->prod, &args->mod); if (args->mod) args->mod = args->prod - args->mod; } return stripe_align; } static void xfs_bmap_process_allocated_extent( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args, xfs_fileoff_t orig_offset, xfs_extlen_t orig_length) { ap->blkno = args->fsbno; ap->length = args->len; /* * If the extent size hint is active, we tried to round the * caller's allocation request offset down to extsz and the * length up to another extsz boundary. If we found a free * extent we mapped it in starting at this new offset. If the * newly mapped space isn't long enough to cover any of the * range of offsets that was originally requested, move the * mapping up so that we can fill as much of the caller's * original request as possible. Free space is apparently * very fragmented so we're unlikely to be able to satisfy the * hints anyway. 
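* * e.g. a request at offset 5 for 3 blocks that was aligned out to offset * 0 for 8 blocks: if only 4 blocks could be allocated, mapping them at * offset 0 would cover offsets 0-3 and miss the request entirely, so the * mapping is moved up to cover offsets 4-7 instead.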
	 */
	if (ap->length <= orig_length)
		ap->offset = orig_offset;
	else if (ap->offset + ap->length < orig_offset + orig_length)
		ap->offset = orig_offset + orig_length - ap->length;
	xfs_bmap_alloc_account(ap);
}

static int
xfs_bmap_exact_minlen_extent_alloc(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	if (ap->minlen != 1) {
		args->fsbno = NULLFSBLOCK;
		return 0;
	}

	args->alloc_minlen_only = 1;
	args->minlen = args->maxlen = ap->minlen;
	args->total = ap->total;

	/*
	 * Unlike the longest extent available in an AG, we don't track
	 * the length of an AG's shortest extent.
	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
	 * hence we can afford to start traversing from the 0th AG since
	 * we need not be concerned about a drop in performance in
	 * "debug only" code paths.
	 */
	ap->blkno = XFS_AGB_TO_FSB(ap->ip->i_mount, 0, 0);

	/*
	 * Call xfs_bmap_btalloc_low_space here as it first does a "normal" AG
	 * iteration and then drops args->total to args->minlen, which might be
	 * required to find an allocation for the transaction reservation when
	 * the file system is very full.
	 */
	return xfs_bmap_btalloc_low_space(ap, args);
}

/*
 * If we are not low on available data blocks and we are allocating at
 * EOF, optimise allocation for contiguous file extension and/or stripe
 * alignment of the new extent.
 *
 * NOTE: ap->aeof is only set if the allocation length is >= the
 * stripe unit and the allocation offset is at the end of file.
 */
static int
xfs_bmap_btalloc_at_eof(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen,
	int			stripe_align,
	bool			ag_only)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*caller_pag = args->pag;
	int			error;

	/*
	 * If there are already extents in the file, and xfs_bmap_adjacent() has
	 * given a better blkno, try an exact EOF block allocation to extend the
	 * file as a contiguous extent. If that fails, or it's the first
	 * allocation in a file, just try for a stripe aligned allocation.
	 */
	if (ap->eof) {
		xfs_extlen_t	nextminlen = 0;

		/*
		 * Compute the minlen+alignment for the next case.  Set slop so
		 * that the value of minlen+alignment+slop doesn't go up between
		 * the calls.
		 */
		args->alignment = 1;
		if (blen > stripe_align && blen <= args->maxlen)
			nextminlen = blen - stripe_align;
		else
			nextminlen = args->minlen;
		if (nextminlen + stripe_align > args->minlen + 1)
			args->minalignslop = nextminlen + stripe_align -
					args->minlen - 1;
		else
			args->minalignslop = 0;

		if (!caller_pag)
			args->pag = xfs_perag_get(mp,
					XFS_FSB_TO_AGNO(mp, ap->blkno));
		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
		if (!caller_pag) {
			xfs_perag_put(args->pag);
			args->pag = NULL;
		}
		if (error)
			return error;

		if (args->fsbno != NULLFSBLOCK)
			return 0;
		/*
		 * Exact allocation failed. Reset to try an aligned allocation
		 * according to the original allocation specification.
		 */
		args->alignment = stripe_align;
		args->minlen = nextminlen;
		args->minalignslop = 0;
	} else {
		/*
		 * Adjust minlen to try and preserve alignment if we
		 * can't guarantee an aligned maxlen extent.
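		 *
		 * For example (illustrative numbers): with stripe_align = 4,
		 * args->maxlen = 16 and a best-seen free extent of blen = 10,
		 * the test below sets args->minlen = 10 - 4 = 6, since an
		 * aligned start can fall anywhere in the first 4 blocks of a
		 * 10-block free extent and still leave 6 usable blocks.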
		 */
		args->alignment = stripe_align;
		if (blen > args->alignment &&
		    blen <= args->maxlen + args->alignment)
			args->minlen = blen - args->alignment;
		args->minalignslop = 0;
	}

	if (ag_only) {
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
	} else {
		args->pag = NULL;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		ASSERT(args->pag == NULL);
		args->pag = caller_pag;
	}
	if (error)
		return error;

	if (args->fsbno != NULLFSBLOCK)
		return 0;

	/*
	 * Allocation failed, so return the allocation args to their original
	 * non-aligned state so the caller can proceed on allocation failure as
	 * if this function was never called.
	 */
	args->alignment = 1;
	return 0;
}

/*
 * We have failed multiple allocation attempts so now are in a low space
 * allocation situation. Try a locality first full filesystem minimum length
 * allocation whilst still maintaining necessary total block reservation
 * requirements.
 *
 * If that fails, we are now critically low on space, so perform a last resort
 * allocation attempt: no reserve, no locality, blocking, minimum length, full
 * filesystem free space scan. We also indicate to future allocations in this
 * transaction that we are critically low on space so they don't waste time on
 * allocation modes that are unlikely to succeed.
 */
int
xfs_bmap_btalloc_low_space(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	int			error;

	if (args->minlen > ap->minlen) {
		args->minlen = ap->minlen;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	/* Last ditch attempt before failure is declared. */
	args->total = ap->minlen;
	error = xfs_alloc_vextent_first_ag(args, 0);
	if (error)
		return error;
	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
	return 0;
}

static int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error = 0;

	error = xfs_filestream_select_ag(ap, args, &blen);
	if (error)
		return error;
	ASSERT(args->pag);

	/*
	 * If we are in low space mode, then optimal allocation will fail so
	 * prepare for minimal allocation and jump to the low space algorithm
	 * immediately.
	 */
	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->minlen = ap->minlen;
		ASSERT(args->fsbno == NULLFSBLOCK);
		goto out_low_space;
	}

	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
	if (ap->aeof)
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				true);

	if (!error && args->fsbno == NULLFSBLOCK)
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);

out_low_space:
	/*
	 * We are now done with the perag reference for the filestreams
	 * association provided by xfs_filestream_select_ag(). Release it now as
	 * we've either succeeded, had a fatal error or we are out of space and
	 * need to do a full filesystem scan for free space which will take its
	 * own references.
	 */
	xfs_perag_rele(args->pag);
	args->pag = NULL;
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}

static int
xfs_bmap_btalloc_best_length(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	xfs_extlen_t		blen = 0;
	int			error;

	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
	if (!xfs_bmap_adjacent(ap))
		ap->eof = false;

	/*
	 * Search for an allocation group with a single extent large enough for
	 * the request.  If one isn't found, then adjust the minimum allocation
	 * size to the largest space found.
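	 *
	 * E.g. (hypothetical sizes): for a 100-block request where the
	 * longest free extents reported by the scanned AGs are 40 and 80
	 * blocks, blen comes back as 80 and args->minlen is lowered to 80,
	 * so the allocator can still succeed with a single 80-block extent
	 * rather than failing outright.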
	 */
	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
	if (error)
		return error;

	/*
	 * Don't attempt optimal EOF allocation if previous allocations barely
	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
	 * optimal or even aligned allocations in this case, so don't waste time
	 * trying.
	 */
	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				false);
		if (error || args->fsbno != NULLFSBLOCK)
			return error;
	}

	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
	if (error || args->fsbno != NULLFSBLOCK)
		return error;

	return xfs_bmap_btalloc_low_space(ap, args);
}

static int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_alloc_arg	args = {
		.tp		= ap->tp,
		.mp		= mp,
		.fsbno		= NULLFSBLOCK,
		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
		.minleft	= ap->minleft,
		.wasdel		= ap->wasdel,
		.resv		= XFS_AG_RESV_NONE,
		.datatype	= ap->datatype,
		.alignment	= 1,
		.minalignslop	= 0,
	};
	xfs_fileoff_t		orig_offset;
	xfs_extlen_t		orig_length;
	int			error;
	int			stripe_align;

	ASSERT(ap->length);
	orig_offset = ap->offset;
	orig_length = ap->length;

	stripe_align = xfs_bmap_compute_alignments(ap, &args);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = min(ap->length, mp->m_ag_max_usable);

	if (unlikely(XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
		error = xfs_bmap_exact_minlen_extent_alloc(ap, &args);
	else if ((ap->datatype & XFS_ALLOC_USERDATA) &&
			xfs_inode_is_filestream(ap->ip))
		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
	else
		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
	if (error)
		return error;

	if (args.fsbno != NULLFSBLOCK) {
		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
			orig_length);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}

/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
	xfs_fileoff_t		end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}

	if (irec->br_startoff < bno) {
		distance = bno - irec->br_startoff;
		if (isnullstartblock(irec->br_startblock))
			irec->br_startblock = DELAYSTARTBLOCK;
		if (irec->br_startblock != DELAYSTARTBLOCK &&
		    irec->br_startblock != HOLESTARTBLOCK)
			irec->br_startblock += distance;
		irec->br_startoff += distance;
		irec->br_blockcount -= distance;
	}

	if (end < irec->br_startoff + irec->br_blockcount) {
		distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}

/*
 * Trim the returned map to the required bounds
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	uint32_t		flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for, for the
	 * length.  We can use the len variable here because it is modified
	 * below and we could have been there before coming here if the first
	 * part of the allocation didn't overlap what was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
	return;
}

/*
 * Update and validate the extent map to return
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	uint32_t		flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   mval[-1].br_state == mval->br_state) {
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		mval++;
		(*n)++;
	}
	*map = mval;
}

/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	int			error;
	bool			eof = false;
	int			n = 0;

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);

	if (WARN_ON_ONCE(!ifp)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole.  */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}

static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			error;

	ASSERT(bma->length > 0);
	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);

	if (bma->flags & XFS_BMAPI_CONTIG)
		bma->minlen = bma->length;
	else
		bma->minlen = 1;

	if (!(bma->flags & XFS_BMAPI_METADATA)) {
		/*
		 * For the data and COW fork, the first data in the file is
		 * treated differently to all other allocations. For the
		 * attribute fork, we only need to ensure the allocated range
		 * is not on the busy list.
		 */
		bma->datatype = XFS_ALLOC_NOBUSY;
		if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
			bma->datatype |= XFS_ALLOC_USERDATA;
			if (bma->offset == 0)
				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;

			if (mp->m_dalign && bma->length >= mp->m_dalign) {
				error = xfs_bmap_isaeof(bma, whichfork);
				if (error)
					return error;
			}
		}
	}

	if ((bma->datatype & XFS_ALLOC_USERDATA) &&
	    XFS_IS_REALTIME_INODE(bma->ip))
		error = xfs_bmap_rtalloc(bma);
	else
		error = xfs_bmap_btalloc(bma);
	if (error)
		return error;
	if (bma->blkno == NULLFSBLOCK)
		return -ENOSPC;

	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
		xfs_bmap_mark_sick(bma->ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (bma->flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
		if (error)
			return error;
	}

	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip,
				whichfork);
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur && bma->wasdel)
		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	if (bma->flags & XFS_BMAPI_PREALLOC)
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
				whichfork, &bma->icur, &bma->cur, &bma->got,
				&bma->logflags, bma->flags);
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
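	 *
	 * For instance, a new allocation at [10, 12) placed next to an
	 * existing real extent at [12, 20) may come back from the extent
	 * tree as a single merged [10, 20) record, which is why bma->got
	 * is re-read before the ASSERTs below.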
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
			bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}

STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	uint32_t		flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}

xfs_extlen_t
xfs_bmapi_minleft(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			fork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);

	if (tp && tp->t_highest_agno != NULLAGNUMBER)
		return 0;
	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return 1;
	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
}

/*
 * Log whatever the flags say, even if error.  Otherwise we might miss detecting
 * a case where the data is changed, there's an error, and it's not logged so we
 * don't shutdown when we should.  Don't bother logging extents/btree changes if
 * we converted to the other format.
 */
static void
xfs_bmapi_finish(
	struct xfs_bmalloca	*bma,
	int			whichfork,
	int			error)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);

	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		bma->logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		bma->logflags &= ~xfs_ilog_fbroot(whichfork);

	if (bma->logflags)
		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
	if (bma->cur)
		xfs_btree_del_cursor(bma->cur, error);
}

/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * Returns 0 on success and places the extent mappings in mval.  nmaps is used
 * as an input/output parameter where the caller specifies the maximum number
 * of mappings that may be returned and xfs_bmapi_write passes back the number
 * of mappings (including existing mappings) it found.
 *
 * Returns a negative error code on failure, including -ENOSPC when it could not
 * allocate any blocks and -ENOSR when it did allocate blocks to convert a
 * delalloc range, but those blocks were before the passed in range.
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	uint32_t		flags,		/* XFS_BMAPI_... */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap)		/* i/o: mval size/count */
{
	struct xfs_bmalloca	bma = {
		.tp		= tp,
		.ip		= ip,
		.total		= total,
	};
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * We can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		goto error0;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
	while (bno < end && n < *nmap) {
		bool			need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			/*
			 * CoW fork conversions should /never/ hit EOF or
			 * holes.  There should always be something for us
			 * to work on.
			 */
			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
			         (flags & XFS_BMAPI_COWFORK)));

			need_alloc = true;
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * be careful and do the min() using the larger type to
			 * avoid overflows.
			 */
			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);

			if (wasdelay) {
				bma.length = XFS_FILBLKS_MIN(bma.length,
					bma.got.br_blockcount -
					(bno - bma.got.br_startoff));
			} else {
				if (!eof)
					bma.length = XFS_FILBLKS_MIN(bma.length,
						bma.got.br_startoff - bno);
			}

			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error) {
				/*
				 * If we already allocated space in a previous
				 * iteration return what we got so far when
				 * running out of space.
				 */
				if (error == -ENOSPC && bma.nallocs)
					break;
				goto error0;
			}

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK)
				xfs_refcount_alloc_cow_extent(tp,
						XFS_IS_REALTIME_INODE(ip),
						bma.blkno, bma.length);
		}

		/* Deal with the allocated space we found.  */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, end, n,
				flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
			eof = true;
	}

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto error0;

	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
	xfs_bmapi_finish(&bma, whichfork, 0);
	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
		orig_nmap, n);

	/*
	 * When converting delayed allocations, xfs_bmapi_allocate ignores
	 * the passed in bno and always converts from the start of the found
	 * delalloc extent.
	 *
	 * To avoid a successful return with *nmap set to 0, return the magic
	 * -ENOSR error code for this particular case so that the caller can
	 * handle it.
	 */
	if (!n) {
		ASSERT(bma.nallocs >= *nmap);
		return -ENOSR;
	}
	*nmap = n;
	return 0;
error0:
	xfs_bmapi_finish(&bma, whichfork, error);
	return error;
}

/*
 * Convert an existing delalloc extent to real blocks based on file offset.
 * This attempts to allocate the entire delalloc extent and may require
 * multiple invocations to allocate the target offset if a large enough
 * physical extent is not available.
 */
static int
xfs_bmapi_convert_one_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmalloca	bma = { NULL };
	uint16_t		flags = 0;
	struct xfs_trans	*tp;
	int			error;

	if (whichfork == XFS_COW_FORK)
		flags |= IOMAP_F_SHARED;

	/*
	 * Space for the extent and indirect blocks was reserved when the
	 * delalloc extent was created so there's no need to do so here.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
				XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_extend(tp, ip, whichfork,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto out_trans_cancel;

	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
	    bma.got.br_startoff > offset_fsb) {
		/*
		 * No extent found in the range we are trying to convert.  This
		 * should only happen for the COW fork, where another thread
		 * might have moved the extent to the data fork in the
		 * meantime.
		 */
		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
		error = -EAGAIN;
		goto out_trans_cancel;
	}

	/*
	 * If we find a real extent here we raced with another thread
	 * converting the extent.  Just return the real extent at this offset.
	 */
	if (!isnullstartblock(bma.got.br_startblock)) {
		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
				xfs_iomap_inode_sequence(ip, flags));
		if (seq)
			*seq = READ_ONCE(ifp->if_seq);
		goto out_trans_cancel;
	}

	bma.tp = tp;
	bma.ip = ip;
	bma.wasdel = true;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	/*
	 * Always convert from the start of the delalloc extent even if that is
	 * outside the passed in range to create large contiguous extents on
	 * disk.
	 */
	bma.offset = bma.got.br_startoff;
	bma.length = bma.got.br_blockcount;

	/*
	 * When we're converting the delalloc reservations backing dirty pages
	 * in the page cache, we must be careful about how we create the new
	 * extents:
	 *
	 * New CoW fork extents are created unwritten, turned into real extents
	 * when we're about to write the data to disk, and mapped into the data
	 * fork after the write finishes.  End of story.
	 *
	 * New data fork extents must be mapped in as unwritten and converted
	 * to real extents after the write succeeds to avoid exposing stale
	 * disk contents if we crash.
	 */
	bma.flags = XFS_BMAPI_PREALLOC;
	if (whichfork == XFS_COW_FORK)
		bma.flags |= XFS_BMAPI_COWFORK;

	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;

	error = xfs_bmapi_allocate(&bma);
	if (error)
		goto out_finish;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
	XFS_STATS_INC(mp, xs_xstrat_quick);

	ASSERT(!isnullstartblock(bma.got.br_startblock));
	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
			xfs_iomap_inode_sequence(ip, flags));
	if (seq)
		*seq = READ_ONCE(ifp->if_seq);

	if (whichfork == XFS_COW_FORK)
		xfs_refcount_alloc_cow_extent(tp, XFS_IS_REALTIME_INODE(ip),
				bma.blkno, bma.length);

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto out_finish;

	xfs_bmapi_finish(&bma, whichfork, 0);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_finish:
	xfs_bmapi_finish(&bma, whichfork, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in iomap.
 */
int
xfs_bmapi_convert_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into iomap.  Allocate in a loop because it may
	 * take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
				iomap, seq);
		if (error)
			return error;
	} while (iomap->offset + iomap->length <= offset);

	return 0;
}

int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			whichfork = xfs_bmapi_whichfork(flags);
	int			logflags = 0, error;

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
			   XFS_BMAPI_NORMAP)));
	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	ip->i_nblocks += len;
	ip->i_delayed_blks -= len; /* see xfs_bmap_defer_add */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	if (flags & XFS_BMAPI_PREALLOC)
		got.br_state = XFS_EXT_UNWRITTEN;
	else
		got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
			&cur, &got, &logflags, flags);
	if (error)
		goto error0;

	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);

error0:
	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur)
		xfs_btree_del_cursor(cur, error);
	return error;
}

/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents.  If necessary, the caller
 * may steal available blocks from the deleted extent to make up a reservation
 * deficiency (e.g., if ores == 1); the availability and subsequent accounting
 * of stolen blocks is the responsibility of the caller.
 */
static void
xfs_bmap_split_indlen(
	xfs_filblks_t			ores,		/* original res. */
	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t			*indlen2)	/* ext2 worst indlen */
{
	xfs_filblks_t			len1 = *indlen1;
	xfs_filblks_t			len2 = *indlen2;
	xfs_filblks_t			nres = len1 + len2; /* new total res. */
	xfs_filblks_t			resfactor;

	/*
	 * We can't meet the total required reservation for the two extents.
	 * Calculate the percent of the overall shortage between both extents
	 * and apply this percentage to each of the requested indlen values.
	 * This distributes the shortage fairly and reduces the chances that
	 * one of the two extents is left with nothing when extents are
	 * repeatedly split.
	 */
	resfactor = (ores * 100);
	do_div(resfactor, nres);
	len1 *= resfactor;
	do_div(len1, 100);
	len2 *= resfactor;
	do_div(len2, 100);
	ASSERT(len1 + len2 <= ores);
	ASSERT(len1 < *indlen1 && len2 < *indlen2);

	/*
	 * Hand out the remainder to each extent.  If one of the two
	 * reservations is zero, we want to make sure that one gets a block
	 * first.  The loop below starts with len1, so hand len2 a block right
	 * off the bat if it is zero.
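	 *
	 * For example (made-up numbers): ores = 5 with worst-case
	 * *indlen1 = *indlen2 = 4 gives nres = 8 and resfactor = 62, so
	 * len1 = len2 = 2 after the scaling above.  The remaining block
	 * (ores = 1) is then handed to len1 by the loop below, for a
	 * final 3/2 split of the original 5-block reservation.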
	 */
	ores -= (len1 + len2);
	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) {
			len1++;
			ores--;
		}
		if (!ores)
			break;
		if (len2 < *indlen2) {
			len2++;
			ores--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;
}

void
xfs_bmap_del_extent_delay(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del,
	uint32_t		bflags)		/* bmapi flags */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	int64_t			da_old, da_new, da_diff = 0;
	xfs_fileoff_t		del_endoff, got_endoff;
	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	uint64_t		fdblocks;
	bool			isrt;

	XFS_STATS_INC(mp, xs_del_exlist);

	isrt = xfs_ifork_is_realtime(ip, whichfork);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;
	da_old = startblockval(got->br_startblock);
	da_new = 0;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);

	/*
	 * Update the inode delalloc counter now and wait to update the
	 * sb counters as we might have to borrow some blocks for the
	 * indirect block accounting.
	 */
	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
	ip->i_delayed_blks -= del->br_blockcount;

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount = got->br_blockcount - del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * Distribute the original indlen reservation across the two
		 * new extents.  Steal blocks from the deleted extent if
		 * necessary.  Stealing blocks simply fudges the fdblocks
		 * accounting below.  Warn if either of the new indlen
		 * reservations is zero as this can lead to delalloc problems.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;
		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);

		new.br_blockcount = got_endoff - del_endoff;
		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);

		WARN_ON_ONCE(!got_indlen || !new_indlen);

		/*
		 * Steal as many blocks as we can to try and satisfy the worst
		 * case indlen for both new extents.
		 *
		 * However, we can't just steal reservations from the data
		 * blocks if this is an RT inode, as the data and metadata
		 * blocks come from different pools.  We'll have to live with
		 * under-filled indirect reservation in this case.
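		 *
		 * An illustrative case: da_old = 3 and the two worst-case
		 * indlens come back as 2 each, so da_new = 4.  On a non-RT
		 * inode we steal min(4 - 3, del->br_blockcount) = 1 block
		 * from the deleted extent, da_old becomes 4, and no split
		 * is needed; on an RT inode nothing can be stolen and
		 * xfs_bmap_split_indlen() shares the 3 blocks out instead.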
		 */
		da_new = got_indlen + new_indlen;
		if (da_new > da_old && !isrt) {
			stolen = XFS_FILBLKS_MIN(da_new - da_old,
						 del->br_blockcount);
			da_old += stolen;
		}
		if (da_new > da_old)
			xfs_bmap_split_indlen(da_old, &got_indlen,
					&new_indlen);
		da_new = got_indlen + new_indlen;

		got->br_startblock = nullstartblock((int)got_indlen);

		new.br_startoff = del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = nullstartblock((int)new_indlen);

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);

		del->br_blockcount -= stolen;
		break;
	}

	ASSERT(da_old >= da_new);
	da_diff = da_old - da_new;
	fdblocks = da_diff;

	if (bflags & XFS_BMAPI_REMAP) {
		;
	} else if (isrt) {
		xfs_rtbxlen_t	rtxlen;

		rtxlen = xfs_blen_to_rtbxlen(mp, del->br_blockcount);
		if (xfs_is_zoned_inode(ip))
			xfs_zoned_add_available(mp, rtxlen);
		xfs_add_frextents(mp, rtxlen);
	} else {
		fdblocks += del->br_blockcount;
	}

	xfs_add_fdblocks(mp, fdblocks);
	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
}

void
xfs_bmap_del_extent_cow(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	new;
	xfs_fileoff_t		del_endoff, got_endoff;
	uint32_t		state = BMAP_COWFORK;

	XFS_STATS_INC(mp, xs_del_exlist);

	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got->br_startblock));

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		got->br_startblock = del->br_startblock + del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}
	ip->i_delayed_blks -= del->br_blockcount;
}

static int
xfs_bmap_free_rtblocks(
	struct xfs_trans	*tp,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_rtgroup	*rtg;
	int			error;

	rtg = xfs_rtgroup_grab(tp->t_mountp, 0);
	if (!rtg)
		return -EIO;

	/*
	 * Ensure the bitmap and summary inodes are locked and joined to the
	 * transaction before modifying them.
	 */
	if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
		tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
	}

	error = xfs_rtfree_blocks(tp, rtg, del->br_startblock,
			del->br_blockcount);
	xfs_rtgroup_rele(rtg);
	return error;
}

/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space.
 */
STATIC int				/* error */
xfs_bmap_del_extent_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	uint32_t		bflags)	/* bmapi flags */
{
	xfs_fsblock_t		del_endblock = 0; /* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			error = 0;	/* error return value */
	struct xfs_bmbt_irec	got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(del->br_blockcount > 0);
	xfs_iext_get_extent(ifp, icur, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got.br_startblock));
	qfield = 0;

	/*
	 * If it's the case where the directory code is running with no block
	 * reservation, and the deleted block is in the middle of its extent,
	 * and the resulting insert of an extent would cause transformation to
	 * btree format, then reject it.  The calling code will then swap
	 * blocks around instead.  We have to do this now, rather than waiting
	 * for the conversion to btree format, since the transaction will be
	 * dirty then.
	 */
	if (tp->t_blk_res == 0 &&
	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
		return -ENOSPC;

	*logflagsp = XFS_ILOG_CORE;
	if (xfs_ifork_is_realtime(ip, whichfork))
		qfield = XFS_TRANS_DQ_RTBCOUNT;
	else
		qfield = XFS_TRANS_DQ_BCOUNT;
	nblks = del->br_blockcount;

	del_endblock = del->br_startblock + del->br_blockcount;
	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
	}

	if (got.br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		ifp->if_nextents--;

		*logflagsp |= XFS_ILOG_CORE;
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got.br_startoff = del_endoff;
		got.br_startblock = del_endblock;
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			*logflagsp |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			return error;
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		old = got;

		got.br_blockcount = del->br_startoff - got.br_startoff;
		xfs_iext_update_extent(ip, state, icur, &got);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got.br_state;
		new.br_startblock = del_endblock;

		*logflagsp |= XFS_ILOG_CORE;
		if (cur) {
			error = xfs_bmbt_update(cur, &got);
			if (error)
				return error;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				return error;
			cur->bc_rec.b = new;
			error = xfs_btree_insert(cur, &i);
			if (error && error != -ENOSPC)
				return error;
			/*
			 * If we get no-space back from btree insert, it tried
			 * a split, and we have a zero block reservation.  Fix
			 * up our state and return the error.
			 */
			if (error == -ENOSPC) {
				/*
				 * Reset the cursor, don't trust it after any
				 * insert operation.
				 */
				error = xfs_bmbt_lookup_eq(cur, &got, &i);
				if (error)
					return error;
				if (XFS_IS_CORRUPT(mp, i != 1)) {
					xfs_btree_mark_sick(cur);
					return -EFSCORRUPTED;
				}
				/*
				 * Update the btree record back
				 * to the original value.
				 */
				error = xfs_bmbt_update(cur, &old);
				if (error)
					return error;
				/*
				 * Reset the extent record back
				 * to the original value.
				 */
				xfs_iext_update_extent(ip, state, icur, &old);
				*logflagsp = 0;
				return -ENOSPC;
			}
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				return -EFSCORRUPTED;
			}
		} else
			*logflagsp |= xfs_ilog_fext(whichfork);

		ifp->if_nextents++;
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}

	/* remove reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, del);

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (!(bflags & XFS_BMAPI_REMAP)) {
		bool	isrt = xfs_ifork_is_realtime(ip, whichfork);

		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			xfs_refcount_decrease_extent(tp, isrt, del);
		} else if (isrt && !xfs_has_rtgroups(mp)) {
			error = xfs_bmap_free_rtblocks(tp, del);
		} else {
			unsigned int	efi_flags = 0;

			if ((bflags & XFS_BMAPI_NODISCARD) ||
			    del->br_state == XFS_EXT_UNWRITTEN)
				efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;

			/*
			 * Historically, we did not use EFIs to free realtime
			 * extents.  However, when reverse mapping is enabled,
			 * we must maintain the same order of operations as the
			 * data device, which is: Remove the file mapping,
			 * remove the reverse mapping, and then free the
			 * blocks.  Reflink for realtime volumes requires the
			 * same sort of ordering.  Both features rely on
			 * rtgroups, so let's gate rt EFI usage on rtgroups.
			 */
			if (isrt)
				efi_flags |= XFS_FREE_EXTENT_REALTIME;

			error = xfs_free_extent_later(tp, del->br_startblock,
					del->br_blockcount, NULL,
					XFS_AG_RESV_NONE, efi_flags);
		}
		if (error)
			return error;
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	return 0;
}

/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *rlen is set to the remaining length (which the xfs_bunmapi() wrapper
 * reports via *done).
 */
static int
__xfs_bunmapi(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		start,		/* first file offset deleted */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	uint32_t		flags,		/* misc flags */
	xfs_extnum_t		nexts)		/* number of extents max */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	struct xfs_bmbt_irec	del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	struct xfs_bmbt_irec	got;		/* current extent record */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	struct xfs_mount	*mp = ip->i_mount;
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	bool			done = false;

	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}
	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = xfs_ifork_is_realtime(ip, whichfork);
	end = start + len;

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
		*rlen = 0;
		return 0;
	}
	end--;

	logflags = 0;
	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	else
		cur = NULL;

	extno = 0;
	while (end != (xfs_fileoff_t)-1 && end >= start &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which end lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > end &&
		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
			done = true;
			break;
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		end = XFS_FILEOFF_MIN(end,
			got.br_startoff + got.br_blockcount - 1);
		if (end < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);

		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > end + 1)
			del.br_blockcount = end + 1 - del.br_startoff;

		if (!isrt || (flags & XFS_BMAPI_REMAP))
			goto delete;

		mod = xfs_rtb_to_rtxoff(mp,
				del.br_startblock + del.br_blockcount);
		if (mod) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
				end -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (end < got.br_startoff &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
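			 *
			 * E.g. (made-up geometry): with a 4-block rt extent
			 * size, unmapping a written range whose last block
			 * ends 2 blocks into an rt extent leaves mod = 2, so
			 * the chop below trims del to just those trailing 2
			 * blocks and converts them to unwritten instead of
			 * freeing a partial rt extent.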
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &icur, &cur, &del,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}

		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
		if (mod) {
			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;

			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			if (del.br_blockcount > off) {
				del.br_blockcount -= off;
				del.br_startoff += off;
				del.br_startblock += off;
			} else if (del.br_startoff == start &&
				   (del.br_state == XFS_EXT_UNWRITTEN ||
				    tp->t_blk_res == 0)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(end >= del.br_blockcount);
				end -= del.br_blockcount;
				if (got.br_startoff > end &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;
				xfs_fileoff_t		unwrite_start;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
					ASSERT(0);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				unwrite_start = max3(start,
						     del.br_startoff - mod,
						     prev.br_startoff);
				mod = unwrite_start - prev.br_startoff;
				prev.br_startoff = unwrite_start;
				prev.br_startblock += mod;
				prev.br_blockcount -= mod;
				prev.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&prev, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&del, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

delete:
		if (wasdel) {
			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
					&del, flags);
		} else {
			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
					&del, &tmp_logflags, whichfork,
					flags);
			logflags |= tmp_logflags;
			if (error)
				goto error0;
		}

		end = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (end != (xfs_fileoff_t)-1 && end >= start) {
			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
			    (got.br_startoff > end &&
			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
				done = true;
				break;
			}
			extno++;
		}
	}
	if (done || end == (xfs_fileoff_t)-1 || end < start)
		*rlen = 0;
	else
		*rlen = end - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	} else {
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
			whichfork);
	}

error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);

	/*
	 * Log the inode even in the error case; if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/* Unmap a range of a file. */
int
xfs_bunmapi(
	xfs_trans_t		*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_extnum_t		nexts,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
	*done = (len == 0);
	return error;
}

/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN) ||
	    !xfs_bmap_same_rtgroup(ip, whichfork, left, got))
		return false;

	return true;
}

/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file.  If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		shift,		/* shift fsb */
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,		/* extent to shift */
	struct xfs_bmbt_irec	*left,		/* preceding extent */
	struct xfs_btree_cur	*cur,
	int			*logflags)	/* output */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	xfs_filblks_t		blockcount;
	int			error, i;
	struct xfs_mount	*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	ASSERT(xfs_bmse_can_merge(ip, whichfork, left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(ip, whichfork, &prev, &got,
				offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
if (error) goto del_cursor; goto done; } } else { if (got.br_startoff < offset_shift_fsb) { error = -EINVAL; goto del_cursor; } } error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got, cur, &logflags, new_startoff); if (error) goto del_cursor; done: if (!xfs_iext_next_extent(ifp, &icur, &got)) { *done = true; goto del_cursor; } *next_fsb = got.br_startoff; del_cursor: if (cur) xfs_btree_del_cursor(cur, error); if (logflags) xfs_trans_log_inode(tp, ip, logflags); return error; } /* Make sure we won't be right-shifting an extent past the maximum bound. */ int xfs_bmap_can_insert_extents( struct xfs_inode *ip, xfs_fileoff_t off, xfs_fileoff_t shift) { struct xfs_bmbt_irec got; int is_empty; int error = 0; xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL); if (xfs_is_shutdown(ip->i_mount)) return -EIO; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty); if (!error && !is_empty && got.br_startoff >= off && ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff) error = -EINVAL; xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } int xfs_bmap_insert_extents( struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, bool *done, xfs_fileoff_t stop_fsb) { int whichfork = XFS_DATA_FORK; struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_btree_cur *cur = NULL; struct xfs_bmbt_irec got, next; struct xfs_iext_cursor icur; xfs_fileoff_t new_startoff; int error = 0; int logflags = 0; if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) { xfs_bmap_mark_sick(ip, whichfork); return -EFSCORRUPTED; } if (xfs_is_shutdown(mp)) return -EIO; xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); error = xfs_iread_extents(tp, ip, whichfork); if (error) return error; if (ifp->if_format == XFS_DINODE_FMT_BTREE) cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); if (*next_fsb == NULLFSBLOCK) { xfs_iext_last(ifp, &icur); if (!xfs_iext_get_extent(ifp, &icur, &got) || stop_fsb > got.br_startoff) { *done = true; goto del_cursor; } } else { if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) { *done = true; goto del_cursor; } } if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) { xfs_bmap_mark_sick(ip, whichfork); error = -EFSCORRUPTED; goto del_cursor; } if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) { xfs_bmap_mark_sick(ip, whichfork); error = -EFSCORRUPTED; goto del_cursor; } new_startoff = got.br_startoff + offset_shift_fsb; if (xfs_iext_peek_next_extent(ifp, &icur, &next)) { if (new_startoff + got.br_blockcount > next.br_startoff) { error = -EINVAL; goto del_cursor; } /* * Unlike a left shift (which involves a hole punch), a right * shift does not modify extent neighbors in any way. We should * never find mergeable extents in this scenario. Check anyways * and warn if we encounter two extents that could be one. 
*/ if (xfs_bmse_can_merge(ip, whichfork, &got, &next, offset_shift_fsb)) WARN_ON_ONCE(1); } error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got, cur, &logflags, new_startoff); if (error) goto del_cursor; if (!xfs_iext_prev_extent(ifp, &icur, &got) || stop_fsb >= got.br_startoff + got.br_blockcount) { *done = true; goto del_cursor; } *next_fsb = got.br_startoff; del_cursor: if (cur) xfs_btree_del_cursor(cur, error); if (logflags) xfs_trans_log_inode(tp, ip, logflags); return error; } /* * Split an extent at the file offset split_fsb, so that split_fsb becomes the * first block of a new extent. If split_fsb lies in a hole or at the first * block of an existing extent, there is nothing to split, so just return 0. */ int xfs_bmap_split_extent( struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t split_fsb) { int whichfork = XFS_DATA_FORK; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_btree_cur *cur = NULL; struct xfs_bmbt_irec got; struct xfs_bmbt_irec new; /* split extent */ struct xfs_mount *mp = ip->i_mount; xfs_fsblock_t gotblkcnt; /* new block count for got */ struct xfs_iext_cursor icur; int error = 0; int logflags = 0; int i = 0; if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) { xfs_bmap_mark_sick(ip, whichfork); return -EFSCORRUPTED; } if (xfs_is_shutdown(mp)) return -EIO; /* Read in all the extents */ error = xfs_iread_extents(tp, ip, whichfork); if (error) return error; /* * If there are no extents, or split_fsb lies in a hole, we are done. */ if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) || got.br_startoff >= split_fsb) return 0; gotblkcnt = split_fsb - got.br_startoff; new.br_startoff = split_fsb; new.br_startblock = got.br_startblock + gotblkcnt; new.br_blockcount = got.br_blockcount - gotblkcnt; new.br_state = got.br_state; if (ifp->if_format == XFS_DINODE_FMT_BTREE) { cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); error = xfs_bmbt_lookup_eq(cur, &got, &i); if (error) goto del_cursor; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto del_cursor; } } got.br_blockcount = gotblkcnt; xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur, &got); logflags = XFS_ILOG_CORE; if (cur) { error = xfs_bmbt_update(cur, &got); if (error) goto del_cursor; } else logflags |= XFS_ILOG_DEXT; /* Add new extent */ xfs_iext_next(ifp, &icur); xfs_iext_insert(ip, &icur, &new, 0); ifp->if_nextents++; if (cur) { error = xfs_bmbt_lookup_eq(cur, &new, &i); if (error) goto del_cursor; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto del_cursor; } error = xfs_btree_insert(cur, &i); if (error) goto del_cursor; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cur); error = -EFSCORRUPTED; goto del_cursor; } } /* * Convert to a btree if necessary. */ if (xfs_bmap_needs_btree(ip, whichfork)) { int tmp_logflags; /* partial log flag return val */ ASSERT(cur == NULL); error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, &tmp_logflags, whichfork); logflags |= tmp_logflags; } del_cursor: if (cur) { cur->bc_bmap.allocated = 0; xfs_btree_del_cursor(cur, error); } if (logflags) xfs_trans_log_inode(tp, ip, logflags); return error; } /* Record a bmap intent. 
*/ static inline void __xfs_bmap_add( struct xfs_trans *tp, enum xfs_bmap_intent_type type, struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *bmap) { struct xfs_bmap_intent *bi; if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) || bmap->br_startblock == HOLESTARTBLOCK || bmap->br_startblock == DELAYSTARTBLOCK) return; bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL); INIT_LIST_HEAD(&bi->bi_list); bi->bi_type = type; bi->bi_owner = ip; bi->bi_whichfork = whichfork; bi->bi_bmap = *bmap; xfs_bmap_defer_add(tp, bi); } /* Map an extent into a file. */ void xfs_bmap_map_extent( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *PREV) { __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV); } /* Unmap an extent out of a file. */ void xfs_bmap_unmap_extent( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *PREV) { __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV); } /* * Process one of the deferred bmap operations. We pass back the * btree cursor to maintain our lock on the bmapbt between calls. */ int xfs_bmap_finish_one( struct xfs_trans *tp, struct xfs_bmap_intent *bi) { struct xfs_bmbt_irec *bmap = &bi->bi_bmap; int error = 0; int flags = 0; if (bi->bi_whichfork == XFS_ATTR_FORK) flags |= XFS_BMAPI_ATTRFORK; ASSERT(tp->t_highest_agno == NULLAGNUMBER); trace_xfs_bmap_deferred(bi); if (XFS_TEST_ERROR(tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE)) return -EIO; switch (bi->bi_type) { case XFS_BMAP_MAP: if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN) flags |= XFS_BMAPI_PREALLOC; error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff, bmap->br_blockcount, bmap->br_startblock, flags); bmap->br_blockcount = 0; break; case XFS_BMAP_UNMAP: error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff, &bmap->br_blockcount, flags | XFS_BMAPI_REMAP, 1); break; default: ASSERT(0); xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork); error = -EFSCORRUPTED; } return error; } /* Check that an extent does not have invalid flags or bad ranges. */ xfs_failaddr_t xfs_bmap_validate_extent_raw( struct xfs_mount *mp, bool rtfile, int whichfork, struct xfs_bmbt_irec *irec) { if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount)) return __this_address; if (rtfile && whichfork == XFS_DATA_FORK) { if (!xfs_verify_rtbext(mp, irec->br_startblock, irec->br_blockcount)) return __this_address; } else { if (!xfs_verify_fsbext(mp, irec->br_startblock, irec->br_blockcount)) return __this_address; } if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK) return __this_address; return NULL; } int __init xfs_bmap_intent_init_cache(void) { xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent", sizeof(struct xfs_bmap_intent), 0, 0, NULL); return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM; } void xfs_bmap_intent_destroy_cache(void) { kmem_cache_destroy(xfs_bmap_intent_cache); xfs_bmap_intent_cache = NULL; } /* Check that an inode's extent does not have invalid flags or bad ranges. */ xfs_failaddr_t xfs_bmap_validate_extent( struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *irec) { return xfs_bmap_validate_extent_raw(ip->i_mount, XFS_IS_REALTIME_INODE(ip), whichfork, irec); } /* * Used in xfs_itruncate_extents(). This is the maximum number of extents * freed from a file in a single transaction. */ #define XFS_ITRUNC_MAX_EXTENTS 2 /* * Unmap every extent in part of an inode's fork. We don't do any higher level * invalidation work at all. 
*/ int xfs_bunmapi_range( struct xfs_trans **tpp, struct xfs_inode *ip, uint32_t flags, xfs_fileoff_t startoff, xfs_fileoff_t endoff) { xfs_filblks_t unmap_len = endoff - startoff + 1; int error = 0; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); while (unmap_len > 0) { ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER); error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags, XFS_ITRUNC_MAX_EXTENTS); if (error) goto out; /* free the just unmapped extents */ error = xfs_defer_finish(tpp); if (error) goto out; cond_resched(); } out: return error; } struct xfs_bmap_query_range { xfs_bmap_query_range_fn fn; void *priv; }; /* Format btree record and pass to our callback. */ STATIC int xfs_bmap_query_range_helper( struct xfs_btree_cur *cur, const union xfs_btree_rec *rec, void *priv) { struct xfs_bmap_query_range *query = priv; struct xfs_bmbt_irec irec; xfs_failaddr_t fa; xfs_bmbt_disk_get_all(&rec->bmbt, &irec); fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork, &irec); if (fa) { xfs_btree_mark_sick(cur); return xfs_bmap_complain_bad_rec(cur->bc_ino.ip, cur->bc_ino.whichfork, fa, &irec); } return query->fn(cur, &irec, query->priv); } /* Find all bmaps. */ int xfs_bmap_query_all( struct xfs_btree_cur *cur, xfs_bmap_query_range_fn fn, void *priv) { struct xfs_bmap_query_range query = { .priv = priv, .fn = fn, }; return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query); } /* Helper function to extract extent size hint from inode */ xfs_extlen_t xfs_get_extsz_hint( struct xfs_inode *ip) { /* * No point in aligning allocations if we need to COW to actually * write to them. */ if (!xfs_is_always_cow_inode(ip) && (ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize) return ip->i_extsize; if (XFS_IS_REALTIME_INODE(ip) && ip->i_mount->m_sb.sb_rextsize > 1) return ip->i_mount->m_sb.sb_rextsize; return 0; } /* * Helper function to extract CoW extent size hint from inode. * Between the extent size hint and the CoW extent size hint, we * return the greater of the two. If the value is zero (automatic), * use the default size. */ xfs_extlen_t xfs_get_cowextsz_hint( struct xfs_inode *ip) { xfs_extlen_t a, b; a = 0; if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) a = ip->i_cowextsize; if (XFS_IS_REALTIME_INODE(ip)) { b = 0; if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) b = ip->i_extsize; } else { b = xfs_get_extsz_hint(ip); } a = max(a, b); if (a == 0) return XFS_DEFAULT_COWEXTSZ_HINT; return a; }
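/*
 * Illustrative sketch, not part of xfs_bmap.c: the hint-selection policy
 * implemented by xfs_get_cowextsz_hint() above, modeled as a standalone
 * userspace function so the precedence is easy to see. The CoW extent size
 * hint wins only by being larger than the regular extent size hint, and a
 * result of zero falls back to a default. demo_cowextsz_hint() and the
 * default value below are hypothetical stand-ins for the kernel's
 * XFS_DEFAULT_COWEXTSZ_HINT and the inode hint fields.
 */
#include <stdio.h>

#define DEMO_DEFAULT_COWEXTSZ_HINT 32u	/* stand-in default, in blocks */

static unsigned int demo_cowextsz_hint(unsigned int cowextsize_hint,
				       unsigned int extsize_hint)
{
	/* Take the greater of the CoW hint and the regular hint... */
	unsigned int hint = cowextsize_hint > extsize_hint ?
			    cowextsize_hint : extsize_hint;

	/* ...and use the default when neither hint is set. */
	return hint ? hint : DEMO_DEFAULT_COWEXTSZ_HINT;
}

int main(void)
{
	printf("%u\n", demo_cowextsz_hint(0, 16));	/* -> 16 */
	printf("%u\n", demo_cowextsz_hint(64, 16));	/* -> 64 */
	printf("%u\n", demo_cowextsz_hint(0, 0));	/* -> 32 (default) */
	return 0;
}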
// SPDX-License-Identifier: GPL-2.0 /* * Filesystem-level keyring for fscrypt * * Copyright 2019 Google LLC */ /* * This file implements management of fscrypt master keys in the * filesystem-level keyring, including the ioctls: * * - FS_IOC_ADD_ENCRYPTION_KEY * - FS_IOC_REMOVE_ENCRYPTION_KEY * - FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS * - FS_IOC_GET_ENCRYPTION_KEY_STATUS * * See the "User API" section of Documentation/filesystems/fscrypt.rst for more * information about these ioctls. */ #include <crypto/skcipher.h> #include <linux/export.h> #include <linux/key-type.h> #include <linux/once.h> #include <linux/random.h> #include <linux/seq_file.h> #include <linux/unaligned.h> #include "fscrypt_private.h" /* The master encryption keys for a filesystem (->s_master_keys) */ struct fscrypt_keyring { /* * Lock that protects ->key_hashtable. It does *not* protect the * fscrypt_master_key structs themselves. */ spinlock_t lock; /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */ struct hlist_head key_hashtable[128]; }; static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret) { memzero_explicit(secret, sizeof(*secret)); } static void move_master_key_secret(struct fscrypt_master_key_secret *dst, struct fscrypt_master_key_secret *src) { memcpy(dst, src, sizeof(*dst)); memzero_explicit(src, sizeof(*src)); } static void fscrypt_free_master_key(struct rcu_head *head) { struct fscrypt_master_key *mk = container_of(head, struct fscrypt_master_key, mk_rcu_head); /* * The master key secret and any embedded subkeys should have already * been wiped when the last active reference to the fscrypt_master_key * struct was dropped; doing it here would be unnecessarily late. * Nevertheless, use kfree_sensitive() in case anything was missed. 
*/ kfree_sensitive(mk); } void fscrypt_put_master_key(struct fscrypt_master_key *mk) { if (!refcount_dec_and_test(&mk->mk_struct_refs)) return; /* * No structural references left, so free ->mk_users, and also free the * fscrypt_master_key struct itself after an RCU grace period ensures * that concurrent keyring lookups can no longer find it. */ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 0); if (mk->mk_users) { /* Clear the keyring so the quota gets released right away. */ keyring_clear(mk->mk_users); key_put(mk->mk_users); mk->mk_users = NULL; } call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key); } void fscrypt_put_master_key_activeref(struct super_block *sb, struct fscrypt_master_key *mk) { size_t i; if (!refcount_dec_and_test(&mk->mk_active_refs)) return; /* * No active references left, so complete the full removal of this * fscrypt_master_key struct by removing it from the keyring and * destroying any subkeys embedded in it. */ if (WARN_ON_ONCE(!sb->s_master_keys)) return; spin_lock(&sb->s_master_keys->lock); hlist_del_rcu(&mk->mk_node); spin_unlock(&sb->s_master_keys->lock); /* * ->mk_active_refs == 0 implies that ->mk_present is false and * ->mk_decrypted_inodes is empty. */ WARN_ON_ONCE(mk->mk_present); WARN_ON_ONCE(!list_empty(&mk->mk_decrypted_inodes)); for (i = 0; i <= FSCRYPT_MODE_MAX; i++) { fscrypt_destroy_prepared_key( sb, &mk->mk_direct_keys[i]); fscrypt_destroy_prepared_key( sb, &mk->mk_iv_ino_lblk_64_keys[i]); fscrypt_destroy_prepared_key( sb, &mk->mk_iv_ino_lblk_32_keys[i]); } memzero_explicit(&mk->mk_ino_hash_key, sizeof(mk->mk_ino_hash_key)); mk->mk_ino_hash_key_initialized = false; /* Drop the structural ref associated with the active refs. */ fscrypt_put_master_key(mk); } /* * This transitions the key state from present to incompletely removed, and then * potentially to absent (depending on whether inodes remain). */ static void fscrypt_initiate_key_removal(struct super_block *sb, struct fscrypt_master_key *mk) { WRITE_ONCE(mk->mk_present, false); wipe_master_key_secret(&mk->mk_secret); fscrypt_put_master_key_activeref(sb, mk); } static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) { if (spec->__reserved) return false; return master_key_spec_len(spec) != 0; } static int fscrypt_user_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { /* * We just charge FSCRYPT_MAX_RAW_KEY_SIZE bytes to the user's key quota * for each key, regardless of the exact key size. The amount of memory * actually used is greater than the size of the raw key anyway. */ return key_payload_reserve(key, FSCRYPT_MAX_RAW_KEY_SIZE); } static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); } /* * Type of key in ->mk_users. Each key of this type represents a particular * user who has added a particular master key. * * Note that the name of this key type really should be something like * ".fscrypt-user" instead of simply ".fscrypt". But the shorter name is chosen * mainly for simplicity of presentation in /proc/keys when read by a non-root * user. And it is expected to be rare that a key is actually added by multiple * users, since users should keep their encryption keys confidential. 
*/ static struct key_type key_type_fscrypt_user = { .name = ".fscrypt", .instantiate = fscrypt_user_key_instantiate, .describe = fscrypt_user_key_describe, }; #define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \ (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \ CONST_STRLEN("-users") + 1) #define FSCRYPT_MK_USER_DESCRIPTION_SIZE \ (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1) static void format_mk_users_keyring_description( char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { sprintf(description, "fscrypt-%*phN-users", FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier); } static void format_mk_user_description( char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { sprintf(description, "%*phN.uid.%u", FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier, __kuid_val(current_fsuid())); } /* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */ static int allocate_filesystem_keyring(struct super_block *sb) { struct fscrypt_keyring *keyring; if (sb->s_master_keys) return 0; keyring = kzalloc(sizeof(*keyring), GFP_KERNEL); if (!keyring) return -ENOMEM; spin_lock_init(&keyring->lock); /* * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). * I.e., here we publish ->s_master_keys with a RELEASE barrier so that * concurrent tasks can ACQUIRE it. */ smp_store_release(&sb->s_master_keys, keyring); return 0; } /* * Release all encryption keys that have been added to the filesystem, along * with the keyring that contains them. * * This is called at unmount time, after all potentially-encrypted inodes have * been evicted. The filesystem's underlying block device(s) are still * available at this time; this is important because after user file accesses * have been allowed, this function may need to evict keys from the keyslots of * an inline crypto engine, which requires the block device(s). */ void fscrypt_destroy_keyring(struct super_block *sb) { struct fscrypt_keyring *keyring = sb->s_master_keys; size_t i; if (!keyring) return; for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) { struct hlist_head *bucket = &keyring->key_hashtable[i]; struct fscrypt_master_key *mk; struct hlist_node *tmp; hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) { /* * Since all potentially-encrypted inodes were already * evicted, every key remaining in the keyring should * have an empty inode list, and should only still be in * the keyring due to the single active ref associated * with ->mk_present. There should be no structural * refs beyond the one associated with the active ref. */ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 1); WARN_ON_ONCE(refcount_read(&mk->mk_struct_refs) != 1); WARN_ON_ONCE(!mk->mk_present); fscrypt_initiate_key_removal(sb, mk); } } kfree_sensitive(keyring); sb->s_master_keys = NULL; } static struct hlist_head * fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring, const struct fscrypt_key_specifier *mk_spec) { /* * Since key specifiers should be "random" values, it is sufficient to * use a trivial hash function that just takes the first several bits of * the key specifier. */ unsigned long i = get_unaligned((unsigned long *)&mk_spec->u); return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)]; } /* * Find the specified master key struct in ->s_master_keys and take a structural * ref to it. 
The structural ref guarantees that the key struct continues to * exist, but it does *not* guarantee that ->s_master_keys continues to contain * the key struct. The structural ref needs to be dropped by * fscrypt_put_master_key(). Returns NULL if the key struct is not found. */ struct fscrypt_master_key * fscrypt_find_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec) { struct fscrypt_keyring *keyring; struct hlist_head *bucket; struct fscrypt_master_key *mk; /* * Pairs with the smp_store_release() in allocate_filesystem_keyring(). * I.e., another task can publish ->s_master_keys concurrently, * executing a RELEASE barrier. We need to use smp_load_acquire() here * to safely ACQUIRE the memory the other task published. */ keyring = smp_load_acquire(&sb->s_master_keys); if (keyring == NULL) return NULL; /* No keyring yet, so no keys yet. */ bucket = fscrypt_mk_hash_bucket(keyring, mk_spec); rcu_read_lock(); switch (mk_spec->type) { case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: hlist_for_each_entry_rcu(mk, bucket, mk_node) { if (mk->mk_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && memcmp(mk->mk_spec.u.descriptor, mk_spec->u.descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && refcount_inc_not_zero(&mk->mk_struct_refs)) goto out; } break; case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: hlist_for_each_entry_rcu(mk, bucket, mk_node) { if (mk->mk_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && memcmp(mk->mk_spec.u.identifier, mk_spec->u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 && refcount_inc_not_zero(&mk->mk_struct_refs)) goto out; } break; } mk = NULL; out: rcu_read_unlock(); return mk; } static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE]; struct key *keyring; format_mk_users_keyring_description(description, mk->mk_spec.u.identifier); keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); mk->mk_users = keyring; return 0; } /* * Find the current user's "key" in the master key's ->mk_users. * Returns ERR_PTR(-ENOKEY) if not found. */ static struct key *find_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; key_ref_t keyref; format_mk_user_description(description, mk->mk_spec.u.identifier); /* * We need to mark the keyring reference as "possessed" so that we * acquire permission to search it, via the KEY_POS_SEARCH permission. */ keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/), &key_type_fscrypt_user, description, false); if (IS_ERR(keyref)) { if (PTR_ERR(keyref) == -EAGAIN || /* not found */ PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ keyref = ERR_PTR(-ENOKEY); return ERR_CAST(keyref); } return key_ref_to_ptr(keyref); } /* * Give the current user a "key" in ->mk_users. This charges the user's quota * and marks the master key as added by the current user, so that it cannot be * removed by another user with the key. Either ->mk_sem must be held for * write, or the master key must be still undergoing initialization. 
*/ static int add_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; struct key *mk_user; int err; format_mk_user_description(description, mk->mk_spec.u.identifier); mk_user = key_alloc(&key_type_fscrypt_user, description, current_fsuid(), current_gid(), current_cred(), KEY_POS_SEARCH | KEY_USR_VIEW, 0, NULL); if (IS_ERR(mk_user)) return PTR_ERR(mk_user); err = key_instantiate_and_link(mk_user, NULL, 0, mk->mk_users, NULL); key_put(mk_user); return err; } /* * Remove the current user's "key" from ->mk_users. * ->mk_sem must be held for write. * * Returns 0 if removed, -ENOKEY if not found, or another -errno code. */ static int remove_master_key_user(struct fscrypt_master_key *mk) { struct key *mk_user; int err; mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) return PTR_ERR(mk_user); err = key_unlink(mk->mk_users, mk_user); key_put(mk_user); return err; } /* * Allocate a new fscrypt_master_key, transfer the given secret over to it, and * insert it into sb->s_master_keys. */ static int add_new_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, const struct fscrypt_key_specifier *mk_spec) { struct fscrypt_keyring *keyring = sb->s_master_keys; struct fscrypt_master_key *mk; int err; mk = kzalloc(sizeof(*mk), GFP_KERNEL); if (!mk) return -ENOMEM; init_rwsem(&mk->mk_sem); refcount_set(&mk->mk_struct_refs, 1); mk->mk_spec = *mk_spec; INIT_LIST_HEAD(&mk->mk_decrypted_inodes); spin_lock_init(&mk->mk_decrypted_inodes_lock); if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { err = allocate_master_key_users_keyring(mk); if (err) goto out_put; err = add_master_key_user(mk); if (err) goto out_put; } move_master_key_secret(&mk->mk_secret, secret); mk->mk_present = true; refcount_set(&mk->mk_active_refs, 1); /* ->mk_present is true */ spin_lock(&keyring->lock); hlist_add_head_rcu(&mk->mk_node, fscrypt_mk_hash_bucket(keyring, mk_spec)); spin_unlock(&keyring->lock); return 0; out_put: fscrypt_put_master_key(mk); return err; } #define KEY_DEAD 1 static int add_existing_master_key(struct fscrypt_master_key *mk, struct fscrypt_master_key_secret *secret) { int err; /* * If the current user is already in ->mk_users, then there's nothing to * do. Otherwise, we need to add the user to ->mk_users. (Neither is * applicable for v1 policy keys, which have NULL ->mk_users.) */ if (mk->mk_users) { struct key *mk_user = find_master_key_user(mk); if (mk_user != ERR_PTR(-ENOKEY)) { if (IS_ERR(mk_user)) return PTR_ERR(mk_user); key_put(mk_user); return 0; } err = add_master_key_user(mk); if (err) return err; } /* If the key is incompletely removed, make it present again. */ if (!mk->mk_present) { if (!refcount_inc_not_zero(&mk->mk_active_refs)) { /* * Raced with the last active ref being dropped, so the * key has become, or is about to become, "absent". * Therefore, we need to allocate a new key struct. */ return KEY_DEAD; } move_master_key_secret(&mk->mk_secret, secret); WRITE_ONCE(mk->mk_present, true); } return 0; } static int do_add_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, const struct fscrypt_key_specifier *mk_spec) { static DEFINE_MUTEX(fscrypt_add_key_mutex); struct fscrypt_master_key *mk; int err; mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ mk = fscrypt_find_master_key(sb, mk_spec); if (!mk) { /* Didn't find the key in ->s_master_keys. Add it. 
*/ err = allocate_filesystem_keyring(sb); if (!err) err = add_new_master_key(sb, secret, mk_spec); } else { /* * Found the key in ->s_master_keys. Add the user to ->mk_users * if needed, and make the key "present" again if possible. */ down_write(&mk->mk_sem); err = add_existing_master_key(mk, secret); up_write(&mk->mk_sem); if (err == KEY_DEAD) { /* * We found a key struct, but it's already been fully * removed. Ignore the old struct and add a new one. * fscrypt_add_key_mutex means we don't need to worry * about concurrent adds. */ err = add_new_master_key(sb, secret, mk_spec); } fscrypt_put_master_key(mk); } mutex_unlock(&fscrypt_add_key_mutex); return err; } static int add_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, struct fscrypt_key_specifier *key_spec) { int err; if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]; u8 *kdf_key = secret->bytes; unsigned int kdf_key_size = secret->size; u8 keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY; /* * For raw keys, the fscrypt master key is used directly as the * fscrypt KDF key. For hardware-wrapped keys, we have to pass * the master key to the hardware to derive the KDF key, which * is then only used to derive non-file-contents subkeys. */ if (secret->is_hw_wrapped) { err = fscrypt_derive_sw_secret(sb, secret->bytes, secret->size, sw_secret); if (err) return err; kdf_key = sw_secret; kdf_key_size = sizeof(sw_secret); /* * To avoid weird behavior if someone manages to * determine sw_secret and add it as a raw key, ensure * that hardware-wrapped keys and raw keys will have * different key identifiers by deriving their key * identifiers using different KDF contexts. */ keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY; } fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size); /* * Now that the KDF context is initialized, the raw KDF key is * no longer needed. */ memzero_explicit(kdf_key, kdf_key_size); /* Calculate the key identifier */ fscrypt_hkdf_expand(&secret->hkdf, keyid_kdf_ctx, NULL, 0, key_spec->u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); } return do_add_master_key(sb, secret, key_spec); } /* * Validate the size of an fscrypt master key being added. Note that this is * just an initial check, as we don't know which ciphers will be used yet. * There is a stricter size check later when the key is actually used by a file. */ static inline bool fscrypt_valid_key_size(size_t size, u32 add_key_flags) { u32 max_size = (add_key_flags & FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ? 
FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_RAW_KEY_SIZE; return size >= FSCRYPT_MIN_KEY_SIZE && size <= max_size; } static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; if (prep->datalen < sizeof(*payload)) return -EINVAL; if (!fscrypt_valid_key_size(prep->datalen - sizeof(*payload), payload->flags)) return -EINVAL; if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) return -EINVAL; if (payload->flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) return -EINVAL; prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL); if (!prep->payload.data[0]) return -ENOMEM; prep->quotalen = prep->datalen; return 0; } static void fscrypt_provisioning_key_free_preparse( struct key_preparsed_payload *prep) { kfree_sensitive(prep->payload.data[0]); } static void fscrypt_provisioning_key_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_positive(key)) { const struct fscrypt_provisioning_key_payload *payload = key->payload.data[0]; seq_printf(m, ": %u [%u]", key->datalen, payload->type); } } static void fscrypt_provisioning_key_destroy(struct key *key) { kfree_sensitive(key->payload.data[0]); } static struct key_type key_type_fscrypt_provisioning = { .name = "fscrypt-provisioning", .preparse = fscrypt_provisioning_key_preparse, .free_preparse = fscrypt_provisioning_key_free_preparse, .instantiate = generic_key_instantiate, .describe = fscrypt_provisioning_key_describe, .destroy = fscrypt_provisioning_key_destroy, }; /* * Retrieve the key from the Linux keyring key specified by 'key_id', and store * it into 'secret'. * * The key must be of type "fscrypt-provisioning" and must have the 'type' and * 'flags' fields of the payload set to the given values, indicating that the key * is intended for use for the specified purpose. We don't use the "logon" key * type because there's no way to completely restrict the use of such keys; they * can be used by any kernel API that accepts "logon" keys and doesn't require a * specific service prefix. * * The ability to specify the key via Linux keyring key is intended for cases * where userspace needs to re-add keys after the filesystem is unmounted and * re-mounted. Most users should just provide the key directly instead. */ static int get_keyring_key(u32 key_id, u32 type, u32 flags, struct fscrypt_master_key_secret *secret) { key_ref_t ref; struct key *key; const struct fscrypt_provisioning_key_payload *payload; int err; ref = lookup_user_key(key_id, 0, KEY_NEED_SEARCH); if (IS_ERR(ref)) return PTR_ERR(ref); key = key_ref_to_ptr(ref); if (key->type != &key_type_fscrypt_provisioning) goto bad_key; payload = key->payload.data[0]; /* * Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. * Similarly, don't allow hardware-wrapped keys to be used as * non-hardware-wrapped keys and vice versa. */ if (payload->type != type || payload->flags != flags) goto bad_key; secret->size = key->datalen - sizeof(*payload); memcpy(secret->bytes, payload->raw, secret->size); err = 0; goto out_put; bad_key: err = -EKEYREJECTED; out_put: key_ref_put(ref); return err; } /* * Add a master encryption key to the filesystem, causing all files which were * encrypted with it to appear "unlocked" (decrypted) when accessed. * * When adding a key for use by v1 encryption policies, this ioctl is * privileged, and userspace must provide the 'key_descriptor'. 
* * When adding a key for use by v2+ encryption policies, this ioctl is * unprivileged. This is needed, in general, to allow non-root users to use * encryption without encountering the visibility problems of process-subscribed * keyrings and the inability to properly remove keys. This works by having * each key identified by its cryptographically secure hash --- the * 'key_identifier'. The cryptographic hash ensures that a malicious user * cannot add the wrong key for a given identifier. Furthermore, each added key * is charged to the appropriate user's quota for the keyrings service, which * prevents a malicious user from adding too many keys. Finally, we forbid a * user from removing a key while other users have added it too, which prevents * a user who knows another user's key from causing a denial-of-service by * removing it at an inopportune time. (We tolerate that a user who knows a key * can prevent other users from removing it.) * * For more details, see the "FS_IOC_ADD_ENCRYPTION_KEY" section of * Documentation/filesystems/fscrypt.rst. */ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_add_key_arg __user *uarg = _uarg; struct fscrypt_add_key_arg arg; struct fscrypt_master_key_secret secret; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; /* * Only root can add keys that are identified by an arbitrary descriptor * rather than by a cryptographic hash --- since otherwise a malicious * user could add the wrong key. */ if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && !capable(CAP_SYS_ADMIN)) return -EACCES; memset(&secret, 0, sizeof(secret)); if (arg.flags) { if (arg.flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) return -EINVAL; if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) return -EINVAL; secret.is_hw_wrapped = true; } if (arg.key_id) { if (arg.raw_size != 0) return -EINVAL; err = get_keyring_key(arg.key_id, arg.key_spec.type, arg.flags, &secret); if (err) goto out_wipe_secret; } else { if (!fscrypt_valid_key_size(arg.raw_size, arg.flags)) return -EINVAL; secret.size = arg.raw_size; err = -EFAULT; if (copy_from_user(secret.bytes, uarg->raw, secret.size)) goto out_wipe_secret; } err = add_master_key(sb, &secret, &arg.key_spec); if (err) goto out_wipe_secret; /* Return the key identifier to userspace, if applicable */ err = -EFAULT; if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && copy_to_user(uarg->key_spec.u.identifier, arg.key_spec.u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE)) goto out_wipe_secret; err = 0; out_wipe_secret: wipe_master_key_secret(&secret); return err; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); static void fscrypt_get_test_dummy_secret(struct fscrypt_master_key_secret *secret) { static u8 test_key[FSCRYPT_MAX_RAW_KEY_SIZE]; get_random_once(test_key, sizeof(test_key)); memset(secret, 0, sizeof(*secret)); secret->size = sizeof(test_key); memcpy(secret->bytes, test_key, sizeof(test_key)); } void fscrypt_get_test_dummy_key_identifier( u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_master_key_secret secret; fscrypt_get_test_dummy_secret(&secret); fscrypt_init_hkdf(&secret.hkdf, secret.bytes, secret.size); fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY, NULL, 0, key_identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); wipe_master_key_secret(&secret); } /** * 
fscrypt_add_test_dummy_key() - add the test dummy encryption key * @sb: the filesystem instance to add the key to * @key_spec: the key specifier of the test dummy encryption key * * Add the key for the test_dummy_encryption mount option to the filesystem. To * prevent misuse of this mount option, a per-boot random key is used instead of * a hardcoded one. This makes it so that any encrypted files created using * this option won't be accessible after a reboot. * * Return: 0 on success, -errno on failure */ int fscrypt_add_test_dummy_key(struct super_block *sb, struct fscrypt_key_specifier *key_spec) { struct fscrypt_master_key_secret secret; int err; fscrypt_get_test_dummy_secret(&secret); err = add_master_key(sb, &secret, key_spec); wipe_master_key_secret(&secret); return err; } /* * Verify that the current user has added a master key with the given identifier * (returns -ENOKEY if not). This is needed to prevent a user from encrypting * their files using some other user's key which they don't actually know. * Cryptographically this isn't much of a problem, but the semantics of this * would be a bit weird, so it's best to just forbid it. * * The system administrator (CAP_FOWNER) can override this, which should be * enough for any use cases where encryption policies are being set using keys * that were chosen ahead of time but aren't available at the moment. * * Note that the key may have already been removed by the time this returns, but * that's okay; we just care whether the key was there at some point. * * Return: 0 if the key is added, -ENOKEY if it isn't, or another -errno code */ int fscrypt_verify_key_added(struct super_block *sb, const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_key_specifier mk_spec; struct fscrypt_master_key *mk; struct key *mk_user; int err; mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); mk = fscrypt_find_master_key(sb, &mk_spec); if (!mk) { err = -ENOKEY; goto out; } down_read(&mk->mk_sem); mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) { err = PTR_ERR(mk_user); } else { key_put(mk_user); err = 0; } up_read(&mk->mk_sem); fscrypt_put_master_key(mk); out: if (err == -ENOKEY && capable(CAP_FOWNER)) err = 0; return err; } /* * Try to evict the inode's dentries from the dentry cache. If the inode is a * directory, then it can have at most one dentry; however, that dentry may be * pinned by child dentries, so first try to evict the children too. 
*/ static void shrink_dcache_inode(struct inode *inode) { struct dentry *dentry; if (S_ISDIR(inode->i_mode)) { dentry = d_find_any_alias(inode); if (dentry) { shrink_dcache_parent(dentry); dput(dentry); } } d_prune_aliases(inode); } static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk) { struct fscrypt_inode_info *ci; struct inode *inode; struct inode *toput_inode = NULL; spin_lock(&mk->mk_decrypted_inodes_lock); list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) { inode = ci->ci_inode; spin_lock(&inode->i_lock); if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&mk->mk_decrypted_inodes_lock); shrink_dcache_inode(inode); iput(toput_inode); toput_inode = inode; spin_lock(&mk->mk_decrypted_inodes_lock); } spin_unlock(&mk->mk_decrypted_inodes_lock); iput(toput_inode); } static int check_for_busy_inodes(struct super_block *sb, struct fscrypt_master_key *mk) { struct list_head *pos; size_t busy_count = 0; unsigned long ino; char ino_str[50] = ""; spin_lock(&mk->mk_decrypted_inodes_lock); list_for_each(pos, &mk->mk_decrypted_inodes) busy_count++; if (busy_count == 0) { spin_unlock(&mk->mk_decrypted_inodes_lock); return 0; } { /* select an example file to show for debugging purposes */ struct inode *inode = list_first_entry(&mk->mk_decrypted_inodes, struct fscrypt_inode_info, ci_master_key_link)->ci_inode; ino = inode->i_ino; } spin_unlock(&mk->mk_decrypted_inodes_lock); /* If the inode is currently being created, ino may still be 0. */ if (ino) snprintf(ino_str, sizeof(ino_str), ", including ino %lu", ino); fscrypt_warn(NULL, "%s: %zu inode(s) still busy after removing key with %s %*phN%s", sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, ino_str); return -EBUSY; } static int try_to_lock_encrypted_files(struct super_block *sb, struct fscrypt_master_key *mk) { int err1; int err2; /* * An inode can't be evicted while it is dirty or has dirty pages. * Thus, we first have to clean the inodes in ->mk_decrypted_inodes. * * Just do it the easy way: call sync_filesystem(). It's overkill, but * it works, and it's more important to minimize the amount of caches we * drop than the amount of data we sync. Also, unprivileged users can * already call sync_filesystem() via sys_syncfs() or sys_sync(). */ down_read(&sb->s_umount); err1 = sync_filesystem(sb); up_read(&sb->s_umount); /* If a sync error occurs, still try to evict as much as possible. */ /* * Inodes are pinned by their dentries, so we have to evict their * dentries. shrink_dcache_sb() would suffice, but would be overkill * and inappropriate for use by unprivileged users. So instead go * through the inodes' alias lists and try to evict each dentry. */ evict_dentries_for_decrypted_inodes(mk); /* * evict_dentries_for_decrypted_inodes() already iput() each inode in * the list; any inodes for which that dropped the last reference will * have been evicted due to fscrypt_drop_inode() detecting the key * removal and telling the VFS to evict the inode. So to finish, we * just need to check whether any inodes couldn't be evicted. */ err2 = check_for_busy_inodes(sb, mk); return err1 ?: err2; } /* * Try to remove an fscrypt master encryption key. * * FS_IOC_REMOVE_ENCRYPTION_KEY (all_users=false) removes the current user's * claim to the key, then removes the key itself if no other users have claims. 
* FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS (all_users=true) always removes the * key itself. * * To "remove the key itself", first we transition the key to the "incompletely * removed" state, so that no more inodes can be unlocked with it. Then we try * to evict all cached inodes that had been unlocked with the key. * * If all inodes were evicted, then we unlink the fscrypt_master_key from the * keyring. Otherwise it remains in the keyring in the "incompletely removed" * state where it tracks the list of remaining inodes. Userspace can execute * the ioctl again later to retry eviction, or alternatively can re-add the key. * * For more details, see the "Removing keys" section of * Documentation/filesystems/fscrypt.rst. */ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_remove_key_arg __user *uarg = _uarg; struct fscrypt_remove_key_arg arg; struct fscrypt_master_key *mk; u32 status_flags = 0; int err; bool inodes_remain; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; /* * Only root can add and remove keys that are identified by an arbitrary * descriptor rather than by a cryptographic hash. */ if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && !capable(CAP_SYS_ADMIN)) return -EACCES; /* Find the key being removed. */ mk = fscrypt_find_master_key(sb, &arg.key_spec); if (!mk) return -ENOKEY; down_write(&mk->mk_sem); /* If relevant, remove current user's (or all users) claim to the key */ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { if (all_users) err = keyring_clear(mk->mk_users); else err = remove_master_key_user(mk); if (err) { up_write(&mk->mk_sem); goto out_put_key; } if (mk->mk_users->keys.nr_leaves_on_tree != 0) { /* * Other users have still added the key too. We removed * the current user's claim to the key, but we still * can't remove the key itself. */ status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS; err = 0; up_write(&mk->mk_sem); goto out_put_key; } } /* No user claims remaining. Initiate removal of the key. */ err = -ENOKEY; if (mk->mk_present) { fscrypt_initiate_key_removal(sb, mk); err = 0; } inodes_remain = refcount_read(&mk->mk_active_refs) > 0; up_write(&mk->mk_sem); if (inodes_remain) { /* Some inodes still reference this key; try to evict them. */ err = try_to_lock_encrypted_files(sb, mk); if (err == -EBUSY) { status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY; err = 0; } } /* * We return 0 if we successfully did something: removed a claim to the * key, initiated removal of the key, or tried locking the files again. * Users need to check the informational status flags if they care * whether the key has been fully removed including all files locked. */ out_put_key: fscrypt_put_master_key(mk); if (err == 0) err = put_user(status_flags, &uarg->removal_status_flags); return err; } int fscrypt_ioctl_remove_key(struct file *filp, void __user *uarg) { return do_remove_key(filp, uarg, false); } EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *uarg) { if (!capable(CAP_SYS_ADMIN)) return -EACCES; return do_remove_key(filp, uarg, true); } EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users); /* * Retrieve the status of an fscrypt master encryption key. 
* * We set ->status to indicate whether the key is absent, present, or * incompletely removed. (For an explanation of what these statuses mean and * how they are represented internally, see struct fscrypt_master_key.) This * field allows applications to easily determine the status of an encrypted * directory without using a hack such as trying to open a regular file in it * (which can confuse the "incompletely removed" status with absent or present). * * In addition, for v2 policy keys we allow applications to determine, via * ->status_flags and ->user_count, whether the key has been added by the * current user, by other users, or by both. Most applications should not need * this, since ordinarily only one user should know a given key. However, if a * secret key is shared by multiple users, applications may wish to add an * already-present key to prevent other users from removing it. This ioctl can * be used to check whether that really is the case before the work is done to * add the key --- which might e.g. require prompting the user for a passphrase. * * For more details, see the "FS_IOC_GET_ENCRYPTION_KEY_STATUS" section of * Documentation/filesystems/fscrypt.rst. */ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_get_key_status_arg arg; struct fscrypt_master_key *mk; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; arg.status_flags = 0; arg.user_count = 0; memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); mk = fscrypt_find_master_key(sb, &arg.key_spec); if (!mk) { arg.status = FSCRYPT_KEY_STATUS_ABSENT; err = 0; goto out; } down_read(&mk->mk_sem); if (!mk->mk_present) { arg.status = refcount_read(&mk->mk_active_refs) > 0 ? FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED : FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */; err = 0; goto out_release_key; } arg.status = FSCRYPT_KEY_STATUS_PRESENT; if (mk->mk_users) { struct key *mk_user; arg.user_count = mk->mk_users->keys.nr_leaves_on_tree; mk_user = find_master_key_user(mk); if (!IS_ERR(mk_user)) { arg.status_flags |= FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF; key_put(mk_user); } else if (mk_user != ERR_PTR(-ENOKEY)) { err = PTR_ERR(mk_user); goto out_release_key; } } err = 0; out_release_key: up_read(&mk->mk_sem); fscrypt_put_master_key(mk); out: if (!err && copy_to_user(uarg, &arg, sizeof(arg))) err = -EFAULT; return err; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_key_status); int __init fscrypt_init_keyring(void) { int err; err = register_key_type(&key_type_fscrypt_user); if (err) return err; err = register_key_type(&key_type_fscrypt_provisioning); if (err) goto err_unregister_fscrypt_user; return 0; err_unregister_fscrypt_user: unregister_key_type(&key_type_fscrypt_user); return err; }
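/*
 * Illustrative userspace sketch, not part of this file: adding a 64-byte raw
 * key via FS_IOC_ADD_ENCRYPTION_KEY, the ioctl served by
 * fscrypt_ioctl_add_key() above. Assumes <linux/fscrypt.h> from a kernel with
 * fscrypt v2 support; the "/mnt" path and the key bytes are placeholders, and
 * error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fscrypt.h>

int main(void)
{
	__u8 raw[64] = { 0x01 };	/* placeholder; use real key material */
	struct fscrypt_add_key_arg *arg;
	int fd, i, ret = 1;

	/* The key bytes follow the fixed-size header as a flexible array. */
	arg = calloc(1, sizeof(*arg) + sizeof(raw));
	if (!arg)
		return 1;
	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	arg->raw_size = sizeof(raw);
	memcpy(arg->raw, raw, sizeof(raw));

	/* Any file or directory on the target filesystem works. */
	fd = open("/mnt", O_RDONLY);
	if (fd < 0)
		goto out;
	if (ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg) != 0) {
		perror("FS_IOC_ADD_ENCRYPTION_KEY");
		goto out_close;
	}

	/* On success the kernel filled in the HKDF-derived key identifier. */
	printf("key identifier: ");
	for (i = 0; i < FSCRYPT_KEY_IDENTIFIER_SIZE; i++)
		printf("%02x", arg->key_spec.u.identifier[i]);
	printf("\n");
	ret = 0;
out_close:
	close(fd);
out:
	memset(arg->raw, 0, sizeof(raw));	/* wipe the key material */
	free(arg);
	return ret;
}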
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (C) 2020 Google LLC.
 */

#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/binfmts.h>
#include <linux/lsm_hooks.h>
#include <linux/bpf_lsm.h>
#include <linux/kallsyms.h>
#include <net/bpf_sk_storage.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf_ids.h>
#include <linux/ima.h>
#include <linux/bpf-cgroup.h>

/* For every LSM hook that allows attachment of BPF programs, declare a nop
 * function where a BPF program can be attached.
 */
#define LSM_HOOK(RET, DEFAULT, NAME, ...)	\
noinline RET bpf_lsm_##NAME(__VA_ARGS__)	\
{						\
	return DEFAULT;				\
}

#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK

#define LSM_HOOK(RET, DEFAULT, NAME, ...) BTF_ID(func, bpf_lsm_##NAME)
BTF_SET_START(bpf_lsm_hooks)
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
BTF_SET_END(bpf_lsm_hooks)

BTF_SET_START(bpf_lsm_disabled_hooks)
BTF_ID(func, bpf_lsm_vm_enough_memory)
BTF_ID(func, bpf_lsm_inode_need_killpriv)
BTF_ID(func, bpf_lsm_inode_getsecurity)
BTF_ID(func, bpf_lsm_inode_listsecurity)
BTF_ID(func, bpf_lsm_inode_copy_up_xattr)
BTF_ID(func, bpf_lsm_getselfattr)
BTF_ID(func, bpf_lsm_getprocattr)
BTF_ID(func, bpf_lsm_setprocattr)
#ifdef CONFIG_KEYS
BTF_ID(func, bpf_lsm_key_getsecurity)
#endif
#ifdef CONFIG_AUDIT
BTF_ID(func, bpf_lsm_audit_rule_match)
#endif
BTF_ID(func, bpf_lsm_ismaclabel)
BTF_SET_END(bpf_lsm_disabled_hooks)

/* List of LSM hooks that should operate on 'current' cgroup regardless
 * of function signature.
 */
BTF_SET_START(bpf_lsm_current_hooks)
/* operate on freshly allocated sk without any cgroup association */
#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_sk_alloc_security)
BTF_ID(func, bpf_lsm_sk_free_security)
#endif
BTF_SET_END(bpf_lsm_current_hooks)

/* List of LSM hooks that trigger while the socket is properly locked.
*/ BTF_SET_START(bpf_lsm_locked_sockopt_hooks) #ifdef CONFIG_SECURITY_NETWORK BTF_ID(func, bpf_lsm_sock_graft) BTF_ID(func, bpf_lsm_inet_csk_clone) BTF_ID(func, bpf_lsm_inet_conn_established) #endif BTF_SET_END(bpf_lsm_locked_sockopt_hooks) /* List of LSM hooks that trigger while the socket is _not_ locked, * but it's ok to call bpf_{g,s}etsockopt because the socket is still * in the early init phase. */ BTF_SET_START(bpf_lsm_unlocked_sockopt_hooks) #ifdef CONFIG_SECURITY_NETWORK BTF_ID(func, bpf_lsm_socket_post_create) BTF_ID(func, bpf_lsm_socket_socketpair) #endif BTF_SET_END(bpf_lsm_unlocked_sockopt_hooks) #ifdef CONFIG_CGROUP_BPF void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func) { const struct btf_param *args __maybe_unused; if (btf_type_vlen(prog->aux->attach_func_proto) < 1 || btf_id_set_contains(&bpf_lsm_current_hooks, prog->aux->attach_btf_id)) { *bpf_func = __cgroup_bpf_run_lsm_current; return; } #ifdef CONFIG_NET args = btf_params(prog->aux->attach_func_proto); if (args[0].type == btf_sock_ids[BTF_SOCK_TYPE_SOCKET]) *bpf_func = __cgroup_bpf_run_lsm_socket; else if (args[0].type == btf_sock_ids[BTF_SOCK_TYPE_SOCK]) *bpf_func = __cgroup_bpf_run_lsm_sock; else #endif *bpf_func = __cgroup_bpf_run_lsm_current; } #endif int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog) { u32 btf_id = prog->aux->attach_btf_id; const char *func_name = prog->aux->attach_func_name; if (!prog->gpl_compatible) { bpf_log(vlog, "LSM programs must have a GPL compatible license\n"); return -EINVAL; } if (btf_id_set_contains(&bpf_lsm_disabled_hooks, btf_id)) { bpf_log(vlog, "attach_btf_id %u points to disabled hook %s\n", btf_id, func_name); return -EINVAL; } if (!btf_id_set_contains(&bpf_lsm_hooks, btf_id)) { bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n", btf_id, func_name); return -EINVAL; } return 0; } /* Mask for all the currently supported BPRM option flags */ #define BPF_F_BRPM_OPTS_MASK BPF_F_BPRM_SECUREEXEC BPF_CALL_2(bpf_bprm_opts_set, struct linux_binprm *, bprm, u64, flags) { if (flags & ~BPF_F_BRPM_OPTS_MASK) return -EINVAL; bprm->secureexec = (flags & BPF_F_BPRM_SECUREEXEC); return 0; } BTF_ID_LIST_SINGLE(bpf_bprm_opts_set_btf_ids, struct, linux_binprm) static const struct bpf_func_proto bpf_bprm_opts_set_proto = { .func = bpf_bprm_opts_set, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = &bpf_bprm_opts_set_btf_ids[0], .arg2_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_ima_inode_hash, struct inode *, inode, void *, dst, u32, size) { return ima_inode_hash(inode, dst, size); } static bool bpf_ima_inode_hash_allowed(const struct bpf_prog *prog) { return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); } BTF_ID_LIST_SINGLE(bpf_ima_inode_hash_btf_ids, struct, inode) static const struct bpf_func_proto bpf_ima_inode_hash_proto = { .func = bpf_ima_inode_hash, .gpl_only = false, .might_sleep = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = &bpf_ima_inode_hash_btf_ids[0], .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, .allowed = bpf_ima_inode_hash_allowed, }; BPF_CALL_3(bpf_ima_file_hash, struct file *, file, void *, dst, u32, size) { return ima_file_hash(file, dst, size); } BTF_ID_LIST_SINGLE(bpf_ima_file_hash_btf_ids, struct, file) static const struct bpf_func_proto bpf_ima_file_hash_proto = { .func = bpf_ima_file_hash, .gpl_only = false, .might_sleep = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = 
&bpf_ima_file_hash_btf_ids[0], .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, .allowed = bpf_ima_inode_hash_allowed, }; BPF_CALL_1(bpf_get_attach_cookie, void *, ctx) { struct bpf_trace_run_ctx *run_ctx; run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); return run_ctx->bpf_cookie; } static const struct bpf_func_proto bpf_get_attach_cookie_proto = { .func = bpf_get_attach_cookie, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; static const struct bpf_func_proto * bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { const struct bpf_func_proto *func_proto; if (prog->expected_attach_type == BPF_LSM_CGROUP) { func_proto = cgroup_common_func_proto(func_id, prog); if (func_proto) return func_proto; } switch (func_id) { case BPF_FUNC_inode_storage_get: return &bpf_inode_storage_get_proto; case BPF_FUNC_inode_storage_delete: return &bpf_inode_storage_delete_proto; #ifdef CONFIG_NET case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; #endif /* CONFIG_NET */ case BPF_FUNC_spin_lock: return &bpf_spin_lock_proto; case BPF_FUNC_spin_unlock: return &bpf_spin_unlock_proto; case BPF_FUNC_bprm_opts_set: return &bpf_bprm_opts_set_proto; case BPF_FUNC_ima_inode_hash: return &bpf_ima_inode_hash_proto; case BPF_FUNC_ima_file_hash: return &bpf_ima_file_hash_proto; case BPF_FUNC_get_attach_cookie: return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto : NULL; #ifdef CONFIG_NET case BPF_FUNC_setsockopt: if (prog->expected_attach_type != BPF_LSM_CGROUP) return NULL; if (btf_id_set_contains(&bpf_lsm_locked_sockopt_hooks, prog->aux->attach_btf_id)) return &bpf_sk_setsockopt_proto; if (btf_id_set_contains(&bpf_lsm_unlocked_sockopt_hooks, prog->aux->attach_btf_id)) return &bpf_unlocked_sk_setsockopt_proto; return NULL; case BPF_FUNC_getsockopt: if (prog->expected_attach_type != BPF_LSM_CGROUP) return NULL; if (btf_id_set_contains(&bpf_lsm_locked_sockopt_hooks, prog->aux->attach_btf_id)) return &bpf_sk_getsockopt_proto; if (btf_id_set_contains(&bpf_lsm_unlocked_sockopt_hooks, prog->aux->attach_btf_id)) return &bpf_unlocked_sk_getsockopt_proto; return NULL; #endif default: return tracing_prog_func_proto(func_id, prog); } } /* The set of hooks which are called without pagefaults disabled and are allowed * to "sleep" and thus can be used for sleepable BPF programs. 
*/ BTF_SET_START(sleepable_lsm_hooks) BTF_ID(func, bpf_lsm_bpf) BTF_ID(func, bpf_lsm_bpf_map) BTF_ID(func, bpf_lsm_bpf_map_create) BTF_ID(func, bpf_lsm_bpf_map_free) BTF_ID(func, bpf_lsm_bpf_prog) BTF_ID(func, bpf_lsm_bpf_prog_load) BTF_ID(func, bpf_lsm_bpf_prog_free) BTF_ID(func, bpf_lsm_bpf_token_create) BTF_ID(func, bpf_lsm_bpf_token_free) BTF_ID(func, bpf_lsm_bpf_token_cmd) BTF_ID(func, bpf_lsm_bpf_token_capable) BTF_ID(func, bpf_lsm_bprm_check_security) BTF_ID(func, bpf_lsm_bprm_committed_creds) BTF_ID(func, bpf_lsm_bprm_committing_creds) BTF_ID(func, bpf_lsm_bprm_creds_for_exec) BTF_ID(func, bpf_lsm_bprm_creds_from_file) BTF_ID(func, bpf_lsm_capget) BTF_ID(func, bpf_lsm_capset) BTF_ID(func, bpf_lsm_cred_prepare) BTF_ID(func, bpf_lsm_file_ioctl) BTF_ID(func, bpf_lsm_file_lock) BTF_ID(func, bpf_lsm_file_open) BTF_ID(func, bpf_lsm_file_post_open) BTF_ID(func, bpf_lsm_file_receive) BTF_ID(func, bpf_lsm_inode_create) BTF_ID(func, bpf_lsm_inode_free_security) BTF_ID(func, bpf_lsm_inode_getattr) BTF_ID(func, bpf_lsm_inode_getxattr) BTF_ID(func, bpf_lsm_inode_mknod) BTF_ID(func, bpf_lsm_inode_need_killpriv) BTF_ID(func, bpf_lsm_inode_post_setxattr) BTF_ID(func, bpf_lsm_inode_post_removexattr) BTF_ID(func, bpf_lsm_inode_readlink) BTF_ID(func, bpf_lsm_inode_removexattr) BTF_ID(func, bpf_lsm_inode_rename) BTF_ID(func, bpf_lsm_inode_rmdir) BTF_ID(func, bpf_lsm_inode_setattr) BTF_ID(func, bpf_lsm_inode_setxattr) BTF_ID(func, bpf_lsm_inode_symlink) BTF_ID(func, bpf_lsm_inode_unlink) BTF_ID(func, bpf_lsm_kernel_module_request) BTF_ID(func, bpf_lsm_kernel_read_file) BTF_ID(func, bpf_lsm_kernfs_init_security) #ifdef CONFIG_SECURITY_PATH BTF_ID(func, bpf_lsm_path_unlink) BTF_ID(func, bpf_lsm_path_mkdir) BTF_ID(func, bpf_lsm_path_rmdir) BTF_ID(func, bpf_lsm_path_truncate) BTF_ID(func, bpf_lsm_path_symlink) BTF_ID(func, bpf_lsm_path_link) BTF_ID(func, bpf_lsm_path_rename) BTF_ID(func, bpf_lsm_path_chmod) BTF_ID(func, bpf_lsm_path_chown) #endif /* CONFIG_SECURITY_PATH */ BTF_ID(func, bpf_lsm_mmap_file) BTF_ID(func, bpf_lsm_netlink_send) BTF_ID(func, bpf_lsm_path_notify) BTF_ID(func, bpf_lsm_release_secctx) BTF_ID(func, bpf_lsm_sb_alloc_security) BTF_ID(func, bpf_lsm_sb_eat_lsm_opts) BTF_ID(func, bpf_lsm_sb_kern_mount) BTF_ID(func, bpf_lsm_sb_mount) BTF_ID(func, bpf_lsm_sb_remount) BTF_ID(func, bpf_lsm_sb_set_mnt_opts) BTF_ID(func, bpf_lsm_sb_show_options) BTF_ID(func, bpf_lsm_sb_statfs) BTF_ID(func, bpf_lsm_sb_umount) BTF_ID(func, bpf_lsm_settime) #ifdef CONFIG_SECURITY_NETWORK BTF_ID(func, bpf_lsm_inet_conn_established) BTF_ID(func, bpf_lsm_socket_accept) BTF_ID(func, bpf_lsm_socket_bind) BTF_ID(func, bpf_lsm_socket_connect) BTF_ID(func, bpf_lsm_socket_create) BTF_ID(func, bpf_lsm_socket_getpeername) BTF_ID(func, bpf_lsm_socket_getpeersec_dgram) BTF_ID(func, bpf_lsm_socket_getsockname) BTF_ID(func, bpf_lsm_socket_getsockopt) BTF_ID(func, bpf_lsm_socket_listen) BTF_ID(func, bpf_lsm_socket_post_create) BTF_ID(func, bpf_lsm_socket_recvmsg) BTF_ID(func, bpf_lsm_socket_sendmsg) BTF_ID(func, bpf_lsm_socket_shutdown) BTF_ID(func, bpf_lsm_socket_socketpair) #endif /* CONFIG_SECURITY_NETWORK */ BTF_ID(func, bpf_lsm_syslog) BTF_ID(func, bpf_lsm_task_alloc) BTF_ID(func, bpf_lsm_task_prctl) BTF_ID(func, bpf_lsm_task_setscheduler) BTF_ID(func, bpf_lsm_task_to_inode) BTF_ID(func, bpf_lsm_userns_create) BTF_SET_END(sleepable_lsm_hooks) BTF_SET_START(untrusted_lsm_hooks) BTF_ID(func, bpf_lsm_bpf_map_free) BTF_ID(func, bpf_lsm_bpf_prog_free) BTF_ID(func, bpf_lsm_file_alloc_security) BTF_ID(func, 
bpf_lsm_file_free_security)
#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_sk_alloc_security)
BTF_ID(func, bpf_lsm_sk_free_security)
#endif /* CONFIG_SECURITY_NETWORK */
BTF_ID(func, bpf_lsm_task_free)
BTF_SET_END(untrusted_lsm_hooks)

bool bpf_lsm_is_sleepable_hook(u32 btf_id)
{
	return btf_id_set_contains(&sleepable_lsm_hooks, btf_id);
}

bool bpf_lsm_is_trusted(const struct bpf_prog *prog)
{
	return !btf_id_set_contains(&untrusted_lsm_hooks, prog->aux->attach_btf_id);
}

const struct bpf_prog_ops lsm_prog_ops = {
};

const struct bpf_verifier_ops lsm_verifier_ops = {
	.get_func_proto = bpf_lsm_func_proto,
	.is_valid_access = btf_ctx_access,
};

/* hooks return 0 or 1 */
BTF_SET_START(bool_lsm_hooks)
#ifdef CONFIG_SECURITY_NETWORK_XFRM
BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match)
#endif
#ifdef CONFIG_AUDIT
BTF_ID(func, bpf_lsm_audit_rule_known)
#endif
BTF_ID(func, bpf_lsm_inode_xattr_skipcap)
BTF_SET_END(bool_lsm_hooks)

int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
			     struct bpf_retval_range *retval_range)
{
	/* no return value range for void hooks */
	if (!prog->aux->attach_func_proto->type)
		return -EINVAL;

	if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) {
		retval_range->minval = 0;
		retval_range->maxval = 1;
	} else {
		/* All other available LSM hooks, except task_prctl, return 0
		 * on success and negative error code on failure.
		 * To keep things simple, we only allow bpf progs to return 0
		 * or negative errno for task_prctl too.
		 */
		retval_range->minval = -MAX_ERRNO;
		retval_range->maxval = 0;
	}
	return 0;
}
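/*
 * [Editorial example -- not part of the original source.] A sketch of what
 * attaches to these hooks from userspace: a sleepable BPF LSM program built
 * with libbpf. SEC("lsm.s/...") selects a sleepable program, which is only
 * accepted for hooks in sleepable_lsm_hooks above, and the GPL license
 * string satisfies the check in bpf_lsm_verify_prog(). "vmlinux.h" is the
 * usual BTF-generated header; the program name is hypothetical.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("lsm.s/file_open")
int BPF_PROG(restrict_file_open, struct file *file, int ret)
{
	/* ret carries the verdict of any previously-run program */
	if (ret)
		return ret;

	return 0;	/* 0 allows; a negative errno would deny the open */
}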
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/err.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "transaction.h"
#include "disk-io.h"
#include "qgroup.h"
#include "space-info.h"
#include "accessors.h"
#include "root-tree.h"
#include "orphan.h"

/*
 * Read a root item from the tree. In case we detect a root item smaller than
 * sizeof(root_item), we know it's an old version of the root structure and
 * initialize all new fields to zero. The same happens if we detect mismatching
 * generation numbers as then we know the root was once mounted with an older
 * kernel that was not aware of the root item structure change.
 */
static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
				 struct btrfs_root_item *item)
{
	u32 len;
	int need_reset = 0;

	len = btrfs_item_size(eb, slot);
	read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot),
			   min_t(u32, len, sizeof(*item)));
	if (len < sizeof(*item))
		need_reset = 1;
	if (!need_reset && btrfs_root_generation(item) !=
	    btrfs_root_generation_v2(item)) {
		if (btrfs_root_generation_v2(item) != 0) {
			btrfs_warn(eb->fs_info,
				   "mismatching generation and generation_v2 found in root item. This root was probably mounted with an older kernel. Resetting all new fields.");
		}
		need_reset = 1;
	}
	if (need_reset) {
		/* Clear all members from generation_v2 onwards.
*/ memset_startat(item, 0, generation_v2); generate_random_guid(item->uuid); } } /* * Lookup the root by the key. * * root: the root of the root tree * search_key: the key to search * path: the path we search * root_item: the root item of the tree we look for * root_key: the root key of the tree we look for * * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset * of the search key, just lookup the root with the highest offset for a * given objectid. * * If we find something return 0, otherwise > 0, < 0 on error. */ int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key, struct btrfs_path *path, struct btrfs_root_item *root_item, struct btrfs_key *root_key) { struct btrfs_key found_key; struct extent_buffer *l; int ret; int slot; ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0); if (ret < 0) return ret; if (search_key->offset != -1ULL) { /* the search key is exact */ if (ret > 0) goto out; } else { /* * Key with offset -1 found, there would have to exist a root * with such id, but this is out of the valid range. */ if (unlikely(ret == 0)) { ret = -EUCLEAN; goto out; } if (path->slots[0] == 0) goto out; path->slots[0]--; ret = 0; } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &found_key, slot); if (found_key.objectid != search_key->objectid || found_key.type != BTRFS_ROOT_ITEM_KEY) { ret = 1; goto out; } if (root_item) btrfs_read_root_item(l, slot, root_item); if (root_key) memcpy(root_key, &found_key, sizeof(found_key)); out: btrfs_release_path(path); return ret; } void btrfs_set_root_node(struct btrfs_root_item *item, struct extent_buffer *node) { btrfs_set_root_bytenr(item, node->start); btrfs_set_root_level(item, btrfs_header_level(node)); btrfs_set_root_generation(item, btrfs_header_generation(node)); } /* * copy the data in 'item' into the btree */ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, struct btrfs_root_item *item) { struct btrfs_fs_info *fs_info = root->fs_info; BTRFS_PATH_AUTO_FREE(path); struct extent_buffer *l; int ret; int slot; unsigned long ptr; u32 old_len; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(trans, root, key, path, 0, 1); if (ret < 0) return ret; if (unlikely(ret > 0)) { btrfs_crit(fs_info, "unable to find root key " BTRFS_KEY_FMT " in tree %llu", BTRFS_KEY_FMT_VALUE(key), btrfs_root_id(root)); ret = -EUCLEAN; btrfs_abort_transaction(trans, ret); return ret; } l = path->nodes[0]; slot = path->slots[0]; ptr = btrfs_item_ptr_offset(l, slot); old_len = btrfs_item_size(l, slot); /* * If this is the first time we update the root item which originated * from an older kernel, we need to enlarge the item size to make room * for the added fields. */ if (old_len < sizeof(*item)) { btrfs_release_path(path); ret = btrfs_search_slot(trans, root, key, path, -1, 1); if (unlikely(ret < 0)) { btrfs_abort_transaction(trans, ret); return ret; } ret = btrfs_del_item(trans, root, path); if (unlikely(ret < 0)) { btrfs_abort_transaction(trans, ret); return ret; } btrfs_release_path(path); ret = btrfs_insert_empty_item(trans, root, path, key, sizeof(*item)); if (unlikely(ret < 0)) { btrfs_abort_transaction(trans, ret); return ret; } l = path->nodes[0]; slot = path->slots[0]; ptr = btrfs_item_ptr_offset(l, slot); } /* * Update generation_v2 so at the next mount we know the new root * fields are valid. 
*/ btrfs_set_root_generation_v2(item, btrfs_root_generation(item)); write_extent_buffer(l, item, ptr, sizeof(*item)); return ret; } int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, const struct btrfs_key *key, struct btrfs_root_item *item) { /* * Make sure generation v1 and v2 match. See update_root for details. */ btrfs_set_root_generation_v2(item, btrfs_root_generation(item)); return btrfs_insert_item(trans, root, key, item, sizeof(*item)); } int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info) { struct btrfs_root *tree_root = fs_info->tree_root; struct extent_buffer *leaf; BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct btrfs_root *root; int err = 0; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_ORPHAN_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = 0; while (1) { u64 root_objectid; ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); if (ret < 0) { err = ret; break; } leaf = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(tree_root, path); if (ret < 0) err = ret; if (ret != 0) break; leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_release_path(path); if (key.objectid != BTRFS_ORPHAN_OBJECTID || key.type != BTRFS_ORPHAN_ITEM_KEY) break; root_objectid = key.offset; key.offset++; root = btrfs_get_fs_root(fs_info, root_objectid, false); err = PTR_ERR_OR_ZERO(root); if (err && err != -ENOENT) { break; } else if (err == -ENOENT) { struct btrfs_trans_handle *trans; btrfs_release_path(path); trans = btrfs_join_transaction(tree_root); if (IS_ERR(trans)) { err = PTR_ERR(trans); btrfs_handle_fs_error(fs_info, err, "Failed to start trans to delete orphan item"); break; } err = btrfs_del_orphan_item(trans, tree_root, root_objectid); btrfs_end_transaction(trans); if (err) { btrfs_handle_fs_error(fs_info, err, "Failed to delete root orphan item"); break; } continue; } WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)); if (btrfs_root_refs(&root->root_item) == 0) { struct btrfs_key drop_key; btrfs_disk_key_to_cpu(&drop_key, &root->root_item.drop_progress); /* * If we have a non-zero drop_progress then we know we * made it partly through deleting this snapshot, and * thus we need to make sure we block any balance from * happening until this snapshot is completely dropped. */ if (drop_key.objectid != 0 || drop_key.type != 0 || drop_key.offset != 0) { set_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags); set_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); } set_bit(BTRFS_ROOT_DEAD_TREE, &root->state); btrfs_add_dead_root(root); } btrfs_put_root(root); } return err; } /* drop the root item for 'key' from the tree root */ int btrfs_del_root(struct btrfs_trans_handle *trans, const struct btrfs_key *key) { struct btrfs_root *root = trans->fs_info->tree_root; BTRFS_PATH_AUTO_FREE(path); int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(trans, root, key, path, -1, 1); if (ret < 0) return ret; if (unlikely(ret > 0)) /* The root must exist but we did not find it by the key. 
*/ return -EUCLEAN; return btrfs_del_item(trans, root, path); } int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, u64 ref_id, u64 dirid, u64 *sequence, const struct fscrypt_str *name) { struct btrfs_root *tree_root = trans->fs_info->tree_root; BTRFS_PATH_AUTO_FREE(path); struct btrfs_root_ref *ref; struct extent_buffer *leaf; struct btrfs_key key; unsigned long ptr; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = root_id; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = ref_id; again: ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); if (ret < 0) { return ret; } else if (ret == 0) { leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); ptr = (unsigned long)(ref + 1); if ((btrfs_root_ref_dirid(leaf, ref) != dirid) || (btrfs_root_ref_name_len(leaf, ref) != name->len) || memcmp_extent_buffer(leaf, name->name, ptr, name->len)) return -ENOENT; *sequence = btrfs_root_ref_sequence(leaf, ref); ret = btrfs_del_item(trans, tree_root, path); if (ret) return ret; } else { return -ENOENT; } if (key.type == BTRFS_ROOT_BACKREF_KEY) { btrfs_release_path(path); key.objectid = ref_id; key.type = BTRFS_ROOT_REF_KEY; key.offset = root_id; goto again; } return ret; } /* * add a btrfs_root_ref item. type is either BTRFS_ROOT_REF_KEY * or BTRFS_ROOT_BACKREF_KEY. * * The dirid, sequence, name and name_len refer to the directory entry * that is referencing the root. * * For a forward ref, the root_id is the id of the tree referencing * the root and ref_id is the id of the subvol or snapshot. * * For a back ref the root_id is the id of the subvol or snapshot and * ref_id is the id of the tree referencing it. * * Will return 0, -ENOMEM, or anything from the CoW path */ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id, u64 ref_id, u64 dirid, u64 sequence, const struct fscrypt_str *name) { struct btrfs_root *tree_root = trans->fs_info->tree_root; struct btrfs_key key; int ret; BTRFS_PATH_AUTO_FREE(path); struct btrfs_root_ref *ref; struct extent_buffer *leaf; unsigned long ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = root_id; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = ref_id; again: ret = btrfs_insert_empty_item(trans, tree_root, path, &key, sizeof(*ref) + name->len); if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); return ret; } leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); btrfs_set_root_ref_dirid(leaf, ref, dirid); btrfs_set_root_ref_sequence(leaf, ref, sequence); btrfs_set_root_ref_name_len(leaf, ref, name->len); ptr = (unsigned long)(ref + 1); write_extent_buffer(leaf, name->name, ptr, name->len); if (key.type == BTRFS_ROOT_BACKREF_KEY) { btrfs_release_path(path); key.objectid = ref_id; key.type = BTRFS_ROOT_REF_KEY; key.offset = root_id; goto again; } return 0; } /* * Old btrfs forgets to init root_item->flags and root_item->byte_limit * for subvolumes. To work around this problem, we steal a bit from * root_item->inode_item->flags, and use it to indicate if those fields * have been properly initialized. 
 */
void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
{
	u64 inode_flags = btrfs_stack_inode_flags(&root_item->inode);

	if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
		inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
		btrfs_set_stack_inode_flags(&root_item->inode, inode_flags);
		btrfs_set_root_flags(root_item, 0);
		btrfs_set_root_limit(root_item, 0);
	}
}

void btrfs_update_root_times(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	struct btrfs_root_item *item = &root->root_item;
	struct timespec64 ct;

	ktime_get_real_ts64(&ct);
	spin_lock(&root->root_item_lock);
	btrfs_set_root_ctransid(item, trans->transid);
	btrfs_set_stack_timespec_sec(&item->ctime, ct.tv_sec);
	btrfs_set_stack_timespec_nsec(&item->ctime, ct.tv_nsec);
	spin_unlock(&root->root_item_lock);
}

/*
 * Reserve space for subvolume operation.
 *
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * use_global_rsv: allow fallback to the global block reservation
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion. Those operations differ from the common
 * file/directory operations: they change two fs/file trees and the root
 * tree, and the number of items that the qgroup reserves differs from the
 * free space reservation. So we can not use the space reservation mechanism
 * in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv, int items,
				     bool use_global_rsv)
{
	u64 qgroup_num_bytes = 0;
	u64 num_bytes;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (btrfs_qgroup_enabled(fs_info)) {
		/* One for parent inode, two for dir entries */
		qgroup_num_bytes = 3 * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true, false);
		if (ret)
			return ret;
	}

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, items);
	rsv->space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, true);

	if (ret && qgroup_num_bytes)
		btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);

	if (!ret) {
		spin_lock(&rsv->lock);
		rsv->qgroup_rsv_reserved += qgroup_num_bytes;
		spin_unlock(&rsv->lock);
	}

	return ret;
}
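/*
 * [Editorial note -- not part of the original source.] To make the
 * forward/back ref pairing above concrete: a single call to
 * btrfs_add_root_ref(trans, root_id, ref_id, ...) inserts two items into the
 * tree root, both carrying the same dirid/sequence/name payload:
 *
 *	(root_id, BTRFS_ROOT_BACKREF_KEY, ref_id)
 *	(ref_id,  BTRFS_ROOT_REF_KEY,     root_id)
 *
 * This is also why btrfs_del_root_ref() takes the "again" branch exactly
 * once: it deletes the backref item first, then swaps objectid and offset to
 * delete the matching forward ref.
 */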
// SPDX-License-Identifier: GPL-2.0
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/sched.h>

#include "tty.h"

/* Legacy tty mutex glue */

/*
 * Getting the big tty mutex.
 */

void tty_lock(struct tty_struct *tty)
{
	tty_kref_get(tty);
	mutex_lock(&tty->legacy_mutex);
}
EXPORT_SYMBOL(tty_lock);

int tty_lock_interruptible(struct tty_struct *tty)
{
	int ret;

	tty_kref_get(tty);
	ret = mutex_lock_interruptible(&tty->legacy_mutex);
	if (ret)
		tty_kref_put(tty);
	return ret;
}

void tty_unlock(struct tty_struct *tty)
{
	mutex_unlock(&tty->legacy_mutex);
	tty_kref_put(tty);
}
EXPORT_SYMBOL(tty_unlock);

void tty_lock_slave(struct tty_struct *tty)
{
	if (tty && tty != tty->link)
		tty_lock(tty);
}

void tty_unlock_slave(struct tty_struct *tty)
{
	if (tty && tty != tty->link)
		tty_unlock(tty);
}

void tty_set_lock_subclass(struct tty_struct *tty)
{
	lockdep_set_subclass(&tty->legacy_mutex, TTY_LOCK_SLAVE);
}
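/*
 * [Editorial example -- not part of the original source.] A hypothetical
 * caller showing the intended nesting for a pty pair: one side is locked
 * first, then its link via tty_lock_slave(), which the TTY_LOCK_SLAVE
 * lockdep subclass set above makes valid as a nested acquisition.
 */
static void demo_lock_both_sides(struct tty_struct *tty)
{
	struct tty_struct *slave = tty->link;	/* may be NULL or tty itself */

	tty_lock(tty);
	tty_lock_slave(slave);	/* no-op unless a distinct slave exists */

	/* ... operate on both ends of the pair ... */

	tty_unlock_slave(slave);
	tty_unlock(tty);
}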
// SPDX-License-Identifier: GPL-2.0
/* Builtin firmware support */

#include <linux/firmware.h>
#include "../firmware.h"

/* Only if FW_LOADER=y */
#ifdef CONFIG_FW_LOADER

struct builtin_fw {
	char *name;
	void *data;
	unsigned long size;
};

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool fw_copy_to_prealloc_buf(struct firmware *fw, void *buf, size_t size)
{
	if (!buf)
		return true;
	if (size < fw->size)
		return false;
	memcpy(buf, fw->data, fw->size);
	return true;
}

/**
 * firmware_request_builtin() - load builtin firmware
 * @fw: pointer to firmware struct
 * @name: name of firmware file
 *
 * Some use cases in the kernel have a requirement that no memory allocator
 * is involved, as these calls take place early in the boot process. An
 * example is the x86 CPU microcode loader. In these cases all the caller
 * wants is to see if the firmware was built-in and if so use it right away.
 * This can be used for such cases.
 *
 * This looks for the firmware in the built-in kernel. Only if the kernel was
 * built with the firmware you are looking for will this return successfully.
 *
 * Callers of this API do not need to use release_firmware() as the pointer to
 * the firmware is expected to be provided locally on the stack of the caller.
 **/
bool firmware_request_builtin(struct firmware *fw, const char *name)
{
	struct builtin_fw *b_fw;

	if (!fw)
		return false;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, "TEST_FIRMWARE");

/**
 * firmware_request_builtin_buf() - load builtin firmware into optional buffer
 * @fw: pointer to firmware struct
 * @name: name of firmware file
 * @buf: If set, a pre-allocated buffer that the built-in firmware is copied
 *	into. This field can be NULL. It is used by callers such as
 *	request_firmware_into_buf() and request_partial_firmware_into_buf()
 * @size: if buf was provided, the max size of the allocated buffer available.
 *	If the built-in firmware does not fit into the pre-allocated @buf this
 *	call will fail.
 *
 * This looks for the firmware in the built-in kernel. Only if the kernel was
 * built with the firmware you are looking for will this call possibly
 * succeed. If you passed a @buf the firmware will be copied into it *iff* the
 * built-in firmware fits into the pre-allocated buffer size specified in
 * @size.
 *
 * This caller is to be used internally by the firmware_loader only.
 **/
bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
				  void *buf, size_t size)
{
	if (!firmware_request_builtin(fw, name))
		return false;

	return fw_copy_to_prealloc_buf(fw, buf, size);
}

bool firmware_is_builtin(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}

#endif
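/*
 * [Editorial example -- not part of the original source.] A minimal
 * in-kernel usage sketch of firmware_request_builtin(). As the kerneldoc
 * above notes, the struct firmware lives on the caller's stack, nothing is
 * allocated, and release_firmware() must not be called; fw.data points
 * straight at the built-in blob. The firmware name and function are
 * hypothetical.
 */
static int demo_use_builtin_fw(void)
{
	struct firmware fw;

	if (!firmware_request_builtin(&fw, "demo/blob.bin"))
		return -ENOENT;

	/* consume fw.data / fw.size directly */
	return 0;
}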
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

static void ioc_exit_icqs(struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
		ioc_exit_icq(icq);
	spin_unlock_irq(&ioc->lock);
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(&q->queue_lock);

	if (icq->flags & ICQ_DESTROYED)
		return;

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will.
Hint assignment itself can race safely. */ if (rcu_access_pointer(ioc->icq_hint) == icq) rcu_assign_pointer(ioc->icq_hint, NULL); ioc_exit_icq(icq); /* * @icq->q might have gone away by the time RCU callback runs * making it impossible to determine icq_cache. Record it in @icq. */ icq->__rcu_icq_cache = et->icq_cache; icq->flags |= ICQ_DESTROYED; kfree_rcu(icq, __rcu_head); } /* * Slow path for ioc release in put_io_context(). Performs double-lock * dancing to unlink all icq's and then frees ioc. */ static void ioc_release_fn(struct work_struct *work) { struct io_context *ioc = container_of(work, struct io_context, release_work); spin_lock_irq(&ioc->lock); while (!hlist_empty(&ioc->icq_list)) { struct io_cq *icq = hlist_entry(ioc->icq_list.first, struct io_cq, ioc_node); struct request_queue *q = icq->q; if (spin_trylock(&q->queue_lock)) { ioc_destroy_icq(icq); spin_unlock(&q->queue_lock); } else { /* Make sure q and icq cannot be freed. */ rcu_read_lock(); /* Re-acquire the locks in the correct order. */ spin_unlock(&ioc->lock); spin_lock(&q->queue_lock); spin_lock(&ioc->lock); ioc_destroy_icq(icq); spin_unlock(&q->queue_lock); rcu_read_unlock(); } } spin_unlock_irq(&ioc->lock); kmem_cache_free(iocontext_cachep, ioc); } /* * Releasing icqs requires reverse order double locking and we may already be * holding a queue_lock. Do it asynchronously from a workqueue. */ static bool ioc_delay_free(struct io_context *ioc) { unsigned long flags; spin_lock_irqsave(&ioc->lock, flags); if (!hlist_empty(&ioc->icq_list)) { queue_work(system_power_efficient_wq, &ioc->release_work); spin_unlock_irqrestore(&ioc->lock, flags); return true; } spin_unlock_irqrestore(&ioc->lock, flags); return false; } /** * ioc_clear_queue - break any ioc association with the specified queue * @q: request_queue being cleared * * Walk @q->icq_list and exit all io_cq's. */ void ioc_clear_queue(struct request_queue *q) { spin_lock_irq(&q->queue_lock); while (!list_empty(&q->icq_list)) { struct io_cq *icq = list_first_entry(&q->icq_list, struct io_cq, q_node); /* * Other context won't hold ioc lock to wait for queue_lock, see * details in ioc_release_fn(). */ spin_lock(&icq->ioc->lock); ioc_destroy_icq(icq); spin_unlock(&icq->ioc->lock); } spin_unlock_irq(&q->queue_lock); } #else /* CONFIG_BLK_ICQ */ static inline void ioc_exit_icqs(struct io_context *ioc) { } static inline bool ioc_delay_free(struct io_context *ioc) { return false; } #endif /* CONFIG_BLK_ICQ */ /** * put_io_context - put a reference of io_context * @ioc: io_context to put * * Decrement reference count of @ioc and release it if the count reaches * zero. 
*/ void put_io_context(struct io_context *ioc) { BUG_ON(atomic_long_read(&ioc->refcount) <= 0); if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc)) kmem_cache_free(iocontext_cachep, ioc); } EXPORT_SYMBOL_GPL(put_io_context); /* Called by the exiting task */ void exit_io_context(struct task_struct *task) { struct io_context *ioc; task_lock(task); ioc = task->io_context; task->io_context = NULL; task_unlock(task); if (atomic_dec_and_test(&ioc->active_ref)) { ioc_exit_icqs(ioc); put_io_context(ioc); } } static struct io_context *alloc_io_context(gfp_t gfp_flags, int node) { struct io_context *ioc; ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO, node); if (unlikely(!ioc)) return NULL; atomic_long_set(&ioc->refcount, 1); atomic_set(&ioc->active_ref, 1); #ifdef CONFIG_BLK_ICQ spin_lock_init(&ioc->lock); INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC); INIT_HLIST_HEAD(&ioc->icq_list); INIT_WORK(&ioc->release_work, ioc_release_fn); #endif ioc->ioprio = IOPRIO_DEFAULT; return ioc; } int set_task_ioprio(struct task_struct *task, int ioprio) { int err; const struct cred *cred = current_cred(), *tcred; rcu_read_lock(); tcred = __task_cred(task); if (!uid_eq(tcred->uid, cred->euid) && !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); err = security_task_setioprio(task, ioprio); if (err) return err; task_lock(task); if (unlikely(!task->io_context)) { struct io_context *ioc; task_unlock(task); ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE); if (!ioc) return -ENOMEM; task_lock(task); if (task->flags & PF_EXITING) { kmem_cache_free(iocontext_cachep, ioc); goto out; } if (task->io_context) kmem_cache_free(iocontext_cachep, ioc); else task->io_context = ioc; } task->io_context->ioprio = ioprio; out: task_unlock(task); return 0; } EXPORT_SYMBOL_GPL(set_task_ioprio); int __copy_io(u64 clone_flags, struct task_struct *tsk) { struct io_context *ioc = current->io_context; /* * Share io context with parent, if CLONE_IO is set */ if (clone_flags & CLONE_IO) { atomic_inc(&ioc->active_ref); tsk->io_context = ioc; } else if (ioprio_valid(ioc->ioprio)) { tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE); if (!tsk->io_context) return -ENOMEM; tsk->io_context->ioprio = ioc->ioprio; } return 0; } #ifdef CONFIG_BLK_ICQ /** * ioc_lookup_icq - lookup io_cq from ioc in io issue path * @q: the associated request_queue * * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called * from io issue path, either return NULL if current issue io to @q for the * first time, or return a valid icq. */ struct io_cq *ioc_lookup_icq(struct request_queue *q) { struct io_context *ioc = current->io_context; struct io_cq *icq; /* * icq's are indexed from @ioc using radix tree and hint pointer, * both of which are protected with RCU, io issue path ensures that * both request_queue and current task are valid, the found icq * is guaranteed to be valid until the io is done. */ rcu_read_lock(); icq = rcu_dereference(ioc->icq_hint); if (icq && icq->q == q) goto out; icq = radix_tree_lookup(&ioc->icq_tree, q->id); if (icq && icq->q == q) rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */ else icq = NULL; out: rcu_read_unlock(); return icq; } EXPORT_SYMBOL(ioc_lookup_icq); /** * ioc_create_icq - create and link io_cq * @q: request_queue of interest * * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they * will be created using @gfp_mask. 
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = alloc_io_context(GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;

		task_lock(current);
		if (current->io_context) {
			kmem_cache_free(iocontext_cachep, ioc);
			ioc = current->io_context;
		} else {
			current->io_context = ioc;
		}

		get_io_context(ioc);
		task_unlock(current);
	} else {
		get_io_context(ioc);
		icq = ioc_lookup_icq(q);
	}

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
#endif /* CONFIG_BLK_ICQ */

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
					     sizeof(struct io_context), 0,
					     SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
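/*
 * [Editorial example -- not part of the original source.] set_task_ioprio()
 * above is ultimately reached from the ioprio_set(2) syscall. A minimal
 * userspace sketch (glibc provides no wrapper), using the UAPI macros from
 * <linux/ioprio.h>; the helper name is hypothetical:
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/ioprio.h>

static long set_self_ioprio_be(int level)
{
	/* who = 0 means the calling thread for IOPRIO_WHO_PROCESS */
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, level));
}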
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define
__LINUX_UACCESS_H__ #include <linux/cleanup.h> #include <linux/fault-inject-usercopy.h> #include <linux/instrumented.h> #include <linux/minmax.h> #include <linux/nospec.h> #include <linux/sched.h> #include <linux/ucopysize.h> #include <asm/uaccess.h> /* * Architectures that support memory tagging (assigning tags to memory regions, * embedding these tags into addresses that point to these memory regions, and * checking that the memory and the pointer tags match on memory accesses) * redefine this macro to strip tags from pointers. * * Passing down mm_struct allows to define untagging rules on per-process * basis. * * It's defined as noop for architectures that don't support memory tagging. */ #ifndef untagged_addr #define untagged_addr(addr) (addr) #endif #ifndef untagged_addr_remote #define untagged_addr_remote(mm, addr) ({ \ mmap_assert_locked(mm); \ untagged_addr(addr); \ }) #endif #ifdef masked_user_access_begin #define can_do_masked_user_access() 1 # ifndef masked_user_write_access_begin # define masked_user_write_access_begin masked_user_access_begin # endif # ifndef masked_user_read_access_begin # define masked_user_read_access_begin masked_user_access_begin #endif #else #define can_do_masked_user_access() 0 #define masked_user_access_begin(src) NULL #define masked_user_read_access_begin(src) NULL #define masked_user_write_access_begin(src) NULL #define mask_user_address(src) (src) #endif /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and * __copy_{to,from}_user{,_inatomic}(). * * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and * return the amount left to copy. They should assume that access_ok() has * already been checked (and succeeded); they should *not* zero-pad anything. * No KASAN or object size checks either - those belong here. * * Both of these functions should attempt to copy size bytes starting at from * into the area starting at to. They must not fetch or store anything * outside of those areas. Return value must be between 0 (everything * copied successfully) and size (nothing copied). * * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting * at to must become equal to the bytes fetched from the corresponding area * starting at from. All data past to + size - N must be left unmodified. * * If copying succeeds, the return value must be 0. If some data cannot be * fetched, it is permitted to copy less than had been fetched; the only * hard requirement is that not storing anything at all (i.e. returning size) * should happen only when nothing could be copied. In other words, you don't * have to squeeze as much as possible - it is allowed, but not necessary. * * For raw_copy_from_user() to always points to kernel memory and no faults * on store should happen. Interpretation of from is affected by set_fs(). * For raw_copy_to_user() it's the other way round. * * Both can be inlined - it's up to architectures whether it wants to bother * with that. They should not be used directly; they are used to implement * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic()) * that are used instead. Out of those, __... ones are inlined. Plain * copy_{to,from}_user() might or might not be inlined. If you want them * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER. * * NOTE: only copy_from_user() zero-pads the destination in case of short copy. 
* Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything * at all; their callers absolutely must check the return value. * * Biarch ones should also provide raw_copy_in_user() - similar to the above, * but both source and destination are __user pointers (affected by set_fs() * as usual) and both source and destination can trigger faults. */ static __always_inline __must_check unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { unsigned long res; instrument_copy_from_user_before(to, from, n); check_object_size(to, n, false); res = raw_copy_from_user(to, from, n); instrument_copy_from_user_after(to, from, n, res); return res; } static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res; might_fault(); instrument_copy_from_user_before(to, from, n); if (should_fail_usercopy()) return n; check_object_size(to, n, false); res = raw_copy_from_user(to, from, n); instrument_copy_from_user_after(to, from, n, res); return res; } /** * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. * @to: Destination address, in user space. * @from: Source address, in kernel space. * @n: Number of bytes to copy. * * Context: User context only. * * Copy data from kernel space to user space. Caller must check * the specified block with access_ok() before calling this function. * The caller should also make sure he pins the user space address * so that we don't result in page fault and sleep. */ static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { if (should_fail_usercopy()) return n; instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (should_fail_usercopy()) return n; instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } /* * Architectures that #define INLINE_COPY_TO_USER use this function * directly in the normal copy_to/from_user(), the other ones go * through an extern _copy_to/from_user(), which expands the same code * here. 
*/ static inline __must_check unsigned long _inline_copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; might_fault(); if (should_fail_usercopy()) goto fail; if (can_do_masked_user_access()) from = mask_user_address(from); else { if (!access_ok(from, n)) goto fail; /* * Ensure that bad access_ok() speculation will not * lead to nasty side effects *after* the copy is * finished: */ barrier_nospec(); } instrument_copy_from_user_before(to, from, n); res = raw_copy_from_user(to, from, n); instrument_copy_from_user_after(to, from, n, res); if (likely(!res)) return 0; fail: memset(to + (n - res), 0, res); return res; } extern __must_check unsigned long _copy_from_user(void *, const void __user *, unsigned long); static inline __must_check unsigned long _inline_copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (should_fail_usercopy()) return n; if (access_ok(to, n)) { instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; } extern __must_check unsigned long _copy_to_user(void __user *, const void *, unsigned long); static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { if (!check_copy_size(to, n, false)) return n; #ifdef INLINE_COPY_FROM_USER return _inline_copy_from_user(to, from, n); #else return _copy_from_user(to, from, n); #endif } static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { if (!check_copy_size(from, n, true)) return n; #ifdef INLINE_COPY_TO_USER return _inline_copy_to_user(to, from, n); #else return _copy_to_user(to, from, n); #endif } #ifndef copy_mc_to_kernel /* * Without arch opt-in this generic copy_mc_to_kernel() will not handle * #MC (or arch equivalent) during source read. */ static inline unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, size_t cnt) { memcpy(dst, src, cnt); return 0; } #endif static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; } static __always_inline void pagefault_disabled_dec(void) { current->pagefault_disabled--; } /* * These routines enable/disable the pagefault handler. If disabled, it will * not take any locks and go straight to the fixup table. * * User access methods will not sleep when called from a pagefault_disabled() * environment. */ static inline void pagefault_disable(void) { pagefault_disabled_inc(); /* * make sure to have issued the store before a pagefault * can hit. */ barrier(); } static inline void pagefault_enable(void) { /* * make sure to issue those last loads/stores before enabling * the pagefault handler again. */ barrier(); pagefault_disabled_dec(); } /* * Is the pagefault handler disabled? If so, user access methods will not sleep. */ static inline bool pagefault_disabled(void) { return current->pagefault_disabled != 0; } /* * The pagefault handler is in general disabled by pagefault_disable() or * when in irq context (via in_atomic()). * * This function should only be used by the fault handlers. Other users should * stick to pagefault_disabled(). * Please NEVER use preempt_disable() to disable the fault handler. With * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled. * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT. 
*/ #define faulthandler_disabled() (pagefault_disabled() || in_atomic()) DEFINE_LOCK_GUARD_0(pagefault, pagefault_disable(), pagefault_enable()) #ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS /** * probe_subpage_writeable: probe the user range for write faults at sub-page * granularity (e.g. arm64 MTE) * @uaddr: start of address range * @size: size of address range * * Returns 0 on success, the number of bytes not probed on fault. * * It is expected that the caller checked for the write permission of each * page in the range either by put_user() or GUP. The architecture port can * implement a more efficient get_user() probing if the same sub-page faults * are triggered by either a read or a write. */ static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size) { return 0; } #endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */ #ifndef ARCH_HAS_NOCACHE_UACCESS static inline __must_check unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { return __copy_from_user_inatomic(to, from, n); } #endif /* ARCH_HAS_NOCACHE_UACCESS */ extern __must_check int check_zeroed_user(const void __user *from, size_t size); /** * copy_struct_from_user: copy a struct from userspace * @dst: Destination address, in kernel space. This buffer must be @ksize * bytes long. * @ksize: Size of @dst struct. * @src: Source address, in userspace. * @usize: (Alleged) size of @src struct. * * Copies a struct from userspace to kernel space, in a way that guarantees * backwards-compatibility for struct syscall arguments (as long as future * struct extensions are made such that all new fields are *appended* to the * old struct, and zeroed-out new fields have the same meaning as the old * struct). * * @ksize is just sizeof(*dst), and @usize should've been passed by userspace. * The recommended usage is something like the following: * * SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize) * { * int err; * struct foo karg = {}; * * if (usize > PAGE_SIZE) * return -E2BIG; * if (usize < FOO_SIZE_VER0) * return -EINVAL; * * err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize); * if (err) * return err; * * // ... * } * * There are three cases to consider: * * If @usize == @ksize, then it's copied verbatim. * * If @usize < @ksize, then the userspace has passed an old struct to a * newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize) * are to be zero-filled. * * If @usize > @ksize, then the userspace has passed a new struct to an * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize) * are checked to ensure they are zeroed, otherwise -E2BIG is returned. * * Returns (in all cases, some data may have been copied): * * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src. * * -EFAULT: access to userspace failed. */ static __always_inline __must_check int copy_struct_from_user(void *dst, size_t ksize, const void __user *src, size_t usize) { size_t size = min(ksize, usize); size_t rest = max(ksize, usize) - size; /* Double check if ksize is larger than a known object size. */ if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1))) return -E2BIG; /* Deal with trailing bytes. */ if (usize < ksize) { memset(dst + size, 0, rest); } else if (usize > ksize) { int ret = check_zeroed_user(src + size, rest); if (ret <= 0) return ret ?: -E2BIG; } /* Copy the interoperable parts of the struct. 
*/ if (copy_from_user(dst, src, size)) return -EFAULT; return 0; } /** * copy_struct_to_user: copy a struct to userspace * @dst: Destination address, in userspace. This buffer must be @usize * bytes long. * @usize: (Alleged) size of @dst struct. * @src: Source address, in kernel space. * @ksize: Size of @src struct. * @ignored_trailing: Set to %true if there was a non-zero byte in @src that * userspace cannot see because they are using a smaller struct. * * Copies a struct from kernel space to userspace, in a way that guarantees * backwards-compatibility for struct syscall arguments (as long as future * struct extensions are made such that all new fields are *appended* to the * old struct, and zeroed-out new fields have the same meaning as the old * struct). * * Some syscalls may wish to make sure that userspace knows about everything in * the struct, and if there is a non-zero value that userspace doesn't know * about, they want to return an error (such as -EMSGSIZE) or have some other * fallback (such as adding a "you're missing some information" flag). If * @ignored_trailing is non-%NULL, it will be set to %true if there was a * non-zero byte that could not be copied to userspace (ie. was past @usize). * * While unconditionally returning an error in this case is the simplest * solution, for maximum backward compatibility you should try to only return * -EMSGSIZE if the user explicitly requested the data that couldn't be copied. * Note that structure sizes can change due to header changes and simple * recompilations without code changes(!), so if you care about * @ignored_trailing you probably want to make sure that any new field data is * associated with a flag. Otherwise you might assume that a program knows * about data it does not. * * @ksize is just sizeof(*src), and @usize should've been passed by userspace. * The recommended usage is something like the following: * * SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize) * { * int err; * bool ignored_trailing; * struct foo karg = {}; * * if (usize > PAGE_SIZE) * return -E2BIG; * if (usize < FOO_SIZE_VER0) * return -EINVAL; * * // ... modify karg somehow ... * * err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg), * &ignored_trailing); * if (err) * return err; * if (ignored_trailing) * return -EMSGSIZE; * * // ... * } * * There are three cases to consider: * * If @usize == @ksize, then it's copied verbatim. * * If @usize < @ksize, then the kernel is trying to pass userspace a newer * struct than it supports. Thus we only copy the interoperable portions * (@usize) and ignore the rest (but @ignored_trailing is set to %true if * any of the trailing (@ksize - @usize) bytes are non-zero). * * If @usize > @ksize, then the kernel is trying to pass userspace an older * struct than userspace supports. In order to make sure the * unknown-to-the-kernel fields don't contain garbage values, we zero the * trailing (@usize - @ksize) bytes. * * Returns (in all cases, some data may have been copied): * * -EFAULT: access to userspace failed. */ static __always_inline __must_check int copy_struct_to_user(void __user *dst, size_t usize, const void *src, size_t ksize, bool *ignored_trailing) { size_t size = min(ksize, usize); size_t rest = max(ksize, usize) - size; /* Double check if ksize is larger than a known object size. */ if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1))) return -E2BIG; /* Deal with trailing bytes.
*/ if (usize > ksize) { if (clear_user(dst + size, rest)) return -EFAULT; } if (ignored_trailing) *ignored_trailing = ksize < usize && memchr_inv(src + size, 0, rest) != NULL; /* Copy the interoperable parts of the struct. */ if (copy_to_user(dst, src, size)) return -EFAULT; return 0; } bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); long copy_from_kernel_nofault(void *dst, const void *src, size_t size); long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size); long copy_from_user_nofault(void *dst, const void __user *src, size_t size); long notrace copy_to_user_nofault(void __user *dst, const void *src, size_t size); long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count); long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count); long strnlen_user_nofault(const void __user *unsafe_addr, long count); #ifdef arch_get_kernel_nofault /* * Wrap the architecture implementation so that @label can be outside of a * cleanup() scope. A regular C goto works correctly, but ASM goto does * not. Clang rejects such an attempt, but GCC silently emits buggy code. */ #define __get_kernel_nofault(dst, src, type, label) \ do { \ __label__ local_label; \ arch_get_kernel_nofault(dst, src, type, local_label); \ if (0) { \ local_label: \ goto label; \ } \ } while (0) #define __put_kernel_nofault(dst, src, type, label) \ do { \ __label__ local_label; \ arch_put_kernel_nofault(dst, src, type, local_label); \ if (0) { \ local_label: \ goto label; \ } \ } while (0) #elif !defined(__get_kernel_nofault) /* arch_get_kernel_nofault */ #define __get_kernel_nofault(dst, src, type, label) \ do { \ type __user *p = (type __force __user *)(src); \ type data; \ if (__get_user(data, p)) \ goto label; \ *(type *)dst = data; \ } while (0) #define __put_kernel_nofault(dst, src, type, label) \ do { \ type __user *p = (type __force __user *)(dst); \ type data = *(type *)src; \ if (__put_user(data, p)) \ goto label; \ } while (0) #endif /* !__get_kernel_nofault */ /** * get_kernel_nofault(): safely attempt to read from a location * @val: read into this variable * @ptr: address to read from * * Returns 0 on success, or -EFAULT. */ #define get_kernel_nofault(val, ptr) ({ \ const typeof(val) *__gk_ptr = (ptr); \ copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\ }) #ifdef user_access_begin #ifdef arch_unsafe_get_user /* * Wrap the architecture implementation so that @label can be outside of a * cleanup() scope. A regular C goto works correctly, but ASM goto does * not. Clang rejects such an attempt, but GCC silently emits buggy code. * * Some architectures use internal local labels already, but this extra * indirection here is harmless because the compiler optimizes it out * completely in any case. This construct just ensures that the ASM GOTO * target is always in the local scope. The C goto 'label' works correctly * when leaving a cleanup() scope. 
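* * A hedged usage sketch (illustrative; uptr is an assumed, already validated * __user pointer), with the fault label outside the access section: * * if (!user_read_access_begin(uptr, sizeof(*uptr))) * return -EFAULT; * unsafe_get_user(val, uptr, efault); * user_read_access_end(); * return 0; * efault: * user_read_access_end(); * return -EFAULT;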
*/ #define unsafe_get_user(x, ptr, label) \ do { \ __label__ local_label; \ arch_unsafe_get_user(x, ptr, local_label); \ if (0) { \ local_label: \ goto label; \ } \ } while (0) #define unsafe_put_user(x, ptr, label) \ do { \ __label__ local_label; \ arch_unsafe_put_user(x, ptr, local_label); \ if (0) { \ local_label: \ goto label; \ } \ } while (0) #endif /* arch_unsafe_get_user */ #else /* user_access_begin */ #define user_access_begin(ptr,len) access_ok(ptr, len) #define user_access_end() do { } while (0) #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e) #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e) #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) #define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e) static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif /* !user_access_begin */ #ifndef user_write_access_begin #define user_write_access_begin user_access_begin #define user_write_access_end user_access_end #endif #ifndef user_read_access_begin #define user_read_access_begin user_access_begin #define user_read_access_end user_access_end #endif /* Define RW variant so the below _mode macro expansion works */ #define masked_user_rw_access_begin(u) masked_user_access_begin(u) #define user_rw_access_begin(u, s) user_access_begin(u, s) #define user_rw_access_end() user_access_end() /* Scoped user access */ #define USER_ACCESS_GUARD(_mode) \ static __always_inline void __user * \ class_user_##_mode##_begin(void __user *ptr) \ { \ return ptr; \ } \ \ static __always_inline void \ class_user_##_mode##_end(void __user *ptr) \ { \ user_##_mode##_access_end(); \ } \ \ DEFINE_CLASS(user_ ##_mode## _access, void __user *, \ class_user_##_mode##_end(_T), \ class_user_##_mode##_begin(ptr), void __user *ptr) \ \ static __always_inline class_user_##_mode##_access_t \ class_user_##_mode##_access_ptr(void __user *scope) \ { \ return scope; \ } USER_ACCESS_GUARD(read) USER_ACCESS_GUARD(write) USER_ACCESS_GUARD(rw) #undef USER_ACCESS_GUARD /** * __scoped_user_access_begin - Start a scoped user access * @mode: The mode of the access class (read, write, rw) * @uptr: The pointer to access user space memory * @size: Size of the access * @elbl: Error label to goto when the access region is rejected * * Internal helper for __scoped_user_access(). Don't use directly. */ #define __scoped_user_access_begin(mode, uptr, size, elbl) \ ({ \ typeof(uptr) __retptr; \ \ if (can_do_masked_user_access()) { \ __retptr = masked_user_##mode##_access_begin(uptr); \ } else { \ __retptr = uptr; \ if (!user_##mode##_access_begin(uptr, size)) \ goto elbl; \ } \ __retptr; \ }) /** * __scoped_user_access - Open a scope for user access * @mode: The mode of the access class (read, write, rw) * @uptr: The pointer to access user space memory * @size: Size of the access * @elbl: Error label to goto when the access region is rejected. It * must be placed outside the scope * * If the user access function inside the scope requires a fault label, it * can use @elbl or a different label outside the scope, which requires * that user access which is implemented with ASM GOTO has been properly * wrapped. See unsafe_get_user() for reference. 
* * scoped_user_rw_access(ptr, efault) { * unsafe_get_user(rval, &ptr->rval, efault); * unsafe_put_user(wval, &ptr->wval, efault); * } * return 0; * efault: * return -EFAULT; * * The scope is internally implemented as an autoterminating nested for() * loop, which can be left with 'return', 'break' and 'goto' at any * point. * * When the scope is left, user_##@_mode##_access_end() is automatically * invoked. * * When the architecture supports masked user access and the access region * which is determined by @uptr and @size is not a valid user space * address, i.e. < TASK_SIZE, the scope sets the pointer to a faulting user * space address and does not terminate early. This optimizes for the good * case and lets the performance-uncritical bad case go through the fault. * * The eventual modification of the pointer is limited to the scope. * Outside of the scope the original pointer value is unmodified, so that * the original pointer value is available for diagnostic purposes in an * out of scope fault path. * * Nesting scoped user access into a user access scope is invalid and fails * the build. Nesting into other guards, e.g. pagefault, is safe. * * The masked variant does not check the size of the access and relies on a * mapping hole (e.g. guard page) to catch an out of range pointer; the * first access to user memory inside the scope has to be within * @uptr ... @uptr + PAGE_SIZE - 1 * * Don't use directly. Use scoped_user_$MODE_access() instead. */ #define __scoped_user_access(mode, uptr, size, elbl) \ for (bool done = false; !done; done = true) \ for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \ !done; done = true) \ for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true) \ /* Force modified pointer usage within the scope */ \ for (const typeof(uptr) uptr = _tmpptr; !done; done = true) /** * scoped_user_read_access_size - Start a scoped user read access with given size * @usrc: Pointer to the user space address to read from * @size: Size of the access starting from @usrc * @elbl: Error label to goto when the access region is rejected * * For further information see __scoped_user_access() above. */ #define scoped_user_read_access_size(usrc, size, elbl) \ __scoped_user_access(read, usrc, size, elbl) /** * scoped_user_read_access - Start a scoped user read access * @usrc: Pointer to the user space address to read from * @elbl: Error label to goto when the access region is rejected * * The size of the access starting from @usrc is determined via sizeof(*@usrc). * * For further information see __scoped_user_access() above. */ #define scoped_user_read_access(usrc, elbl) \ scoped_user_read_access_size(usrc, sizeof(*(usrc)), elbl) /** * scoped_user_write_access_size - Start a scoped user write access with given size * @udst: Pointer to the user space address to write to * @size: Size of the access starting from @udst * @elbl: Error label to goto when the access region is rejected * * For further information see __scoped_user_access() above. */ #define scoped_user_write_access_size(udst, size, elbl) \ __scoped_user_access(write, udst, size, elbl) /** * scoped_user_write_access - Start a scoped user write access * @udst: Pointer to the user space address to write to * @elbl: Error label to goto when the access region is rejected * * The size of the access starting from @udst is determined via sizeof(*@udst). * * For further information see __scoped_user_access() above.
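* * A minimal sketch (mirrors put_user_inline() below): * * scoped_user_write_access(udst, efault) * unsafe_put_user(val, udst, efault);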
*/ #define scoped_user_write_access(udst, elbl) \ scoped_user_write_access_size(udst, sizeof(*(udst)), elbl) /** * scoped_user_rw_access_size - Start a scoped user read/write access with given size * @uptr: Pointer to the user space address to read from and write to * @size: Size of the access starting from @uptr * @elbl: Error label to goto when the access region is rejected * * For further information see __scoped_user_access() above. */ #define scoped_user_rw_access_size(uptr, size, elbl) \ __scoped_user_access(rw, uptr, size, elbl) /** * scoped_user_rw_access - Start a scoped user read/write access * @uptr: Pointer to the user space address to read from and write to * @elbl: Error label to goto when the access region is rejected * * The size of the access starting from @uptr is determined via sizeof(*@uptr). * * For further information see __scoped_user_access() above. */ #define scoped_user_rw_access(uptr, elbl) \ scoped_user_rw_access_size(uptr, sizeof(*(uptr)), elbl) /** * get_user_inline - Read user data inlined * @val: The variable to store the value read from user memory * @usrc: Pointer to the user space memory to read from * * Return: 0 if successful, -EFAULT when faulted * * Inlined variant of get_user(). Only use when there is a demonstrable * performance reason. */ #define get_user_inline(val, usrc) \ ({ \ __label__ efault; \ typeof(usrc) _tmpsrc = usrc; \ int _ret = 0; \ \ scoped_user_read_access(_tmpsrc, efault) \ unsafe_get_user(val, _tmpsrc, efault); \ if (0) { \ efault: \ _ret = -EFAULT; \ } \ _ret; \ }) /** * put_user_inline - Write to user memory inlined * @val: The value to write * @udst: Pointer to the user space memory to write to * * Return: 0 if successful, -EFAULT when faulted * * Inlined variant of put_user(). Only use when there is a demonstrable * performance reason. */ #define put_user_inline(val, udst) \ ({ \ __label__ efault; \ typeof(udst) _tmpdst = udst; \ int _ret = 0; \ \ scoped_user_write_access(_tmpdst, efault) \ unsafe_put_user(val, _tmpdst, efault); \ if (0) { \ efault: \ _ret = -EFAULT; \ } \ _ret; \ }) #ifdef CONFIG_HARDENED_USERCOPY void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len); #endif #endif /* __LINUX_UACCESS_H__ */
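/* * Illustrative sketch (uptr is an assumed __user pointer; this block is not * part of the header itself): a fast-path read of a single user value with * the inlined variant: * * u32 val; * * if (get_user_inline(val, uptr)) * return -EFAULT; */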
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H #include <linux/bug.h> #include <linux/kasan-enabled.h> #include <linux/kasan-tags.h> #include <linux/kernel.h> #include <linux/static_key.h> #include <linux/types.h> struct kmem_cache; struct page; struct slab; struct vm_struct; struct task_struct; #ifdef CONFIG_KASAN #include <linux/linkage.h> #include <asm/kasan.h> #endif typedef unsigned int __bitwise kasan_vmalloc_flags_t; #define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u) #define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u) #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u) #define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u) #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */ #define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */ #if defined(CONFIG_KASAN_GENERIC) || \
defined(CONFIG_KASAN_SW_TAGS) #include <linux/pgtable.h> /* Software KASAN implementations use shadow memory. */ #ifdef CONFIG_KASAN_SW_TAGS /* This matches KASAN_TAG_INVALID. */ #define KASAN_SHADOW_INIT 0xFE #else #define KASAN_SHADOW_INIT 0 #endif #ifndef PTE_HWTABLE_PTRS #define PTE_HWTABLE_PTRS 0 #endif extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]; extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD]; extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD]; extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; int kasan_populate_early_shadow(const void *shadow_start, const void *shadow_end); #ifndef kasan_mem_to_shadow static inline void *kasan_mem_to_shadow(const void *addr) { return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET; } #endif int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); /* Enable reporting bugs after kasan_disable_current() */ extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ extern void kasan_disable_current(void); #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ static inline int kasan_add_zero_shadow(void *start, unsigned long size) { return 0; } static inline void kasan_remove_zero_shadow(void *start, unsigned long size) {} static inline void kasan_enable_current(void) {} static inline void kasan_disable_current(void) {} #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ #ifdef CONFIG_KASAN_HW_TAGS #else /* CONFIG_KASAN_HW_TAGS */ #endif /* CONFIG_KASAN_HW_TAGS */ static inline bool kasan_has_integrated_init(void) { return kasan_hw_tags_enabled(); } #ifdef CONFIG_KASAN void __kasan_unpoison_range(const void *addr, size_t size); static __always_inline void kasan_unpoison_range(const void *addr, size_t size) { if (kasan_enabled()) __kasan_unpoison_range(addr, size); } void __kasan_poison_pages(struct page *page, unsigned int order, bool init); static __always_inline void kasan_poison_pages(struct page *page, unsigned int order, bool init) { if (kasan_enabled()) __kasan_poison_pages(page, order, init); } bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init); static __always_inline bool kasan_unpoison_pages(struct page *page, unsigned int order, bool init) { if (kasan_enabled()) return __kasan_unpoison_pages(page, order, init); return false; } void __kasan_poison_slab(struct slab *slab); static __always_inline void kasan_poison_slab(struct slab *slab) { if (kasan_enabled()) __kasan_poison_slab(slab); } void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object); /** * kasan_unpoison_new_object - Temporarily unpoison a new slab object. * @cache: Cache the object belongs to. * @object: Pointer to the object. * * This function is intended for the slab allocator's internal use. It * temporarily unpoisons an object from a newly allocated slab without doing * anything else. The object must later be repoisoned by * kasan_poison_new_object(). */ static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache, void *object) { if (kasan_enabled()) __kasan_unpoison_new_object(cache, object); } void __kasan_poison_new_object(struct kmem_cache *cache, void *object); /** * kasan_poison_new_object - Repoison a new slab object. * @cache: Cache the object belongs to. * @object: Pointer to the object. * * This function is intended for the slab allocator's internal use.
It * repoisons an object that was previously unpoisoned by * kasan_unpoison_new_object() without doing anything else. */ static __always_inline void kasan_poison_new_object(struct kmem_cache *cache, void *object) { if (kasan_enabled()) __kasan_poison_new_object(cache, object); } void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, const void *object); static __always_inline void * __must_check kasan_init_slab_obj( struct kmem_cache *cache, const void *object) { if (kasan_enabled()) return __kasan_init_slab_obj(cache, object); return (void *)object; } bool __kasan_slab_pre_free(struct kmem_cache *s, void *object, unsigned long ip); /** * kasan_slab_pre_free - Check whether freeing a slab object is safe. * @object: Object to be freed. * * This function checks whether freeing the given object is safe. It may * check for double-free and invalid-free bugs and report them. * * This function is intended only for use by the slab allocator. * * @Return true if freeing the object is unsafe; false otherwise. */ static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object) { if (kasan_enabled()) return __kasan_slab_pre_free(s, object, _RET_IP_); return false; } bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init, bool still_accessible, bool no_quarantine); /** * kasan_slab_free - Poison, initialize, and quarantine a slab object. * @object: Object to be freed. * @init: Whether to initialize the object. * @still_accessible: Whether the object contents are still accessible. * * This function informs that a slab object has been freed and is not * supposed to be accessed anymore, except when @still_accessible is set * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU * grace period might not have passed yet). * * For KASAN modes that have integrated memory initialization * (kasan_has_integrated_init() == true), this function also initializes * the object's memory. For other modes, the @init argument is ignored. * * This function might also take ownership of the object to quarantine it. * When this happens, KASAN will defer freeing the object to a later * stage and handle it internally until then. The return value indicates * whether KASAN took ownership of the object. * * This function is intended only for use by the slab allocator. * * @Return true if KASAN took ownership of the object; false otherwise. 
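* * Simplified caller sketch (illustrative, not the allocator's actual code): * a true return value means KASAN now owns the object, so the slab allocator * must not free it itself: * * if (kasan_slab_free(s, object, init, false, false)) * return;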
*/ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init, bool still_accessible, bool no_quarantine) { if (kasan_enabled()) return __kasan_slab_free(s, object, init, still_accessible, no_quarantine); return false; } void __kasan_kfree_large(void *ptr, unsigned long ip); static __always_inline void kasan_kfree_large(void *ptr) { if (kasan_enabled()) __kasan_kfree_large(ptr, _RET_IP_); } void * __must_check __kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags, bool init); static __always_inline void * __must_check kasan_slab_alloc( struct kmem_cache *s, void *object, gfp_t flags, bool init) { if (kasan_enabled()) return __kasan_slab_alloc(s, object, flags, init); return object; } void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags); static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) { if (kasan_enabled()) return __kasan_kmalloc(s, object, size, flags); return (void *)object; } void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) { if (kasan_enabled()) return __kasan_kmalloc_large(ptr, size, flags); return (void *)ptr; } void * __must_check __kasan_krealloc(const void *object, size_t new_size, gfp_t flags); static __always_inline void * __must_check kasan_krealloc(const void *object, size_t new_size, gfp_t flags) { if (kasan_enabled()) return __kasan_krealloc(object, new_size, flags); return (void *)object; } bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, unsigned long ip); /** * kasan_mempool_poison_pages - Check and poison a mempool page allocation. * @page: Pointer to the page allocation. * @order: Order of the allocation. * * This function is intended for kernel subsystems that cache page allocations * to reuse them instead of freeing them back to page_alloc (e.g. mempool). * * This function is similar to kasan_mempool_poison_object() but operates on * page allocations. * * Before the poisoned allocation can be reused, it must be unpoisoned via * kasan_mempool_unpoison_pages(). * * Return: true if the allocation can be safely reused; false otherwise. */ static __always_inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order) { if (kasan_enabled()) return __kasan_mempool_poison_pages(page, order, _RET_IP_); return true; } void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order, unsigned long ip); /** * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation. * @page: Pointer to the page allocation. * @order: Order of the allocation. * * This function is intended for kernel subsystems that cache page allocations * to reuse them instead of freeing them back to page_alloc (e.g. mempool). * * This function unpoisons a page allocation that was previously poisoned by * kasan_mempool_poison_pages() without zeroing the allocation's memory. For * the tag-based modes, this function assigns a new tag to the allocation. */ static __always_inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) { if (kasan_enabled()) __kasan_mempool_unpoison_pages(page, order, _RET_IP_); } bool __kasan_mempool_poison_object(void *ptr, unsigned long ip); /** * kasan_mempool_poison_object - Check and poison a mempool slab allocation. * @ptr: Pointer to the slab allocation. 
* * This function is intended for kernel subsystems that cache slab allocations * to reuse them instead of freeing them back to the slab allocator (e.g. * mempool). * * This function poisons a slab allocation and saves a free stack trace for it * without initializing the allocation's memory and without putting it into the * quarantine (for the Generic mode). * * This function also performs checks to detect double-free and invalid-free * bugs and reports them. The caller can use the return value of this function * to find out if the allocation is buggy. * * Before the poisoned allocation can be reused, it must be unpoisoned via * kasan_mempool_unpoison_object(). * * This function operates on all slab allocations including large kmalloc * allocations (the ones returned by kmalloc_large() or by kmalloc() with the * size > KMALLOC_MAX_CACHE_SIZE). * * Return: true if the allocation can be safely reused; false otherwise. */ static __always_inline bool kasan_mempool_poison_object(void *ptr) { if (kasan_enabled()) return __kasan_mempool_poison_object(ptr, _RET_IP_); return true; } void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip); /** * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation. * @ptr: Pointer to the slab allocation. * @size: Size to be unpoisoned. * * This function is intended for kernel subsystems that cache slab allocations * to reuse them instead of freeing them back to the slab allocator (e.g. * mempool). * * This function unpoisons a slab allocation that was previously poisoned via * kasan_mempool_poison_object() and saves an alloc stack trace for it without * initializing the allocation's memory. For the tag-based modes, this function * does not assign a new tag to the allocation and instead restores the * original tags based on the pointer value. * * This function operates on all slab allocations including large kmalloc * allocations (the ones returned by kmalloc_large() or by kmalloc() with the * size > KMALLOC_MAX_CACHE_SIZE). */ static __always_inline void kasan_mempool_unpoison_object(void *ptr, size_t size) { if (kasan_enabled()) __kasan_mempool_unpoison_object(ptr, size, _RET_IP_); } /* * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for * the hardware tag-based mode that doesn't rely on compiler instrumentation.
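* * Illustrative sketch: callers that must not touch potentially freed memory * can bail out early; a poisoned byte has already been reported by the time * this returns false: * * if (!kasan_check_byte(addr)) * return;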
*/ bool __kasan_check_byte(const void *addr, unsigned long ip); static __always_inline bool kasan_check_byte(const void *addr) { if (kasan_enabled()) return __kasan_check_byte(addr, _RET_IP_); return true; } #else /* CONFIG_KASAN */ static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_poison_pages(struct page *page, unsigned int order, bool init) {} static inline bool kasan_unpoison_pages(struct page *page, unsigned int order, bool init) { return false; } static inline void kasan_poison_slab(struct slab *slab) {} static inline void kasan_unpoison_new_object(struct kmem_cache *cache, void *object) {} static inline void kasan_poison_new_object(struct kmem_cache *cache, void *object) {} static inline void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object) { return (void *)object; } static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object) { return false; } static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init, bool still_accessible, bool no_quarantine) { return false; } static inline void kasan_kfree_large(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags, bool init) { return object; } static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) { return (void *)object; } static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) { return (void *)ptr; } static inline void *kasan_krealloc(const void *object, size_t new_size, gfp_t flags) { return (void *)object; } static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order) { return true; } static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {} static inline bool kasan_mempool_poison_object(void *ptr) { return true; } static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {} static inline bool kasan_check_byte(const void *address) { return true; } #endif /* CONFIG_KASAN */ #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) void kasan_unpoison_task_stack(struct task_struct *task); asmlinkage void kasan_unpoison_task_stack_below(const void *watermark); #else static inline void kasan_unpoison_task_stack(struct task_struct *task) {} static inline void kasan_unpoison_task_stack_below(const void *watermark) {} #endif #ifdef CONFIG_KASAN_GENERIC struct kasan_cache { int alloc_meta_offset; int free_meta_offset; }; size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object); void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags); void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_record_aux_stack(void *ptr); #else /* CONFIG_KASAN_GENERIC */ /* Tag-based KASAN modes do not use per-object metadata. */ static inline size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object) { return 0; } /* And no cache-related metadata initialization is required. 
*/ static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_record_aux_stack(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) static inline void *kasan_reset_tag(const void *addr) { return (void *)arch_kasan_reset_tag(addr); } /** * kasan_report - print a report about a bad memory access detected by KASAN * @addr: address of the bad access * @size: size of the bad access * @is_write: whether the bad access is a write or a read * @ip: instruction pointer for the accessibility check or the bad access itself */ bool kasan_report(const void *addr, size_t size, bool is_write, unsigned long ip); #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline void *kasan_reset_tag(const void *addr) { return (void *)addr; } #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/ #ifdef CONFIG_KASAN_HW_TAGS void kasan_report_async(void); #endif /* CONFIG_KASAN_HW_TAGS */ #ifdef CONFIG_KASAN_GENERIC void __init kasan_init_generic(void); #else static inline void kasan_init_generic(void) { } #endif #ifdef CONFIG_KASAN_SW_TAGS void __init kasan_init_sw_tags(void); #else static inline void kasan_init_sw_tags(void) { } #endif #ifdef CONFIG_KASAN_HW_TAGS void kasan_init_hw_tags_cpu(void); void __init kasan_init_hw_tags(void); #else static inline void kasan_init_hw_tags_cpu(void) { } static inline void kasan_init_hw_tags(void) { } #endif #ifdef CONFIG_KASAN_VMALLOC #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask); static inline int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask) { if (kasan_enabled()) return __kasan_populate_vmalloc(addr, size, gfp_mask); return 0; } void __kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end, unsigned long flags); static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end, unsigned long flags) { if (kasan_enabled()) return __kasan_release_vmalloc(start, end, free_region_start, free_region_end, flags); } #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ static inline void kasan_populate_early_vm_area_shadow(void *start, unsigned long size) { } static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size, gfp_t gfp_mask) { return 0; } static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end, unsigned long flags) { } #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, kasan_vmalloc_flags_t flags); static __always_inline void *kasan_unpoison_vmalloc(const void *start, unsigned long size, kasan_vmalloc_flags_t flags) { if (kasan_enabled()) return __kasan_unpoison_vmalloc(start, size, flags); return (void *)start; } void __kasan_poison_vmalloc(const void *start, unsigned long size); static __always_inline void kasan_poison_vmalloc(const void *start, unsigned long size) { if (kasan_enabled()) __kasan_poison_vmalloc(start, size); } #else /* 
CONFIG_KASAN_VMALLOC */ static inline void kasan_populate_early_vm_area_shadow(void *start, unsigned long size) { } static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size, gfp_t gfp_mask) { return 0; } static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end, unsigned long flags) { } static inline void *kasan_unpoison_vmalloc(const void *start, unsigned long size, kasan_vmalloc_flags_t flags) { return (void *)start; } static inline void kasan_poison_vmalloc(const void *start, unsigned long size) { } #endif /* CONFIG_KASAN_VMALLOC */ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ !defined(CONFIG_KASAN_VMALLOC) /* * These functions allocate and free shadow memory for kernel modules. * They are only required when KASAN_VMALLOC is not supported, as otherwise * shadow memory is allocated by the generic vmalloc handlers. */ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask); void kasan_free_module_shadow(const struct vm_struct *vm); #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; } static inline void kasan_free_module_shadow(const struct vm_struct *vm) {} #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) void kasan_non_canonical_hook(unsigned long addr); #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ static inline void kasan_non_canonical_hook(unsigned long addr) { } #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ #endif /* _LINUX_KASAN_H */
// SPDX-License-Identifier: GPL-2.0-or-later /* * cgroups support for the BFQ I/O scheduler. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/cgroup.h> #include <linux/ktime.h> #include <linux/rbtree.h> #include <linux/ioprio.h> #include <linux/sbitmap.h> #include <linux/delay.h> #include "elevator.h" #include "bfq-iosched.h" #ifdef CONFIG_BFQ_CGROUP_DEBUG static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp) { int ret; ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); if (ret) return ret; atomic64_set(&stat->aux_cnt, 0); return 0; } static void bfq_stat_exit(struct bfq_stat *stat) { percpu_counter_destroy(&stat->cpu_cnt); } /** * bfq_stat_add - add a value to a bfq_stat * @stat: target bfq_stat * @val: value to add * * Add @val to @stat. The caller must ensure that IRQs on the same CPU * don't re-enter this function for the same counter.
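* * Example (as used later in this file by bfqg_stats_update_dequeue()): * * bfq_stat_add(&bfqg->stats.dequeue, 1);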
*/ static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val) { percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); } /** * bfq_stat_read - read the current value of a bfq_stat * @stat: bfq_stat to read */ static inline uint64_t bfq_stat_read(struct bfq_stat *stat) { return percpu_counter_sum_positive(&stat->cpu_cnt); } /** * bfq_stat_reset - reset a bfq_stat * @stat: bfq_stat to reset */ static inline void bfq_stat_reset(struct bfq_stat *stat) { percpu_counter_set(&stat->cpu_cnt, 0); atomic64_set(&stat->aux_cnt, 0); } /** * bfq_stat_add_aux - add a bfq_stat into another's aux count * @to: the destination bfq_stat * @from: the source * * Add @from's count including the aux one to @to's aux count. */ static inline void bfq_stat_add_aux(struct bfq_stat *to, struct bfq_stat *from) { atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt), &to->aux_cnt); } /** * blkg_prfill_stat - prfill callback for bfq_stat * @sf: seq_file to print to * @pd: policy private data of interest * @off: offset to the bfq_stat in @pd * * prfill callback for printing a bfq_stat. */ static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off) { return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off)); } /* bfqg stats flags */ enum bfqg_stats_flags { BFQG_stats_waiting = 0, BFQG_stats_idling, BFQG_stats_empty, }; #define BFQG_FLAG_FNS(name) \ static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \ { \ stats->flags |= (1 << BFQG_stats_##name); \ } \ static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \ { \ stats->flags &= ~(1 << BFQG_stats_##name); \ } \ static int bfqg_stats_##name(struct bfqg_stats *stats) \ { \ return (stats->flags & (1 << BFQG_stats_##name)) != 0; \ } \ BFQG_FLAG_FNS(waiting) BFQG_FLAG_FNS(idling) BFQG_FLAG_FNS(empty) #undef BFQG_FLAG_FNS /* This should be called with the scheduler lock held. */ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) { u64 now; if (!bfqg_stats_waiting(stats)) return; now = blk_time_get_ns(); if (now > stats->start_group_wait_time) bfq_stat_add(&stats->group_wait_time, now - stats->start_group_wait_time); bfqg_stats_clear_waiting(stats); } /* This should be called with the scheduler lock held. */ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, struct bfq_group *curr_bfqg) { struct bfqg_stats *stats = &bfqg->stats; if (bfqg_stats_waiting(stats)) return; if (bfqg == curr_bfqg) return; stats->start_group_wait_time = blk_time_get_ns(); bfqg_stats_mark_waiting(stats); } /* This should be called with the scheduler lock held. */ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { u64 now; if (!bfqg_stats_empty(stats)) return; now = blk_time_get_ns(); if (now > stats->start_empty_time) bfq_stat_add(&stats->empty_time, now - stats->start_empty_time); bfqg_stats_clear_empty(stats); } void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { bfq_stat_add(&bfqg->stats.dequeue, 1); } void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { struct bfqg_stats *stats = &bfqg->stats; if (blkg_rwstat_total(&stats->queued)) return; /* * group is already marked empty. This can happen if bfqq got new * request in parent group and moved to this group while being added * to service tree. Just ignore the event and move on. 
*/ if (bfqg_stats_empty(stats)) return; stats->start_empty_time = blk_time_get_ns(); bfqg_stats_mark_empty(stats); } void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { struct bfqg_stats *stats = &bfqg->stats; if (bfqg_stats_idling(stats)) { u64 now = blk_time_get_ns(); if (now > stats->start_idle_time) bfq_stat_add(&stats->idle_time, now - stats->start_idle_time); bfqg_stats_clear_idling(stats); } } void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { struct bfqg_stats *stats = &bfqg->stats; stats->start_idle_time = blk_time_get_ns(); bfqg_stats_mark_idling(stats); } void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { struct bfqg_stats *stats = &bfqg->stats; bfq_stat_add(&stats->avg_queue_size_sum, blkg_rwstat_total(&stats->queued)); bfq_stat_add(&stats->avg_queue_size_samples, 1); bfqg_stats_update_group_wait_time(stats); } void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, blk_opf_t opf) { blkg_rwstat_add(&bfqg->stats.queued, opf, 1); bfqg_stats_end_empty_time(&bfqg->stats); if (!(bfqq == bfqg->bfqd->in_service_queue)) bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); } void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { blkg_rwstat_add(&bfqg->stats.queued, opf, -1); } void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { blkg_rwstat_add(&bfqg->stats.merged, opf, 1); } void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns, u64 io_start_time_ns, blk_opf_t opf) { struct bfqg_stats *stats = &bfqg->stats; u64 now = blk_time_get_ns(); if (now > io_start_time_ns) blkg_rwstat_add(&stats->service_time, opf, now - io_start_time_ns); if (io_start_time_ns > start_time_ns) blkg_rwstat_add(&stats->wait_time, opf, io_start_time_ns - start_time_ns); } #else /* CONFIG_BFQ_CGROUP_DEBUG */ void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { } void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { } void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns, u64 io_start_time_ns, blk_opf_t opf) { } void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } #endif /* CONFIG_BFQ_CGROUP_DEBUG */ #ifdef CONFIG_BFQ_GROUP_IOSCHED /* * blk-cgroup policy-related handlers * The following functions help in converting between blk-cgroup * internal structures and BFQ-specific structures. */ static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd) { return pd ? container_of(pd, struct bfq_group, pd) : NULL; } struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg) { return pd_to_blkg(&bfqg->pd); } static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) { return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq)); } /* * bfq_group handlers * The following functions help in navigating the bfq_group hierarchy * by allowing to find the parent of a bfq_group or the bfq_group * associated to a bfq_queue. */ static struct bfq_group *bfqg_parent(struct bfq_group *bfqg) { struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent; return pblkg ? blkg_to_bfqg(pblkg) : NULL; } struct bfq_group *bfqq_group(struct bfq_queue *bfqq) { struct bfq_entity *group_entity = bfqq->entity.parent; return group_entity ? container_of(group_entity, struct bfq_group, entity) : bfqq->bfqd->root_group; } /* * The following two functions handle get and put of a bfq_group by * wrapping the related blk-cgroup hooks. 
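* * Illustrative pairing sketch: each bfqg_and_blkg_get() takes both * references, so the blkg cannot disappear while the bfq_group is in use: * * bfqg_and_blkg_get(bfqg); * ... use bfqg and its blkg ... * bfqg_and_blkg_put(bfqg);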
*/ static void bfqg_get(struct bfq_group *bfqg) { refcount_inc(&bfqg->ref); } static void bfqg_put(struct bfq_group *bfqg) { if (refcount_dec_and_test(&bfqg->ref)) kfree(bfqg); } static void bfqg_and_blkg_get(struct bfq_group *bfqg) { /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ bfqg_get(bfqg); blkg_get(bfqg_to_blkg(bfqg)); } void bfqg_and_blkg_put(struct bfq_group *bfqg) { blkg_put(bfqg_to_blkg(bfqg)); bfqg_put(bfqg); } void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq) { struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg); if (!bfqg) return; blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq)); blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1); } /* @stats = 0 */ static void bfqg_stats_reset(struct bfqg_stats *stats) { #ifdef CONFIG_BFQ_CGROUP_DEBUG /* queued stats shouldn't be cleared */ blkg_rwstat_reset(&stats->merged); blkg_rwstat_reset(&stats->service_time); blkg_rwstat_reset(&stats->wait_time); bfq_stat_reset(&stats->time); bfq_stat_reset(&stats->avg_queue_size_sum); bfq_stat_reset(&stats->avg_queue_size_samples); bfq_stat_reset(&stats->dequeue); bfq_stat_reset(&stats->group_wait_time); bfq_stat_reset(&stats->idle_time); bfq_stat_reset(&stats->empty_time); #endif } /* @to += @from */ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) { if (!to || !from) return; #ifdef CONFIG_BFQ_CGROUP_DEBUG /* queued stats shouldn't be cleared */ blkg_rwstat_add_aux(&to->merged, &from->merged); blkg_rwstat_add_aux(&to->service_time, &from->service_time); blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); bfq_stat_add_aux(&to->time, &from->time); bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); bfq_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples); bfq_stat_add_aux(&to->dequeue, &from->dequeue); bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time); bfq_stat_add_aux(&to->idle_time, &from->idle_time); bfq_stat_add_aux(&to->empty_time, &from->empty_time); #endif } /* * Transfer @bfqg's stats to its parent's aux counts so that the ancestors' * recursive stats can still account for the amount used by this bfqg after * it's gone. */ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) { struct bfq_group *parent; if (!bfqg) /* root_group */ return; parent = bfqg_parent(bfqg); lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock); if (unlikely(!parent)) return; bfqg_stats_add_aux(&parent->stats, &bfqg->stats); bfqg_stats_reset(&bfqg->stats); } void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); entity->weight = entity->new_weight; entity->orig_weight = entity->new_weight; if (bfqq) { bfqq->ioprio = bfqq->new_ioprio; bfqq->ioprio_class = bfqq->new_ioprio_class; /* * Make sure that bfqg and its associated blkg do not * disappear before entity.
*/ bfqg_and_blkg_get(bfqg); } entity->parent = bfqg->my_entity; /* NULL for root group */ entity->sched_data = &bfqg->sched_data; } static void bfqg_stats_exit(struct bfqg_stats *stats) { blkg_rwstat_exit(&stats->bytes); blkg_rwstat_exit(&stats->ios); #ifdef CONFIG_BFQ_CGROUP_DEBUG blkg_rwstat_exit(&stats->merged); blkg_rwstat_exit(&stats->service_time); blkg_rwstat_exit(&stats->wait_time); blkg_rwstat_exit(&stats->queued); bfq_stat_exit(&stats->time); bfq_stat_exit(&stats->avg_queue_size_sum); bfq_stat_exit(&stats->avg_queue_size_samples); bfq_stat_exit(&stats->dequeue); bfq_stat_exit(&stats->group_wait_time); bfq_stat_exit(&stats->idle_time); bfq_stat_exit(&stats->empty_time); #endif } static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) { if (blkg_rwstat_init(&stats->bytes, gfp) || blkg_rwstat_init(&stats->ios, gfp)) goto error; #ifdef CONFIG_BFQ_CGROUP_DEBUG if (blkg_rwstat_init(&stats->merged, gfp) || blkg_rwstat_init(&stats->service_time, gfp) || blkg_rwstat_init(&stats->wait_time, gfp) || blkg_rwstat_init(&stats->queued, gfp) || bfq_stat_init(&stats->time, gfp) || bfq_stat_init(&stats->avg_queue_size_sum, gfp) || bfq_stat_init(&stats->avg_queue_size_samples, gfp) || bfq_stat_init(&stats->dequeue, gfp) || bfq_stat_init(&stats->group_wait_time, gfp) || bfq_stat_init(&stats->idle_time, gfp) || bfq_stat_init(&stats->empty_time, gfp)) goto error; #endif return 0; error: bfqg_stats_exit(stats); return -ENOMEM; } static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) { return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL; } static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) { return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq)); } static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) { struct bfq_group_data *bgd; bgd = kzalloc(sizeof(*bgd), gfp); if (!bgd) return NULL; bgd->weight = CGROUP_WEIGHT_DFL; return &bgd->pd; } static void bfq_cpd_free(struct blkcg_policy_data *cpd) { kfree(cpd_to_bfqgd(cpd)); } static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp) { struct bfq_group *bfqg; bfqg = kzalloc_node(sizeof(*bfqg), gfp, disk->node_id); if (!bfqg) return NULL; if (bfqg_stats_init(&bfqg->stats, gfp)) { kfree(bfqg); return NULL; } /* see comments in bfq_bic_update_cgroup for why refcounting */ refcount_set(&bfqg->ref, 1); return &bfqg->pd; } static void bfq_pd_init(struct blkg_policy_data *pd) { struct blkcg_gq *blkg = pd_to_blkg(pd); struct bfq_group *bfqg = blkg_to_bfqg(blkg); struct bfq_data *bfqd = blkg->q->elevator->elevator_data; struct bfq_entity *entity = &bfqg->entity; struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg); entity->orig_weight = entity->weight = entity->new_weight = d->weight; entity->my_sched_data = &bfqg->sched_data; entity->last_bfqq_created = NULL; bfqg->my_entity = entity; /* * the root_group's will be set to NULL * in bfq_init_queue() */ bfqg->bfqd = bfqd; bfqg->active_entities = 0; bfqg->num_queues_with_pending_reqs = 0; bfqg->rq_pos_tree = RB_ROOT; } static void bfq_pd_free(struct blkg_policy_data *pd) { struct bfq_group *bfqg = pd_to_bfqg(pd); bfqg_stats_exit(&bfqg->stats); bfqg_put(bfqg); } static void bfq_pd_reset_stats(struct blkg_policy_data *pd) { struct bfq_group *bfqg = pd_to_bfqg(pd); bfqg_stats_reset(&bfqg->stats); } static void bfq_group_set_parent(struct bfq_group *bfqg, struct bfq_group *parent) { struct bfq_entity *entity; entity = &bfqg->entity; entity->parent = parent->my_entity; entity->sched_data = &parent->sched_data; } static 
void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg) { struct bfq_group *parent; struct bfq_entity *entity; /* * Update chain of bfq_groups as we might be handling a leaf group * which, along with some of its relatives, has not been hooked yet * to the private hierarchy of BFQ. */ entity = &bfqg->entity; for_each_entity(entity) { struct bfq_group *curr_bfqg = container_of(entity, struct bfq_group, entity); if (curr_bfqg != bfqd->root_group) { parent = bfqg_parent(curr_bfqg); if (!parent) parent = bfqd->root_group; bfq_group_set_parent(curr_bfqg, parent); } } } struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) { struct blkcg_gq *blkg = bio->bi_blkg; struct bfq_group *bfqg; while (blkg) { if (!blkg->online) { blkg = blkg->parent; continue; } bfqg = blkg_to_bfqg(blkg); if (bfqg->pd.online) { bio_associate_blkg_from_css(bio, &blkg->blkcg->css); return bfqg; } blkg = blkg->parent; } bio_associate_blkg_from_css(bio, &bfqg_to_blkg(bfqd->root_group)->blkcg->css); return bfqd->root_group; } /** * bfq_bfqq_move - migrate @bfqq to @bfqg. * @bfqd: queue descriptor. * @bfqq: the queue to move. * @bfqg: the group to move to. * * Move @bfqq to @bfqg, deactivating it from its old group and reactivating * it on the new one. Avoid putting the entity on the old group idle tree. * * Must be called under the scheduler lock, to make sure that the blkg * owning @bfqg does not disappear (see comments in * bfq_bic_update_cgroup on guaranteeing the consistency of blkg * objects). */ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct bfq_group *bfqg) { struct bfq_entity *entity = &bfqq->entity; struct bfq_group *old_parent = bfqq_group(bfqq); bool has_pending_reqs = false; /* * No point to move bfqq to the same group, which can happen when * root group is offlined */ if (old_parent == bfqg) return; /* * oom_bfqq is not allowed to move, oom_bfqq will hold ref to root_group * until elevator exit. */ if (bfqq == &bfqd->oom_bfqq) return; /* * Get extra reference to prevent bfqq from being freed in * next possible expire or deactivate. */ bfqq->ref++; if (entity->in_groups_with_pending_reqs) { has_pending_reqs = true; bfq_del_bfqq_in_groups_with_pending_reqs(bfqq); } /* If bfqq is empty, then bfq_bfqq_expire also invokes * bfq_del_bfqq_busy, thereby removing bfqq and its entity * from data structures related to current group. Otherwise we * need to remove bfqq explicitly with bfq_deactivate_bfqq, as * we do below. 
*/ if (bfqq == bfqd->in_service_queue) bfq_bfqq_expire(bfqd, bfqd->in_service_queue, false, BFQQE_PREEMPTED); if (bfq_bfqq_busy(bfqq)) bfq_deactivate_bfqq(bfqd, bfqq, false, false); else if (entity->on_st_or_in_serv) bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); bfqg_and_blkg_put(old_parent); bfq_reassign_last_bfqq(bfqq, NULL); entity->parent = bfqg->my_entity; entity->sched_data = &bfqg->sched_data; /* pin down bfqg and its associated blkg */ bfqg_and_blkg_get(bfqg); if (has_pending_reqs) bfq_add_bfqq_in_groups_with_pending_reqs(bfqq); if (bfq_bfqq_busy(bfqq)) { if (unlikely(!bfqd->nonrot_with_queueing)) bfq_pos_tree_add_move(bfqd, bfqq); bfq_activate_bfqq(bfqd, bfqq); } if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver) bfq_schedule_dispatch(bfqd); /* release extra ref taken above, bfqq may happen to be freed now */ bfq_put_queue(bfqq); } static void bfq_sync_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *sync_bfqq, struct bfq_io_cq *bic, struct bfq_group *bfqg, unsigned int act_idx) { struct bfq_queue *bfqq; if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) { /* We are the only user of this bfqq, just move it */ if (sync_bfqq->entity.sched_data != &bfqg->sched_data) bfq_bfqq_move(bfqd, sync_bfqq, bfqg); return; } /* * The queue was merged to a different queue. Check * that the merge chain still belongs to the same * cgroup. */ for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq) if (bfqq->entity.sched_data != &bfqg->sched_data) break; if (bfqq) { /* * Some queue changed cgroup so the merge is not valid * anymore. We cannot easily just cancel the merge (by * clearing new_bfqq) as there may be other processes * using this queue and holding refs to all queues * below sync_bfqq->new_bfqq. Similarly if the merge * already happened, we need to detach from bfqq now * so that we cannot merge bio to a request from the * old cgroup. */ bfq_put_cooperator(sync_bfqq); bic_set_bfqq(bic, NULL, true, act_idx); bfq_release_process_ref(bfqd, sync_bfqq); } } /** * __bfq_bic_change_cgroup - move @bic to @bfqg. * @bfqd: the queue descriptor. * @bic: the bic to move. * @bfqg: the group to move to. * * Move bic to blkcg, assuming that bfqd->lock is held; which makes * sure that the reference to cgroup is valid across the call (see * comments in bfq_bic_update_cgroup on this issue) */ static void __bfq_bic_change_cgroup(struct bfq_data *bfqd, struct bfq_io_cq *bic, struct bfq_group *bfqg) { unsigned int act_idx; for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) { struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx); struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx); if (async_bfqq && async_bfqq->entity.sched_data != &bfqg->sched_data) { bic_set_bfqq(bic, NULL, false, act_idx); bfq_release_process_ref(bfqd, async_bfqq); } if (sync_bfqq) bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx); } } void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) { struct bfq_data *bfqd = bic_to_bfqd(bic); struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio); uint64_t serial_nr; serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr; /* * Check whether blkcg has changed. The condition may trigger * spuriously on a newly created cic but there's no harm. */ if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) return; /* * New cgroup for this process. Make sure it is linked to bfq internal * cgroup hierarchy. 
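* bfq_link_bfqg() hooks the group, and any of its ancestors not hooked * yet, into BFQ's private hierarchy, and __bfq_bic_change_cgroup() then * moves the bic's queues to the new group.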
*/ bfq_link_bfqg(bfqd, bfqg); __bfq_bic_change_cgroup(bfqd, bic, bfqg); bic->blkcg_serial_nr = serial_nr; } /** * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. * @st: the service tree being flushed. */ static void bfq_flush_idle_tree(struct bfq_service_tree *st) { struct bfq_entity *entity = st->first_idle; for (; entity ; entity = st->first_idle) __bfq_deactivate_entity(entity, false); } /** * bfq_reparent_leaf_entity - move leaf entity to the root_group. * @bfqd: the device data structure with the root group. * @entity: the entity to move, if entity is a leaf; or the parent entity * of an active leaf entity to move, if entity is not a leaf. * @ioprio_class: I/O priority class to reparent. */ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, struct bfq_entity *entity, int ioprio_class) { struct bfq_queue *bfqq; struct bfq_entity *child_entity = entity; while (child_entity->my_sched_data) { /* leaf not reached yet */ struct bfq_sched_data *child_sd = child_entity->my_sched_data; struct bfq_service_tree *child_st = child_sd->service_tree + ioprio_class; struct rb_root *child_active = &child_st->active; child_entity = bfq_entity_of(rb_first(child_active)); if (!child_entity) child_entity = child_sd->in_service_entity; } bfqq = bfq_entity_to_bfqq(child_entity); bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); } /** * bfq_reparent_active_queues - move to the root group all active queues. * @bfqd: the device data structure with the root group. * @bfqg: the group to move from. * @st: the service tree to start the search from. * @ioprio_class: I/O priority class to reparent. */ static void bfq_reparent_active_queues(struct bfq_data *bfqd, struct bfq_group *bfqg, struct bfq_service_tree *st, int ioprio_class) { struct rb_root *active = &st->active; struct bfq_entity *entity; while ((entity = bfq_entity_of(rb_first(active)))) bfq_reparent_leaf_entity(bfqd, entity, ioprio_class); if (bfqg->sched_data.in_service_entity) bfq_reparent_leaf_entity(bfqd, bfqg->sched_data.in_service_entity, ioprio_class); } /** * bfq_pd_offline - deactivate the entity associated with @pd, * and reparent its children entities. * @pd: descriptor of the policy going offline. * * blkio already grabs the queue_lock for us, so no need to use * RCU-based magic */ static void bfq_pd_offline(struct blkg_policy_data *pd) { struct bfq_service_tree *st; struct bfq_group *bfqg = pd_to_bfqg(pd); struct bfq_data *bfqd = bfqg->bfqd; struct bfq_entity *entity = bfqg->my_entity; unsigned long flags; int i; spin_lock_irqsave(&bfqd->lock, flags); if (!entity) /* root group */ goto put_async_queues; /* * Empty all service_trees belonging to this group before * deactivating the group itself. */ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { st = bfqg->sched_data.service_tree + i; /* * It may happen that some queues are still active * (busy) upon group destruction (if the corresponding * processes have been forced to terminate). We move * all the leaf entities corresponding to these queues * to the root_group. * Also, it may happen that the group has an entity * in service, which is disconnected from the active * tree: it must be moved, too. * There is no need to put the sync queues, as the * scheduler has taken no reference. */ bfq_reparent_active_queues(bfqd, bfqg, st, i); /* * The idle tree may still contain bfq_queues * belonging to exited task because they never * migrated to a different cgroup from the one being * destroyed now. 
In addition, even * bfq_reparent_active_queues() may happen to add some * entities to the idle tree. It happens if, in some * of the calls to bfq_bfqq_move() performed by * bfq_reparent_active_queues(), the queue to move is * empty and gets expired. */ bfq_flush_idle_tree(st); } __bfq_deactivate_entity(entity, false); put_async_queues: bfq_put_async_queues(bfqd, bfqg); spin_unlock_irqrestore(&bfqd->lock, flags); /* * @blkg is going offline and will be ignored by * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so * that they don't get lost. If IOs complete after this point, the * stats for them will be lost. Oh well... */ bfqg_stats_xfer_dead(bfqg); } void bfq_end_wr_async(struct bfq_data *bfqd) { struct blkcg_gq *blkg; list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) { struct bfq_group *bfqg = blkg_to_bfqg(blkg); bfq_end_wr_async_queues(bfqd, bfqg); } bfq_end_wr_async_queues(bfqd, bfqd->root_group); } static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v) { struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); unsigned int val = 0; if (bfqgd) val = bfqgd->weight; seq_printf(sf, "%u\n", val); return 0; } static u64 bfqg_prfill_weight_device(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct bfq_group *bfqg = pd_to_bfqg(pd); if (!bfqg->entity.dev_weight) return 0; return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight); } static int bfq_io_show_weight(struct seq_file *sf, void *v) { struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); seq_printf(sf, "default %u\n", bfqgd->weight); blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device, &blkcg_policy_bfq, 0, false); return 0; } static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight) { weight = dev_weight ?: weight; bfqg->entity.dev_weight = dev_weight; /* * Setting the prio_changed flag of the entity * to 1 with new_weight == weight would re-set * the value of the weight to its ioprio mapping. * Set the flag only if necessary. */ if ((unsigned short)weight != bfqg->entity.new_weight) { bfqg->entity.new_weight = (unsigned short)weight; /* * Make sure that the above new value has been * stored in bfqg->entity.new_weight before * setting the prio_changed flag. In fact, * this flag may be read asynchronously (in * critical sections protected by a different * lock than that held here), and finding this * flag set may cause the execution of the code * for updating parameters whose value may * depend also on bfqg->entity.new_weight (in * __bfq_entity_update_weight_prio). * This barrier makes sure that the new value * of bfqg->entity.new_weight is correctly * seen in that code. 
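* (Readers such as __bfq_entity_update_weight_prio() are expected to pair * this smp_wmb() with a matching read barrier, so that once prio_changed * is seen set, the new value of new_weight is seen as well.)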
*/ smp_wmb(); bfqg->entity.prio_changed = 1; } } static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css, struct cftype *cftype, u64 val) { struct blkcg *blkcg = css_to_blkcg(css); struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); struct blkcg_gq *blkg; int ret = -ERANGE; if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT) return ret; ret = 0; spin_lock_irq(&blkcg->lock); bfqgd->weight = (unsigned short)val; hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { struct bfq_group *bfqg = blkg_to_bfqg(blkg); if (bfqg) bfq_group_set_weight(bfqg, val, 0); } spin_unlock_irq(&blkcg->lock); return ret; } static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { int ret; struct blkg_conf_ctx ctx; struct blkcg *blkcg = css_to_blkcg(of_css(of)); struct bfq_group *bfqg; u64 v; blkg_conf_init(&ctx, buf); ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, &ctx); if (ret) goto out; if (sscanf(ctx.body, "%llu", &v) == 1) { /* require "default" on dfl */ ret = -ERANGE; if (!v) goto out; } else if (!strcmp(strim(ctx.body), "default")) { v = 0; } else { ret = -EINVAL; goto out; } bfqg = blkg_to_bfqg(ctx.blkg); ret = -ERANGE; if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) { bfq_group_set_weight(bfqg, bfqg->entity.weight, v); ret = 0; } out: blkg_conf_exit(&ctx); return ret ?: nbytes; } static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { char *endp; int ret; u64 v; buf = strim(buf); /* "WEIGHT" or "default WEIGHT" sets the default weight */ v = simple_strtoull(buf, &endp, 0); if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) { ret = bfq_io_set_weight_legacy(of_css(of), NULL, v); return ret ?: nbytes; } return bfq_io_set_device_weight(of, buf, nbytes, off); } static int bfqg_print_rwstat(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, &blkcg_policy_bfq, seq_cft(sf)->private, true); return 0; } static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct blkg_rwstat_sample sum; blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum); return __blkg_prfill_rwstat(sf, pd, &sum); } static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq, seq_cft(sf)->private, true); return 0; } #ifdef CONFIG_BFQ_CGROUP_DEBUG static int bfqg_print_stat(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, &blkcg_policy_bfq, seq_cft(sf)->private, false); return 0; } static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct blkcg_gq *blkg = pd_to_blkg(pd); struct blkcg_gq *pos_blkg; struct cgroup_subsys_state *pos_css; u64 sum = 0; lockdep_assert_held(&blkg->q->queue_lock); rcu_read_lock(); blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { struct bfq_stat *stat; if (!pos_blkg->online) continue; stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off; sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt); } rcu_read_unlock(); return __blkg_prfill_u64(sf, pd, sum); } static int bfqg_print_stat_recursive(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), bfqg_prfill_stat_recursive, &blkcg_policy_bfq, seq_cft(sf)->private, false); return 0; } static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct 
bfq_group *bfqg = blkg_to_bfqg(pd->blkg); u64 sum = blkg_rwstat_total(&bfqg->stats.bytes); return __blkg_prfill_u64(sf, pd, sum >> 9); } static int bfqg_print_stat_sectors(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false); return 0; } static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct blkg_rwstat_sample tmp; blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq, offsetof(struct bfq_group, stats.bytes), &tmp); return __blkg_prfill_u64(sf, pd, (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9); } static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0, false); return 0; } static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct bfq_group *bfqg = pd_to_bfqg(pd); u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples); u64 v = 0; if (samples) { v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum); v = div64_u64(v, samples); } __blkg_prfill_u64(sf, pd, v); return 0; } /* print avg_queue_size */ static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), bfqg_prfill_avg_queue_size, &blkcg_policy_bfq, 0, false); return 0; } #endif /* CONFIG_BFQ_CGROUP_DEBUG */ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) { int ret; ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq); if (ret) return NULL; return blkg_to_bfqg(bfqd->queue->root_blkg); } struct blkcg_policy blkcg_policy_bfq = { .dfl_cftypes = bfq_blkg_files, .legacy_cftypes = bfq_blkcg_legacy_files, .cpd_alloc_fn = bfq_cpd_alloc, .cpd_free_fn = bfq_cpd_free, .pd_alloc_fn = bfq_pd_alloc, .pd_init_fn = bfq_pd_init, .pd_offline_fn = bfq_pd_offline, .pd_free_fn = bfq_pd_free, .pd_reset_stats_fn = bfq_pd_reset_stats, }; struct cftype bfq_blkcg_legacy_files[] = { { .name = "bfq.weight", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = bfq_io_show_weight_legacy, .write_u64 = bfq_io_set_weight_legacy, }, { .name = "bfq.weight_device", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = bfq_io_show_weight, .write = bfq_io_set_weight, }, /* statistics, covers only the tasks in the bfqg */ { .name = "bfq.io_service_bytes", .private = offsetof(struct bfq_group, stats.bytes), .seq_show = bfqg_print_rwstat, }, { .name = "bfq.io_serviced", .private = offsetof(struct bfq_group, stats.ios), .seq_show = bfqg_print_rwstat, }, #ifdef CONFIG_BFQ_CGROUP_DEBUG { .name = "bfq.time", .private = offsetof(struct bfq_group, stats.time), .seq_show = bfqg_print_stat, }, { .name = "bfq.sectors", .seq_show = bfqg_print_stat_sectors, }, { .name = "bfq.io_service_time", .private = offsetof(struct bfq_group, stats.service_time), .seq_show = bfqg_print_rwstat, }, { .name = "bfq.io_wait_time", .private = offsetof(struct bfq_group, stats.wait_time), .seq_show = bfqg_print_rwstat, }, { .name = "bfq.io_merged", .private = offsetof(struct bfq_group, stats.merged), .seq_show = bfqg_print_rwstat, }, { .name = "bfq.io_queued", .private = offsetof(struct bfq_group, stats.queued), .seq_show = bfqg_print_rwstat, }, #endif /* CONFIG_BFQ_CGROUP_DEBUG */ /* the same statistics which cover the bfqg and its descendants */ { .name = "bfq.io_service_bytes_recursive", .private = offsetof(struct bfq_group, stats.bytes), .seq_show = bfqg_print_rwstat_recursive, }, { .name = 
"bfq.io_serviced_recursive", .private = offsetof(struct bfq_group, stats.ios), .seq_show = bfqg_print_rwstat_recursive, }, #ifdef CONFIG_BFQ_CGROUP_DEBUG { .name = "bfq.time_recursive", .private = offsetof(struct bfq_group, stats.time), .seq_show = bfqg_print_stat_recursive, }, { .name = "bfq.sectors_recursive", .seq_show = bfqg_print_stat_sectors_recursive, }, { .name = "bfq.io_service_time_recursive", .private = offsetof(struct bfq_group, stats.service_time), .seq_show = bfqg_print_rwstat_recursive, }, { .name = "bfq.io_wait_time_recursive", .private = offsetof(struct bfq_group, stats.wait_time), .seq_show = bfqg_print_rwstat_recursive, }, { .name = "bfq.io_merged_recursive", .private = offsetof(struct bfq_group, stats.merged), .seq_show = bfqg_print_rwstat_recursive, }, { .name = "bfq.io_queued_recursive", .private = offsetof(struct bfq_group, stats.queued), .seq_show = bfqg_print_rwstat_recursive, }, { .name = "bfq.avg_queue_size", .seq_show = bfqg_print_avg_queue_size, }, { .name = "bfq.group_wait_time", .private = offsetof(struct bfq_group, stats.group_wait_time), .seq_show = bfqg_print_stat, }, { .name = "bfq.idle_time", .private = offsetof(struct bfq_group, stats.idle_time), .seq_show = bfqg_print_stat, }, { .name = "bfq.empty_time", .private = offsetof(struct bfq_group, stats.empty_time), .seq_show = bfqg_print_stat, }, { .name = "bfq.dequeue", .private = offsetof(struct bfq_group, stats.dequeue), .seq_show = bfqg_print_stat, }, #endif /* CONFIG_BFQ_CGROUP_DEBUG */ { } /* terminate */ }; struct cftype bfq_blkg_files[] = { { .name = "bfq.weight", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = bfq_io_show_weight, .write = bfq_io_set_weight, }, {} /* terminate */ }; #else /* CONFIG_BFQ_GROUP_IOSCHED */ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct bfq_group *bfqg) {} void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); entity->weight = entity->new_weight; entity->orig_weight = entity->new_weight; if (bfqq) { bfqq->ioprio = bfqq->new_ioprio; bfqq->ioprio_class = bfqq->new_ioprio_class; } entity->sched_data = &bfqg->sched_data; } void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} void bfq_end_wr_async(struct bfq_data *bfqd) { bfq_end_wr_async_queues(bfqd, bfqd->root_group); } struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) { return bfqd->root_group; } struct bfq_group *bfqq_group(struct bfq_queue *bfqq) { return bfqq->bfqd->root_group; } void bfqg_and_blkg_put(struct bfq_group *bfqg) {} struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) { struct bfq_group *bfqg; int i; bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); if (!bfqg) return NULL; for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; return bfqg; } #endif /* CONFIG_BFQ_GROUP_IOSCHED */ |
// SPDX-License-Identifier: GPL-2.0-or-later /* * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD * * Copyright(C) 2010 Jarod Wilson <jarod@wilsonet.com> * Portions based on the original lirc_imon driver, * Copyright(C) 2004 Venky Raju(dev@venky.ws) * * Huge thanks to R. Geoff Newbury for invaluable debugging on the * 0xffdc iMON devices, and for sending me one to hack on, without * which the support for them wouldn't be nearly as good. Thanks * also to the numerous 0xffdc device owners that tested auto-config * support for me and provided debug dumps from their devices.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/ratelimit.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> #include <linux/timer.h> #define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>" #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" #define MOD_NAME "imon" #define MOD_VERSION "0.9.4" #define DISPLAY_MINOR_BASE 144 #define DEVICE_NAME "lcd%d" #define IMON_CLOCK_ENABLE_PACKETS 2 /*** P R O T O T Y P E S ***/ /* USB Callback prototypes */ static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id); static void imon_disconnect(struct usb_interface *interface); static void usb_rx_callback_intf0(struct urb *urb); static void usb_rx_callback_intf1(struct urb *urb); static void usb_tx_callback(struct urb *urb); /* suspend/resume support */ static int imon_resume(struct usb_interface *intf); static int imon_suspend(struct usb_interface *intf, pm_message_t message); /* Display file_operations function prototypes */ static int display_open(struct inode *inode, struct file *file); static int display_close(struct inode *inode, struct file *file); /* VFD write operation */ static ssize_t vfd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos); /* LCD file_operations override function prototypes */ static ssize_t lcd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos); /*** G L O B A L S ***/ struct imon_panel_key_table { u64 hw_code; u32 keycode; }; struct imon_usb_dev_descr { __u16 flags; #define IMON_NO_FLAGS 0 #define IMON_NEED_20MS_PKT_DELAY 1 #define IMON_SUPPRESS_REPEATED_KEYS 2 struct imon_panel_key_table key_table[]; }; struct imon_context { struct device *dev; /* Newer devices have two interfaces */ struct usb_device *usbdev_intf0; struct usb_device *usbdev_intf1; bool display_supported; /* not all controllers do */ bool display_isopen; /* display port has been opened */ bool rf_device; /* true if iMON 2.4G LT/DT RF device */ bool rf_isassociating; /* RF remote associating */ bool dev_present_intf0; /* USB device presence, interface 0 */ bool dev_present_intf1; /* USB device presence, interface 1 */ struct mutex lock; /* to lock this object */ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ struct usb_endpoint_descriptor *rx_endpoint_intf0; struct usb_endpoint_descriptor *rx_endpoint_intf1; struct usb_endpoint_descriptor *tx_endpoint; struct urb *rx_urb_intf0; struct urb *rx_urb_intf1; struct urb *tx_urb; bool tx_control; unsigned char usb_rx_buf[8]; unsigned char usb_tx_buf[8]; unsigned int send_packet_delay; struct tx_t { unsigned char data_buf[35]; /* user data buffer */ struct completion finished; /* wait for write to finish */ bool busy; /* write in progress */ int status; /* status of tx completion */ } tx; u16 vendor; /* usb vendor ID */ u16 product; /* usb product ID */ struct rc_dev *rdev; /* rc-core device for remote */ struct input_dev *idev; /* input device for panel & IR mouse */ struct input_dev *touch; /* input device for touchscreen */ spinlock_t kc_lock; /* make sure we get keycodes right */ u32 kc; /* current input keycode */ u32 last_keycode; /* last reported input keycode */ u32 rc_scancode; /* the computed remote scancode */ u8 rc_toggle; /* the computed remote toggle bit */ u64 rc_proto; /* iMON or 
MCE (RC6) IR protocol? */ bool release_code; /* some keys send a release code */ u8 display_type; /* store the display type */ bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */ char name_rdev[128]; /* rc input device name */ char phys_rdev[64]; /* rc input device phys path */ char name_idev[128]; /* input device name */ char phys_idev[64]; /* input device phys path */ char name_touch[128]; /* touch screen name */ char phys_touch[64]; /* touch screen phys path */ struct timer_list ttimer; /* touch screen timer */ int touch_x; /* x coordinate on touchscreen */ int touch_y; /* y coordinate on touchscreen */ const struct imon_usb_dev_descr *dev_descr; /* device description with key */ /* table for front panels */ /* * Fields for deferring free_imon_context(). * * Since reference to "struct imon_context" is stored into * "struct file"->private_data, we need to remember * how many file descriptors might access this "struct imon_context". */ refcount_t users; /* * Use a flag for telling display_open()/vfd_write()/lcd_write() that * imon_disconnect() was already called. */ bool disconnected; /* * We need to wait for RCU grace period in order to allow * display_open() to safely check ->disconnected and increment ->users. */ struct rcu_head rcu; }; #define TOUCH_TIMEOUT (HZ/30) /* vfd character device file operations */ static const struct file_operations vfd_fops = { .owner = THIS_MODULE, .open = display_open, .write = vfd_write, .release = display_close, .llseek = noop_llseek, }; /* lcd character device file operations */ static const struct file_operations lcd_fops = { .owner = THIS_MODULE, .open = display_open, .write = lcd_write, .release = display_close, .llseek = noop_llseek, }; enum { IMON_DISPLAY_TYPE_AUTO = 0, IMON_DISPLAY_TYPE_VFD = 1, IMON_DISPLAY_TYPE_LCD = 2, IMON_DISPLAY_TYPE_VGA = 3, IMON_DISPLAY_TYPE_NONE = 4, }; enum { IMON_KEY_IMON = 0, IMON_KEY_MCE = 1, IMON_KEY_PANEL = 2, }; static struct usb_class_driver imon_vfd_class = { .name = DEVICE_NAME, .fops = &vfd_fops, .minor_base = DISPLAY_MINOR_BASE, }; static struct usb_class_driver imon_lcd_class = { .name = DEVICE_NAME, .fops = &lcd_fops, .minor_base = DISPLAY_MINOR_BASE, }; /* imon receiver front panel/knob key table */ static const struct imon_usb_dev_descr imon_default_table = { .flags = IMON_NO_FLAGS, .key_table = { { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */ { 0x000000001200ffeell, KEY_UP }, { 0x000000001300ffeell, KEY_DOWN }, { 0x000000001400ffeell, KEY_LEFT }, { 0x000000001500ffeell, KEY_RIGHT }, { 0x000000001600ffeell, KEY_ENTER }, { 0x000000001700ffeell, KEY_ESC }, { 0x000000001f00ffeell, KEY_AUDIO }, { 0x000000002000ffeell, KEY_VIDEO }, { 0x000000002100ffeell, KEY_CAMERA }, { 0x000000002700ffeell, KEY_DVD }, { 0x000000002300ffeell, KEY_TV }, { 0x000000002b00ffeell, KEY_EXIT }, { 0x000000002c00ffeell, KEY_SELECT }, { 0x000000002d00ffeell, KEY_MENU }, { 0x000000000500ffeell, KEY_PREVIOUS }, { 0x000000000700ffeell, KEY_REWIND }, { 0x000000000400ffeell, KEY_STOP }, { 0x000000003c00ffeell, KEY_PLAYPAUSE }, { 0x000000000800ffeell, KEY_FASTFORWARD }, { 0x000000000600ffeell, KEY_NEXT }, { 0x000000010000ffeell, KEY_RIGHT }, { 0x000001000000ffeell, KEY_LEFT }, { 0x000000003d00ffeell, KEY_SELECT }, { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000000100ffeell, KEY_MUTE }, /* 0xffdc iMON MCE VFD */ { 0x00010000ffffffeell, KEY_VOLUMEUP }, { 0x01000000ffffffeell, KEY_VOLUMEDOWN }, { 0x00000001ffffffeell, KEY_MUTE }, { 0x0000000fffffffeell, KEY_MEDIA }, { 0x00000012ffffffeell, KEY_UP }, { 
0x00000013ffffffeell, KEY_DOWN }, { 0x00000014ffffffeell, KEY_LEFT }, { 0x00000015ffffffeell, KEY_RIGHT }, { 0x00000016ffffffeell, KEY_ENTER }, { 0x00000017ffffffeell, KEY_ESC }, /* iMON Knob values */ { 0x000100ffffffffeell, KEY_VOLUMEUP }, { 0x010000ffffffffeell, KEY_VOLUMEDOWN }, { 0x000008ffffffffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; static const struct imon_usb_dev_descr imon_OEM_VFD = { .flags = IMON_NEED_20MS_PKT_DELAY, .key_table = { { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */ { 0x000000001200ffeell, KEY_UP }, { 0x000000001300ffeell, KEY_DOWN }, { 0x000000001400ffeell, KEY_LEFT }, { 0x000000001500ffeell, KEY_RIGHT }, { 0x000000001600ffeell, KEY_ENTER }, { 0x000000001700ffeell, KEY_ESC }, { 0x000000001f00ffeell, KEY_AUDIO }, { 0x000000002b00ffeell, KEY_EXIT }, { 0x000000002c00ffeell, KEY_SELECT }, { 0x000000002d00ffeell, KEY_MENU }, { 0x000000000500ffeell, KEY_PREVIOUS }, { 0x000000000700ffeell, KEY_REWIND }, { 0x000000000400ffeell, KEY_STOP }, { 0x000000003c00ffeell, KEY_PLAYPAUSE }, { 0x000000000800ffeell, KEY_FASTFORWARD }, { 0x000000000600ffeell, KEY_NEXT }, { 0x000000010000ffeell, KEY_RIGHT }, { 0x000001000000ffeell, KEY_LEFT }, { 0x000000003d00ffeell, KEY_SELECT }, { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000000100ffeell, KEY_MUTE }, /* 0xffdc iMON MCE VFD */ { 0x00010000ffffffeell, KEY_VOLUMEUP }, { 0x01000000ffffffeell, KEY_VOLUMEDOWN }, { 0x00000001ffffffeell, KEY_MUTE }, { 0x0000000fffffffeell, KEY_MEDIA }, { 0x00000012ffffffeell, KEY_UP }, { 0x00000013ffffffeell, KEY_DOWN }, { 0x00000014ffffffeell, KEY_LEFT }, { 0x00000015ffffffeell, KEY_RIGHT }, { 0x00000016ffffffeell, KEY_ENTER }, { 0x00000017ffffffeell, KEY_ESC }, /* iMON Knob values */ { 0x000100ffffffffeell, KEY_VOLUMEUP }, { 0x010000ffffffffeell, KEY_VOLUMEDOWN }, { 0x000008ffffffffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; /* imon receiver front panel/knob key table for DH102*/ static const struct imon_usb_dev_descr imon_DH102 = { .flags = IMON_NO_FLAGS, .key_table = { { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000010000ffeell, KEY_MUTE }, { 0x0000000f0000ffeell, KEY_MEDIA }, { 0x000000120000ffeell, KEY_UP }, { 0x000000130000ffeell, KEY_DOWN }, { 0x000000140000ffeell, KEY_LEFT }, { 0x000000150000ffeell, KEY_RIGHT }, { 0x000000160000ffeell, KEY_ENTER }, { 0x000000170000ffeell, KEY_ESC }, { 0x0000002b0000ffeell, KEY_EXIT }, { 0x0000002c0000ffeell, KEY_SELECT }, { 0x0000002d0000ffeell, KEY_MENU }, { 0, KEY_RESERVED } } }; /* imon ultrabay front panel key table */ static const struct imon_usb_dev_descr ultrabay_table = { .flags = IMON_SUPPRESS_REPEATED_KEYS, .key_table = { { 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */ { 0x000000000100ffeell, KEY_UP }, { 0x000000000001ffeell, KEY_DOWN }, { 0x000000160000ffeell, KEY_ENTER }, { 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */ { 0x000000200000ffeell, KEY_VIDEO }, /* Movie */ { 0x000000210000ffeell, KEY_CAMERA }, /* Photo */ { 0x000000270000ffeell, KEY_DVD }, /* DVD */ { 0x000000230000ffeell, KEY_TV }, /* TV */ { 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */ { 0x000000070000ffeell, KEY_REWIND }, { 0x000000040000ffeell, KEY_STOP }, { 0x000000020000ffeell, KEY_PLAYPAUSE }, { 0x000000080000ffeell, KEY_FASTFORWARD }, { 0x000000060000ffeell, KEY_NEXT }, /* Next */ { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000010000ffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; /* * USB Device ID for iMON USB Control Boards * * The Windows 
drivers contain 6 different inf files, more or less one for * each new device until the 0x0034-0x0046 devices, which all use the same * driver. Some of the devices in the 34-46 range haven't been definitively * identified yet. Early devices have either a TriGem Computer, Inc. or a * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later * devices use the SoundGraph vendor ID (0x15c2). This driver only supports * the ffdc and later devices, which do onboard decoding. */ static const struct usb_device_id imon_usb_id_table[] = { /* * Several devices with this same device ID, all use iMON_PAD.inf * SoundGraph iMON PAD (IR & VFD) * SoundGraph iMON PAD (IR & LCD) * SoundGraph iMON Knob (IR only) */ { USB_DEVICE(0x15c2, 0xffdc), .driver_info = (unsigned long)&imon_default_table }, /* * Newer devices, all driven by the latest iMON Windows driver, full * list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2' * Need user input to fill in details on unknown devices. */ /* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */ { USB_DEVICE(0x15c2, 0x0034), .driver_info = (unsigned long)&imon_DH102 }, /* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */ { USB_DEVICE(0x15c2, 0x0035), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM VFD (IR & VFD) */ { USB_DEVICE(0x15c2, 0x0036), .driver_info = (unsigned long)&imon_OEM_VFD }, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0037), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM LCD (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0038), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON UltraBay (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0039), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003a), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003b), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM Inside (IR only) */ { USB_DEVICE(0x15c2, 0x003c), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003d), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003e), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003f), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0040), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON MINI (IR only) */ { USB_DEVICE(0x15c2, 0x0041), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station EZ External (IR only) */ { USB_DEVICE(0x15c2, 0x0042), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Basic Internal (IR only) */ { USB_DEVICE(0x15c2, 0x0043), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Elite (IR & VFD) */ { USB_DEVICE(0x15c2, 0x0044), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Premiere (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0045), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0046), .driver_info = (unsigned long)&imon_default_table}, {} }; /* USB Device data */ static struct usb_driver imon_driver = { .name = MOD_NAME, .probe = imon_probe, .disconnect = imon_disconnect, .suspend = imon_suspend, .resume = imon_resume, .id_table = 
imon_usb_id_table, }; /* Module bookkeeping bits */ MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESC); MODULE_VERSION(MOD_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(usb, imon_usb_id_table); static bool debug; module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)"); /* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */ static int display_type; module_param(display_type, int, S_IRUGO); MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, 1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)"); static int pad_stabilize = 1; module_param(pad_stabilize, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD presses in arrow key mode. 0=disable, 1=enable (default)."); /* * In certain use cases, mouse mode isn't really helpful, and could actually * cause confusion, so allow disabling it when the IR device is open. */ static bool nomouse; module_param(nomouse, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is open. 0=don't disable, 1=disable. (default: don't disable)"); /* threshold at which a pad push registers as an arrow key in kbd mode */ static int pad_thresh; module_param(pad_thresh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an arrow key in kbd mode (default: 28)"); static void free_imon_context(struct imon_context *ictx) { struct device *dev = ictx->dev; usb_free_urb(ictx->tx_urb); WARN_ON(ictx->dev_present_intf0); usb_free_urb(ictx->rx_urb_intf0); WARN_ON(ictx->dev_present_intf1); usb_free_urb(ictx->rx_urb_intf1); kfree_rcu(ictx, rcu); dev_dbg(dev, "%s: iMON context freed\n", __func__); } /* * Called when the Display device (e.g. /dev/lcd0) * is opened by the application. */ static int display_open(struct inode *inode, struct file *file) { struct usb_interface *interface; struct imon_context *ictx = NULL; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&imon_driver, subminor); if (!interface) { pr_err("could not find interface for minor %d\n", subminor); retval = -ENODEV; goto exit; } rcu_read_lock(); ictx = usb_get_intfdata(interface); if (!ictx || ictx->disconnected || !refcount_inc_not_zero(&ictx->users)) { rcu_read_unlock(); pr_err("no context found for minor %d\n", subminor); retval = -ENODEV; goto exit; } rcu_read_unlock(); mutex_lock(&ictx->lock); if (ictx->disconnected) { retval = -ENODEV; } else if (!ictx->display_supported) { pr_err("display not supported by device\n"); retval = -ENODEV; } else if (ictx->display_isopen) { pr_err("display port is already open\n"); retval = -EBUSY; } else { ictx->display_isopen = true; file->private_data = ictx; dev_dbg(ictx->dev, "display port opened\n"); } mutex_unlock(&ictx->lock); if (retval && refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); exit: return retval; } /* * Called when the display device (e.g. /dev/lcd0) * is closed by the application. 
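* If the USB device has already been disconnected, dropping the last * reference here also frees the context (see free_imon_context()).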
*/ static int display_close(struct inode *inode, struct file *file) { struct imon_context *ictx = file->private_data; int retval = 0; mutex_lock(&ictx->lock); if (!ictx->display_supported) { pr_err("display not supported by device\n"); retval = -ENODEV; } else if (!ictx->display_isopen) { pr_err("display is not open\n"); retval = -EIO; } else { ictx->display_isopen = false; dev_dbg(ictx->dev, "display port closed\n"); } mutex_unlock(&ictx->lock); if (refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); return retval; } /* * Sends a packet to the device -- this function must be called with * ictx->lock held, or its unlock/lock sequence while waiting for tx * to complete can/will lead to a deadlock. */ static int send_packet(struct imon_context *ictx) { unsigned int pipe; unsigned long timeout; int interval = 0; int retval = 0; struct usb_ctrlrequest *control_req = NULL; lockdep_assert_held(&ictx->lock); if (ictx->disconnected) return -ENODEV; /* Check if we need to use control or interrupt urb */ if (!ictx->tx_control) { pipe = usb_sndintpipe(ictx->usbdev_intf0, ictx->tx_endpoint->bEndpointAddress); interval = ictx->tx_endpoint->bInterval; usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe, ictx->usb_tx_buf, sizeof(ictx->usb_tx_buf), usb_tx_callback, ictx, interval); ictx->tx_urb->actual_length = 0; } else { /* fill request into kmalloc'ed space: */ control_req = kmalloc(sizeof(*control_req), GFP_KERNEL); if (control_req == NULL) return -ENOMEM; /* setup packet is '21 09 0200 0001 0008' */ control_req->bRequestType = 0x21; control_req->bRequest = 0x09; control_req->wValue = cpu_to_le16(0x0200); control_req->wIndex = cpu_to_le16(0x0001); control_req->wLength = cpu_to_le16(0x0008); /* control pipe is endpoint 0x00 */ pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0); /* build the control urb */ usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe, (unsigned char *)control_req, ictx->usb_tx_buf, sizeof(ictx->usb_tx_buf), usb_tx_callback, ictx); ictx->tx_urb->actual_length = 0; } reinit_completion(&ictx->tx.finished); ictx->tx.busy = true; smp_rmb(); /* ensure later readers know we're busy */ retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL); if (retval) { ictx->tx.busy = false; smp_rmb(); /* ensure later readers know we're not busy */ pr_err_ratelimited("error submitting urb(%d)\n", retval); } else { /* Wait for transmission to complete (or abort or timeout) */ retval = wait_for_completion_interruptible_timeout(&ictx->tx.finished, 10 * HZ); if (retval <= 0) { usb_kill_urb(ictx->tx_urb); pr_err_ratelimited("task interrupted\n"); if (retval < 0) ictx->tx.status = retval; else ictx->tx.status = -ETIMEDOUT; } ictx->tx.busy = false; retval = ictx->tx.status; if (retval) pr_err_ratelimited("packet tx failed (%d)\n", retval); } kfree(control_req); /* * Induce a mandatory delay before returning, as otherwise, * send_packet can get called so rapidly as to overwhelm the device, * particularly on faster systems and/or those with quirky usb. */ timeout = msecs_to_jiffies(ictx->send_packet_delay); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(timeout); return retval; } /* * Sends an associate packet to the iMON 2.4G. * * This might not be such a good idea, since it has an id collision with * some versions of the "IR & VFD" combo. The only way to determine if it * is an RF version is to look at the product description string. (Which * we currently do not fetch). 
*/ static int send_associate_24g(struct imon_context *ictx) { const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20 }; if (!ictx) { pr_err("no context for device\n"); return -ENODEV; } if (!ictx->dev_present_intf0) { pr_err("no iMON device present\n"); return -ENODEV; } memcpy(ictx->usb_tx_buf, packet, sizeof(packet)); return send_packet(ictx); } /* * Sends packets to setup and show clock on iMON display * * Arguments: year - last 2 digits of year, month - 1..12, * day - 1..31, dow - day of the week (0-Sun...6-Sat), * hour - 0..23, minute - 0..59, second - 0..59 */ static int send_set_imon_clock(struct imon_context *ictx, unsigned int year, unsigned int month, unsigned int day, unsigned int dow, unsigned int hour, unsigned int minute, unsigned int second) { unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8]; int retval = 0; int i; if (!ictx) { pr_err("no context for device\n"); return -ENODEV; } switch (ictx->display_type) { case IMON_DISPLAY_TYPE_LCD: clock_enable_pkt[0][0] = 0x80; clock_enable_pkt[0][1] = year; clock_enable_pkt[0][2] = month-1; clock_enable_pkt[0][3] = day; clock_enable_pkt[0][4] = hour; clock_enable_pkt[0][5] = minute; clock_enable_pkt[0][6] = second; clock_enable_pkt[1][0] = 0x80; clock_enable_pkt[1][1] = 0; clock_enable_pkt[1][2] = 0; clock_enable_pkt[1][3] = 0; clock_enable_pkt[1][4] = 0; clock_enable_pkt[1][5] = 0; clock_enable_pkt[1][6] = 0; if (ictx->product == 0xffdc) { clock_enable_pkt[0][7] = 0x50; clock_enable_pkt[1][7] = 0x51; } else { clock_enable_pkt[0][7] = 0x88; clock_enable_pkt[1][7] = 0x8a; } break; case IMON_DISPLAY_TYPE_VFD: clock_enable_pkt[0][0] = year; clock_enable_pkt[0][1] = month-1; clock_enable_pkt[0][2] = day; clock_enable_pkt[0][3] = dow; clock_enable_pkt[0][4] = hour; clock_enable_pkt[0][5] = minute; clock_enable_pkt[0][6] = second; clock_enable_pkt[0][7] = 0x40; clock_enable_pkt[1][0] = 0; clock_enable_pkt[1][1] = 0; clock_enable_pkt[1][2] = 1; clock_enable_pkt[1][3] = 0; clock_enable_pkt[1][4] = 0; clock_enable_pkt[1][5] = 0; clock_enable_pkt[1][6] = 0; clock_enable_pkt[1][7] = 0x42; break; default: return -ENODEV; } for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) { memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8); retval = send_packet(ictx); if (retval) { pr_err("send_packet failed for packet %d\n", i); break; } } return retval; } /* * These are the sysfs functions to handle the association on the iMON 2.4G LT. 
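 *
 * Hedged usage sketch (the attribute hangs off the USB interface device;
 * the exact sysfs path below is an assumption). Any write arms
 * rf_isassociating and fires send_associate_24g():
 *
 *	int fd = open("/sys/bus/usb/drivers/imon/<intf>/associate_remote",
 *		      O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);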
*/ static ssize_t associate_remote_show(struct device *d, struct device_attribute *attr, char *buf) { struct imon_context *ictx = dev_get_drvdata(d); if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); if (ictx->rf_isassociating) strscpy(buf, "associating\n", PAGE_SIZE); else strscpy(buf, "closed\n", PAGE_SIZE); dev_info(d, "Visit https://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n"); mutex_unlock(&ictx->lock); return strlen(buf); } static ssize_t associate_remote_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct imon_context *ictx; ictx = dev_get_drvdata(d); if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); ictx->rf_isassociating = true; send_associate_24g(ictx); mutex_unlock(&ictx->lock); return count; } /* * sysfs functions to control internal imon clock */ static ssize_t imon_clock_show(struct device *d, struct device_attribute *attr, char *buf) { struct imon_context *ictx = dev_get_drvdata(d); size_t len; if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); if (!ictx->display_supported) { len = sysfs_emit(buf, "Not supported."); } else { len = sysfs_emit(buf, "To set the clock on your iMON display:\n" "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n" "%s", ictx->display_isopen ? "\nNOTE: imon device must be closed\n" : ""); } mutex_unlock(&ictx->lock); return len; } static ssize_t imon_clock_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct imon_context *ictx = dev_get_drvdata(d); ssize_t retval; unsigned int year, month, day, dow, hour, minute, second; if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); if (!ictx->display_supported) { retval = -ENODEV; goto exit; } else if (ictx->display_isopen) { retval = -EBUSY; goto exit; } if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow, &hour, &minute, &second) != 7) { retval = -EINVAL; goto exit; } if ((month < 1 || month > 12) || (day < 1 || day > 31) || (dow > 6) || (hour > 23) || (minute > 59) || (second > 59)) { retval = -EINVAL; goto exit; } retval = send_set_imon_clock(ictx, year, month, day, dow, hour, minute, second); if (retval) goto exit; retval = count; exit: mutex_unlock(&ictx->lock); return retval; } static DEVICE_ATTR_RW(imon_clock); static DEVICE_ATTR_RW(associate_remote); static struct attribute *imon_display_sysfs_entries[] = { &dev_attr_imon_clock.attr, NULL }; static const struct attribute_group imon_display_attr_group = { .attrs = imon_display_sysfs_entries }; static struct attribute *imon_rf_sysfs_entries[] = { &dev_attr_associate_remote.attr, NULL }; static const struct attribute_group imon_rf_attr_group = { .attrs = imon_rf_sysfs_entries }; /* * Writes data to the VFD. The iMON VFD is 2x16 characters * and requires data in 5 consecutive USB interrupt packets, * each carrying 7 bytes of display data, plus a sixth packet * that terminates the update. * * I don't know if the VFD board supports features such as * scrolling, clearing rows, blanking, etc., so the caller * must provide a full screen of data. If fewer than 32 bytes * are provided, spaces will be appended to generate a full screen.
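 *
 * Worked example of the resulting transfer for one full screen, as
 * derived from the loop in vfd_write() below (each interrupt packet is
 * 7 payload bytes plus a trailing sequence byte):
 *
 *	packet 0: data[0..6],   seq 0x00
 *	packet 1: data[7..13],  seq 0x02
 *	packet 2: data[14..20], seq 0x04
 *	packet 3: data[21..27], seq 0x06
 *	packet 4: data[28..34], seq 0x08  (bytes 32..34 are 0xFF fill)
 *	packet 5: vfd_packet6,  seq 0x0a  (closes out the screen update)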
*/ static ssize_t vfd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos) { int i; int offset; int seq; int retval = 0; struct imon_context *ictx = file->private_data; static const unsigned char vfd_packet6[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; if (mutex_lock_interruptible(&ictx->lock)) return -ERESTARTSYS; if (ictx->disconnected) { retval = -ENODEV; goto exit; } if (!ictx->dev_present_intf0) { pr_err_ratelimited("no iMON device present\n"); retval = -ENODEV; goto exit; } if (n_bytes <= 0 || n_bytes > 32) { pr_err_ratelimited("invalid payload size\n"); retval = -EINVAL; goto exit; } if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) { retval = -EFAULT; goto exit; } /* Pad with spaces */ for (i = n_bytes; i < 32; ++i) ictx->tx.data_buf[i] = ' '; for (i = 32; i < 35; ++i) ictx->tx.data_buf[i] = 0xFF; offset = 0; seq = 0; do { memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7); ictx->usb_tx_buf[7] = (unsigned char) seq; retval = send_packet(ictx); if (retval) { pr_err_ratelimited("send packet #%d failed\n", seq / 2); goto exit; } else { seq += 2; offset += 7; } } while (offset < 35); /* Send packet #6 */ memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); ictx->usb_tx_buf[7] = (unsigned char) seq; retval = send_packet(ictx); if (retval) pr_err_ratelimited("send packet #%d failed\n", seq / 2); exit: mutex_unlock(&ictx->lock); return (!retval) ? n_bytes : retval; } /* * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte * packets. We accept data as 16 hexadecimal digits, followed by a * newline (to make it easy to drive the device from a command-line * -- even though the actual binary data is a bit complicated). * * The device itself is not a "traditional" text-mode display. It's * actually a 16x96 pixel bitmap display. That means if you want to * display text, you've got to have your own "font" and translate the * text into bitmaps for display. This is really flexible (you can * display whatever diacritics you need, and so on), but it's also * a lot more complicated than most LCDs... */ static ssize_t lcd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos) { int retval = 0; struct imon_context *ictx = file->private_data; mutex_lock(&ictx->lock); if (ictx->disconnected) { retval = -ENODEV; goto exit; } if (!ictx->display_supported) { pr_err_ratelimited("no iMON display present\n"); retval = -ENODEV; goto exit; } if (n_bytes != 8) { pr_err_ratelimited("invalid payload size: %d (expected 8)\n", (int)n_bytes); retval = -EINVAL; goto exit; } if (copy_from_user(ictx->usb_tx_buf, buf, 8)) { retval = -EFAULT; goto exit; } retval = send_packet(ictx); if (retval) { pr_err_ratelimited("send packet failed!\n"); goto exit; } else { dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n", __func__, (int) n_bytes); } exit: mutex_unlock(&ictx->lock); return (!retval) ? 
n_bytes : retval; } /* * Callback function for USB core API: transmit data */ static void usb_tx_callback(struct urb *urb) { struct imon_context *ictx; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; ictx->tx.status = urb->status; /* notify waiters that write has finished */ ictx->tx.busy = false; smp_rmb(); /* ensure later readers know we're not busy */ complete(&ictx->tx.finished); } /* * report touchscreen input */ static void imon_touch_display_timeout(struct timer_list *t) { struct imon_context *ictx = timer_container_of(ictx, t, ttimer); if (ictx->display_type != IMON_DISPLAY_TYPE_VGA) return; input_report_abs(ictx->touch, ABS_X, ictx->touch_x); input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); input_report_key(ictx->touch, BTN_TOUCH, 0x00); input_sync(ictx->touch); } /* * iMON IR receivers support two different signal sets -- those used by * the iMON remotes, and those used by the Windows MCE remotes (which is * really just RC-6), but only one or the other at a time, as the signals * are decoded onboard the receiver. * * This function gets called two different ways, one way is from * rc_register_device, for initial protocol selection/setup, and the other is * via a userspace-initiated protocol change request, either by direct sysfs * prodding or by something like ir-keytable. In the rc_register_device case, * the imon context lock is already held, but when initiated from userspace, * it is not, so we must acquire it prior to calling send_packet, which * requires that the lock is held. */ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto) { int retval; struct imon_context *ictx = rc->priv; struct device *dev = ictx->dev; const bool unlock = mutex_trylock(&ictx->lock); unsigned char ir_proto_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; if (*rc_proto && !(*rc_proto & rc->allowed_protocols)) dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n"); if (*rc_proto & RC_PROTO_BIT_RC6_MCE) { dev_dbg(dev, "Configuring IR receiver for MCE protocol\n"); ir_proto_packet[0] = 0x01; *rc_proto = RC_PROTO_BIT_RC6_MCE; } else if (*rc_proto & RC_PROTO_BIT_IMON) { dev_dbg(dev, "Configuring IR receiver for iMON protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ *rc_proto = RC_PROTO_BIT_IMON; } else { dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ *rc_proto = RC_PROTO_BIT_IMON; } memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); retval = send_packet(ictx); if (retval) goto out; ictx->rc_proto = *rc_proto; ictx->pad_mouse = false; out: if (unlock) mutex_unlock(&ictx->lock); return retval; } /* * The directional pad behaves a bit differently, depending on whether this is * one of the older ffdc devices or a newer device. Newer devices appear to * have a higher resolution matrix for more precise mouse movement, but it * makes things overly sensitive in keyboard mode, so we do some interesting * contortions to make it less touchy. Older devices run through the same * routine with shorter timeout and a smaller threshold. 
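 *
 * Worked example with the newer-device defaults (timeout 500 ms,
 * threshold 28): pad deltas are summed into a running (x, y); once
 * either accumulated axis passes 28, the dominant axis wins, yielding
 * 0x7F (down) or 0x80 (up) for vertical motion and 0x7F00 (right) or
 * 0x8000 (left) for horizontal motion. A repeat of the same result
 * inside the timeout window is swallowed by the hits counter, so a
 * held pad does not flood the input layer.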
*/ static int stabilize(int a, int b, u16 timeout, u16 threshold) { ktime_t ct; static ktime_t prev_time; static ktime_t hit_time; static int x, y, prev_result, hits; int result = 0; long msec, msec_hit; ct = ktime_get(); msec = ktime_ms_delta(ct, prev_time); msec_hit = ktime_ms_delta(ct, hit_time); if (msec > 100) { x = 0; y = 0; hits = 0; } x += a; y += b; prev_time = ct; if (abs(x) > threshold || abs(y) > threshold) { if (abs(y) > abs(x)) result = (y > 0) ? 0x7F : 0x80; else result = (x > 0) ? 0x7F00 : 0x8000; x = 0; y = 0; if (result == prev_result) { hits++; if (hits > 3) { switch (result) { case 0x7F: y = 17 * threshold / 30; break; case 0x80: y -= 17 * threshold / 30; break; case 0x7F00: x = 17 * threshold / 30; break; case 0x8000: x -= 17 * threshold / 30; break; } } if (hits == 2 && msec_hit < timeout) { result = 0; hits = 1; } } else { prev_result = result; hits = 1; hit_time = ct; } } return result; } static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 scancode) { u32 keycode; u32 release; bool is_release_code = false; /* Look for the initial press of a button */ keycode = rc_g_keycode_from_table(ictx->rdev, scancode); ictx->rc_toggle = 0x0; ictx->rc_scancode = scancode; /* Look for the release of a button */ if (keycode == KEY_RESERVED) { release = scancode & ~0x4000; keycode = rc_g_keycode_from_table(ictx->rdev, release); if (keycode != KEY_RESERVED) is_release_code = true; } ictx->release_code = is_release_code; return keycode; } static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode) { u32 keycode; #define MCE_KEY_MASK 0x7000 #define MCE_TOGGLE_BIT 0x8000 /* * On some receivers, mce keys decode to 0x8000f04xx and 0x8000f84xx * (the toggle bit flipping between alternating key presses), while * on other receivers, we see 0x8000f74xx and 0x8000ff4xx. To keep * the table trim, we always or in the bits to look up 0x8000ff4xx, * but we can't or them into all codes, as some keys are decoded in * a different way w/o the same use of the toggle bit... 
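 *
 * Concrete example (assuming the usual MCE keymap form): a receiver
 * reporting 0x800f0416 for one press and 0x800f8416 for the next is
 * normalized below to 0x800ff416 in both cases, which is the single
 * entry the scancode table needs.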
*/ if (scancode & 0x80000000) scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT; ictx->rc_scancode = scancode; keycode = rc_g_keycode_from_table(ictx->rdev, scancode); /* not used in mce mode, but make sure we know it's false */ ictx->release_code = false; return keycode; } static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code) { const struct imon_panel_key_table *key_table; u32 keycode = KEY_RESERVED; int i; key_table = ictx->dev_descr->key_table; for (i = 0; key_table[i].hw_code != 0; i++) { if (key_table[i].hw_code == (code | 0xffee)) { keycode = key_table[i].keycode; break; } } ictx->release_code = false; return keycode; } static bool imon_mouse_event(struct imon_context *ictx, unsigned char *buf, int len) { signed char rel_x = 0x00, rel_y = 0x00; u8 right_shift = 1; bool mouse_input = true; int dir = 0; unsigned long flags; spin_lock_irqsave(&ictx->kc_lock, flags); /* newer iMON device PAD or mouse button */ if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) { rel_x = buf[2]; rel_y = buf[3]; right_shift = 1; /* 0xffdc iMON PAD or mouse button input */ } else if (ictx->product == 0xffdc && (buf[0] & 0x40) && !((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) { rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; if (buf[0] & 0x02) rel_x |= ~0x0f; rel_x = rel_x + rel_x / 2; rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; if (buf[0] & 0x01) rel_y |= ~0x0f; rel_y = rel_y + rel_y / 2; right_shift = 2; /* some ffdc devices decode mouse buttons differently... */ } else if (ictx->product == 0xffdc && (buf[0] == 0x68)) { right_shift = 2; /* ch+/- buttons, which we use for an emulated scroll wheel */ } else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) { dir = 1; } else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) { dir = -1; } else mouse_input = false; spin_unlock_irqrestore(&ictx->kc_lock, flags); if (mouse_input) { dev_dbg(ictx->dev, "sending mouse data via input subsystem\n"); if (dir) { input_report_rel(ictx->idev, REL_WHEEL, dir); } else if (rel_x || rel_y) { input_report_rel(ictx->idev, REL_X, rel_x); input_report_rel(ictx->idev, REL_Y, rel_y); } else { input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1); input_report_key(ictx->idev, BTN_RIGHT, buf[1] >> right_shift & 0x1); } input_sync(ictx->idev); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); } return mouse_input; } static void imon_touch_event(struct imon_context *ictx, unsigned char *buf) { mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT); ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4); ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf)); input_report_abs(ictx->touch, ABS_X, ictx->touch_x); input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); input_report_key(ictx->touch, BTN_TOUCH, 0x01); input_sync(ictx->touch); } static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) { int dir = 0; signed char rel_x = 0x00, rel_y = 0x00; u16 timeout, threshold; u32 scancode = KEY_RESERVED; unsigned long flags; /* * The imon directional pad functions more like a touchpad. Bytes 3 & 4 * contain a position coordinate (x,y), with each component ranging * from -14 to 14. We want to down-sample this to only 4 discrete values * for up/down/left/right arrow keys. Also, when you get too close to * diagonals, it has a tendency to jump back and forth, so let's try to * ignore it when they get too close.
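 *
 * e.g. a sample of (x, y) = (3, -12) feeds stabilize() below; once the
 * accumulated |y| crosses the threshold, the buffer is rewritten so the
 * scancode lookup sees 0x01008000 (up), one of only four codes this
 * path can emit.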
*/ if (ictx->product != 0xffdc) { /* first, pad to 8 bytes so it conforms with everything else */ buf[5] = buf[6] = buf[7] = 0; timeout = 500; /* in msecs */ /* (2*threshold) x (2*threshold) square */ threshold = pad_thresh ? pad_thresh : 28; rel_x = buf[2]; rel_y = buf[3]; if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) { if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); if (!dir) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = KEY_UNKNOWN; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } buf[2] = dir & 0xFF; buf[3] = (dir >> 8) & 0xFF; scancode = be32_to_cpu(*((__be32 *)buf)); } } else { /* * Hack alert: instead of using keycodes, we have * to use hard-coded scancodes here... */ if (abs(rel_y) > abs(rel_x)) { buf[2] = (rel_y > 0) ? 0x7F : 0x80; buf[3] = 0; if (rel_y > 0) scancode = 0x01007f00; /* KEY_DOWN */ else scancode = 0x01008000; /* KEY_UP */ } else { buf[2] = 0; buf[3] = (rel_x > 0) ? 0x7F : 0x80; if (rel_x > 0) scancode = 0x0100007f; /* KEY_RIGHT */ else scancode = 0x01000080; /* KEY_LEFT */ } } /* * Handle on-board decoded pad events for e.g. older VFD/iMON-Pad * device (15c2:ffdc). The remote generates various codes from * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates * 0x688301b7 and the right one 0x688481b7. All other keys generate * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with * reversed endianness. Extract direction from buffer, swap the endianness, * adjust sign and feed the values into stabilize(). The resulting codes * will be 0x01008000, 0x01007F00, which match the newer devices. */ } else { timeout = 10; /* in msecs */ /* (2*threshold) x (2*threshold) square */ threshold = pad_thresh ? pad_thresh : 15; /* buf[1] is x */ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; if (buf[0] & 0x02) rel_x |= ~0x10+1; /* buf[2] is y */ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; if (buf[0] & 0x01) rel_y |= ~0x10+1; buf[0] = 0x01; buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0; if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); if (!dir) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = KEY_UNKNOWN; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } buf[2] = dir & 0xFF; buf[3] = (dir >> 8) & 0xFF; scancode = be32_to_cpu(*((__be32 *)buf)); } else { /* * Hack alert: instead of using keycodes, we have * to use hard-coded scancodes here... */ if (abs(rel_y) > abs(rel_x)) { buf[2] = (rel_y > 0) ? 0x7F : 0x80; buf[3] = 0; if (rel_y > 0) scancode = 0x01007f00; /* KEY_DOWN */ else scancode = 0x01008000; /* KEY_UP */ } else { buf[2] = 0; buf[3] = (rel_x > 0) ? 0x7F : 0x80; if (rel_x > 0) scancode = 0x0100007f; /* KEY_RIGHT */ else scancode = 0x01000080; /* KEY_LEFT */ } } } if (scancode) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = imon_remote_key_lookup(ictx, scancode); spin_unlock_irqrestore(&ictx->kc_lock, flags); } } /* * figure out if this is a press or a release. We don't actually * care about repeats, as those will be auto-generated within the IR * subsystem for repeating scancodes.
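 *
 * Example with a hypothetical iMON code: if the keymap holds 0x288195b7
 * for a press, the release arrives as 0x2881d5b7 (bit 14 set);
 * imon_remote_key_lookup() masks that bit off, finds the same keycode
 * and sets release_code, so the logic below reports a key-up.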
*/ static int imon_parse_press_type(struct imon_context *ictx, unsigned char *buf, u8 ktype) { int press_type = 0; unsigned long flags; spin_lock_irqsave(&ictx->kc_lock, flags); /* key release of 0x02XXXXXX key */ if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00) ictx->kc = ictx->last_keycode; /* mouse button release on (some) 0xffdc devices */ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x68 && buf[1] == 0x82 && buf[2] == 0x81 && buf[3] == 0xb7) ictx->kc = ictx->last_keycode; /* mouse button release on (some other) 0xffdc devices */ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x01 && buf[1] == 0x00 && buf[2] == 0x81 && buf[3] == 0xb7) ictx->kc = ictx->last_keycode; /* mce-specific button handling, no keyup events */ else if (ktype == IMON_KEY_MCE) { ictx->rc_toggle = buf[2]; press_type = 1; /* incoherent or irrelevant data */ } else if (ictx->kc == KEY_RESERVED) press_type = -EINVAL; /* key release of 0xXXXXXXb7 key */ else if (ictx->release_code) press_type = 0; /* this is a button press */ else press_type = 1; spin_unlock_irqrestore(&ictx->kc_lock, flags); return press_type; } /* * Process the incoming packet */ static void imon_incoming_packet(struct imon_context *ictx, struct urb *urb, int intf) { int len = urb->actual_length; unsigned char *buf = urb->transfer_buffer; struct device *dev = ictx->dev; unsigned long flags; u32 kc; u64 scancode; int press_type = 0; ktime_t t; static ktime_t prev_time; u8 ktype; /* filter out junk data on the older 0xffdc imon devices */ if ((buf[0] == 0xff) && (buf[1] == 0xff) && (buf[2] == 0xff)) return; /* Figure out what key was pressed */ if (len == 8 && buf[7] == 0xee) { scancode = be64_to_cpu(*((__be64 *)buf)); ktype = IMON_KEY_PANEL; kc = imon_panel_key_lookup(ictx, scancode); ictx->release_code = false; } else { scancode = be32_to_cpu(*((__be32 *)buf)); if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) { ktype = IMON_KEY_IMON; if (buf[0] == 0x80) ktype = IMON_KEY_MCE; kc = imon_mce_key_lookup(ictx, scancode); } else { ktype = IMON_KEY_IMON; kc = imon_remote_key_lookup(ictx, scancode); } } spin_lock_irqsave(&ictx->kc_lock, flags); /* keyboard/mouse mode toggle button */ if (kc == KEY_KEYBOARD && !ictx->release_code) { ictx->last_keycode = kc; if (!nomouse) { ictx->pad_mouse = !ictx->pad_mouse; dev_dbg(dev, "toggling to %s mode\n", ictx->pad_mouse ? 
"mouse" : "keyboard"); spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } else { ictx->pad_mouse = false; dev_dbg(dev, "mouse mode disabled, passing key value\n"); } } ictx->kc = kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); /* send touchscreen events through input subsystem if touchpad data */ if (ictx->touch && len == 8 && buf[7] == 0x86) { imon_touch_event(ictx, buf); return; /* look for mouse events with pad in mouse mode */ } else if (ictx->pad_mouse) { if (imon_mouse_event(ictx, buf, len)) return; } /* Now for some special handling to convert pad input to arrow keys */ if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) || ((len == 8) && (buf[0] & 0x40) && !(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) { len = 8; imon_pad_to_keys(ictx, buf); } if (debug) { printk(KERN_INFO "intf%d decoded packet: %*ph\n", intf, len, buf); } press_type = imon_parse_press_type(ictx, buf, ktype); if (press_type < 0) goto not_input_data; if (ktype != IMON_KEY_PANEL) { if (press_type == 0) rc_keyup(ictx->rdev); else { enum rc_proto proto; if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) proto = RC_PROTO_RC6_MCE; else if (ictx->rc_proto == RC_PROTO_BIT_IMON) proto = RC_PROTO_IMON; else return; rc_keydown(ictx->rdev, proto, ictx->rc_scancode, ictx->rc_toggle); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); } return; } /* Only panel type events left to process now */ spin_lock_irqsave(&ictx->kc_lock, flags); t = ktime_get(); /* KEY repeats from knob and panel that need to be suppressed */ if (ictx->kc == KEY_MUTE || ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) { if (ictx->kc == ictx->last_keycode && ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) { spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } } prev_time = t; kc = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); input_report_key(ictx->idev, kc, press_type); input_sync(ictx->idev); /* panel keys don't generate a release */ input_report_key(ictx->idev, kc, 0); input_sync(ictx->idev); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; not_input_data: if (len != 8) { dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n", __func__, len, intf); return; } /* iMON 2.4G associate frame */ if (buf[0] == 0x00 && buf[2] == 0xFF && /* REFID */ buf[3] == 0xFF && buf[4] == 0xFF && buf[5] == 0xFF && /* iMON 2.4G */ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */ dev_warn(dev, "%s: remote associated refid=%02X\n", __func__, buf[1]); ictx->rf_isassociating = false; } } /* * Callback function for USB core API: receive data */ static void usb_rx_callback_intf0(struct urb *urb) { struct imon_context *ictx; int intfnum = 0; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; switch (urb->status) { case -ENOENT: /* usbcore unlink successful! 
*/ return; case -ESHUTDOWN: /* transport endpoint was shut down */ break; case 0: /* * if we get a callback before we're done configuring the hardware, we * can't yet process the data, as there's nowhere to send it, but we * still need to submit a new rx URB to avoid wedging the hardware */ if (ictx->dev_present_intf0) imon_incoming_packet(ictx, urb, intfnum); break; case -ECONNRESET: case -EILSEQ: case -EPROTO: case -EPIPE: dev_warn(ictx->dev, "imon %s: status(%d)\n", __func__, urb->status); return; default: dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", __func__, urb->status); break; } usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC); } static void usb_rx_callback_intf1(struct urb *urb) { struct imon_context *ictx; int intfnum = 1; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; switch (urb->status) { case -ENOENT: /* usbcore unlink successful! */ return; case -ESHUTDOWN: /* transport endpoint was shut down */ break; case 0: /* * if we get a callback before we're done configuring the hardware, we * can't yet process the data, as there's nowhere to send it, but we * still need to submit a new rx URB to avoid wedging the hardware */ if (ictx->dev_present_intf1) imon_incoming_packet(ictx, urb, intfnum); break; case -ECONNRESET: case -EILSEQ: case -EPROTO: case -EPIPE: dev_warn(ictx->dev, "imon %s: status(%d)\n", __func__, urb->status); return; default: dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", __func__, urb->status); break; } usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC); } /* * The 0x15c2:0xffdc device ID was used for umpteen different imon * devices, and all of them constantly spew interrupts, even when there * is no actual data to report. However, byte 6 of this buffer looks like * it's unique across device variants, so we're trying to key off that to * figure out which display type (if any) and what IR protocol the device * actually supports. These devices have their IR protocol hard-coded into * their firmware; it can't be changed on the fly like the newer hardware's.
*/ static void imon_get_ffdc_type(struct imon_context *ictx) { u8 ffdc_cfg_byte = ictx->usb_rx_buf[6]; u8 detected_display_type = IMON_DISPLAY_TYPE_NONE; u64 allowed_protos = RC_PROTO_BIT_IMON; switch (ffdc_cfg_byte) { /* iMON Knob, no display, iMON IR + vol knob */ case 0x21: dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR"); ictx->display_supported = false; break; /* iMON 2.4G LT (usb stick), no display, iMON RF */ case 0x4e: dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF"); ictx->display_supported = false; ictx->rf_device = true; break; /* iMON VFD, no IR (does have vol knob tho) */ case 0x35: dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; break; /* iMON VFD, iMON IR */ case 0x24: case 0x30: case 0x85: dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; break; /* iMON VFD, MCE IR */ case 0x46: case 0x9e: dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; allowed_protos = RC_PROTO_BIT_RC6_MCE; break; /* iMON VFD, iMON or MCE IR */ case 0x7e: dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; allowed_protos |= RC_PROTO_BIT_RC6_MCE; break; /* iMON LCD, MCE IR */ case 0x9f: dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_LCD; allowed_protos = RC_PROTO_BIT_RC6_MCE; break; /* no display, iMON IR */ case 0x26: dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR"); ictx->display_supported = false; break; /* Soundgraph iMON UltraBay */ case 0x98: dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR"); detected_display_type = IMON_DISPLAY_TYPE_LCD; allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE; ictx->dev_descr = &ultrabay_table; break; default: dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; /* * We don't know which one it is, allow user to set the * RC6 one from userspace if IMON wasn't correct. */ allowed_protos |= RC_PROTO_BIT_RC6_MCE; break; } printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte); ictx->display_type = detected_display_type; ictx->rc_proto = allowed_protos; } static void imon_set_display_type(struct imon_context *ictx) { u8 configured_display_type = IMON_DISPLAY_TYPE_VFD; /* * Try to auto-detect the type of display if the user hasn't set * it by hand via the display_type modparam. Default is VFD. 
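 *
 * e.g. an illustrative "modprobe imon display_type=2" forces the LCD
 * character device even if autodetection would have chosen otherwise,
 * while display_type=4 suppresses display support entirely.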
*/ if (display_type == IMON_DISPLAY_TYPE_AUTO) { switch (ictx->product) { case 0xffdc: /* set in imon_get_ffdc_type() */ configured_display_type = ictx->display_type; break; case 0x0034: case 0x0035: configured_display_type = IMON_DISPLAY_TYPE_VGA; break; case 0x0038: case 0x0039: case 0x0045: configured_display_type = IMON_DISPLAY_TYPE_LCD; break; case 0x003c: case 0x0041: case 0x0042: case 0x0043: configured_display_type = IMON_DISPLAY_TYPE_NONE; ictx->display_supported = false; break; case 0x0036: case 0x0044: default: configured_display_type = IMON_DISPLAY_TYPE_VFD; break; } } else { configured_display_type = display_type; if (display_type == IMON_DISPLAY_TYPE_NONE) ictx->display_supported = false; else ictx->display_supported = true; dev_info(ictx->dev, "%s: overriding display type to %d via modparam\n", __func__, display_type); } ictx->display_type = configured_display_type; } static struct rc_dev *imon_init_rdev(struct imon_context *ictx) { struct rc_dev *rdev; int ret; static const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88 }; rdev = rc_allocate_device(RC_DRIVER_SCANCODE); if (!rdev) { dev_err(ictx->dev, "remote control dev allocation failed\n"); goto out; } snprintf(ictx->name_rdev, sizeof(ictx->name_rdev), "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product); usb_make_path(ictx->usbdev_intf0, ictx->phys_rdev, sizeof(ictx->phys_rdev)); strlcat(ictx->phys_rdev, "/input0", sizeof(ictx->phys_rdev)); rdev->device_name = ictx->name_rdev; rdev->input_phys = ictx->phys_rdev; usb_to_input_id(ictx->usbdev_intf0, &rdev->input_id); rdev->dev.parent = ictx->dev; rdev->priv = ictx; /* iMON PAD or MCE */ rdev->allowed_protocols = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE; rdev->change_protocol = imon_ir_change_protocol; rdev->driver_name = MOD_NAME; /* Enable front-panel buttons and/or knobs */ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet)); ret = send_packet(ictx); /* Not fatal, but warn about it */ if (ret) dev_info(ictx->dev, "panel buttons/knobs setup failed\n"); if (ictx->product == 0xffdc) { imon_get_ffdc_type(ictx); rdev->allowed_protocols = ictx->rc_proto; } imon_set_display_type(ictx); if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) rdev->map_name = RC_MAP_IMON_MCE; else rdev->map_name = RC_MAP_IMON_PAD; ret = rc_register_device(rdev); if (ret < 0) { dev_err(ictx->dev, "remote input dev register failed\n"); goto out; } return rdev; out: rc_free_device(rdev); return NULL; } static struct input_dev *imon_init_idev(struct imon_context *ictx) { const struct imon_panel_key_table *key_table; struct input_dev *idev; int ret, i; key_table = ictx->dev_descr->key_table; idev = input_allocate_device(); if (!idev) goto out; snprintf(ictx->name_idev, sizeof(ictx->name_idev), "iMON Panel, Knob and Mouse(%04x:%04x)", ictx->vendor, ictx->product); idev->name = ictx->name_idev; usb_make_path(ictx->usbdev_intf0, ictx->phys_idev, sizeof(ictx->phys_idev)); strlcat(ictx->phys_idev, "/input1", sizeof(ictx->phys_idev)); idev->phys = ictx->phys_idev; idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL); idev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) | BIT_MASK(REL_WHEEL); /* panel and/or knob code support */ for (i = 0; key_table[i].hw_code != 0; i++) { u32 kc = key_table[i].keycode; __set_bit(kc, idev->keybit); } usb_to_input_id(ictx->usbdev_intf0, &idev->id); idev->dev.parent = ictx->dev; input_set_drvdata(idev, ictx); ret = input_register_device(idev); if (ret < 0) 
{ dev_err(ictx->dev, "input dev register failed\n"); goto out; } return idev; out: input_free_device(idev); return NULL; } static struct input_dev *imon_init_touch(struct imon_context *ictx) { struct input_dev *touch; int ret; touch = input_allocate_device(); if (!touch) goto touch_alloc_failed; snprintf(ictx->name_touch, sizeof(ictx->name_touch), "iMON USB Touchscreen (%04x:%04x)", ictx->vendor, ictx->product); touch->name = ictx->name_touch; usb_make_path(ictx->usbdev_intf1, ictx->phys_touch, sizeof(ictx->phys_touch)); strlcat(ictx->phys_touch, "/input2", sizeof(ictx->phys_touch)); touch->phys = ictx->phys_touch; touch->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); touch->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(touch, ABS_X, 0x00, 0xfff, 0, 0); input_set_abs_params(touch, ABS_Y, 0x00, 0xfff, 0, 0); input_set_drvdata(touch, ictx); usb_to_input_id(ictx->usbdev_intf1, &touch->id); touch->dev.parent = ictx->dev; ret = input_register_device(touch); if (ret < 0) { dev_info(ictx->dev, "touchscreen input dev register failed\n"); goto touch_register_failed; } return touch; touch_register_failed: input_free_device(touch); touch_alloc_failed: return NULL; } static bool imon_find_endpoints(struct imon_context *ictx, struct usb_host_interface *iface_desc) { struct usb_endpoint_descriptor *ep; struct usb_endpoint_descriptor *rx_endpoint = NULL; struct usb_endpoint_descriptor *tx_endpoint = NULL; int ifnum = iface_desc->desc.bInterfaceNumber; int num_endpts = iface_desc->desc.bNumEndpoints; int i, ep_dir, ep_type; bool ir_ep_found = false; bool display_ep_found = false; bool tx_control = false; /* * Scan the endpoint list and set: * first input endpoint = IR endpoint * first output endpoint = display endpoint */ for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) { ep = &iface_desc->endpoint[i].desc; ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; ep_type = usb_endpoint_type(ep); if (!ir_ep_found && ep_dir == USB_DIR_IN && ep_type == USB_ENDPOINT_XFER_INT) { rx_endpoint = ep; ir_ep_found = true; dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__); } else if (!display_ep_found && ep_dir == USB_DIR_OUT && ep_type == USB_ENDPOINT_XFER_INT) { tx_endpoint = ep; display_ep_found = true; dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__); } } if (ifnum == 0) { ictx->rx_endpoint_intf0 = rx_endpoint; /* * tx is used to send characters to lcd/vfd, associate RF * remotes, set IR protocol, and maybe more... */ ictx->tx_endpoint = tx_endpoint; } else { ictx->rx_endpoint_intf1 = rx_endpoint; } /* * If we didn't find a display endpoint, this is probably one of the * newer iMON devices that use control urb instead of interrupt */ if (!display_ep_found) { tx_control = true; display_ep_found = true; dev_dbg(ictx->dev, "%s: device uses control endpoint, not interface OUT endpoint\n", __func__); } /* * Some iMON receivers have no display. Unfortunately, it seems * that SoundGraph recycles device IDs between devices both with * and without... :\ */ if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) { display_ep_found = false; dev_dbg(ictx->dev, "%s: device has no display\n", __func__); } /* * iMON Touch devices have a VGA touchscreen, but no "display", as * that refers to e.g. /dev/lcd0 (a character device LCD or VFD). 
*/ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { display_ep_found = false; dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__); } /* Input endpoint is mandatory */ if (!ir_ep_found) pr_err("no valid input (IR) endpoint found\n"); ictx->tx_control = tx_control; if (display_ep_found) ictx->display_supported = true; return ir_ep_found; } static struct imon_context *imon_init_intf0(struct usb_interface *intf, const struct usb_device_id *id) { struct imon_context *ictx; struct urb *rx_urb; struct urb *tx_urb; struct device *dev = &intf->dev; struct usb_host_interface *iface_desc; int ret = -ENOMEM; ictx = kzalloc(sizeof(*ictx), GFP_KERNEL); if (!ictx) goto exit; rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rx_urb) goto rx_urb_alloc_failed; tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!tx_urb) goto tx_urb_alloc_failed; mutex_init(&ictx->lock); spin_lock_init(&ictx->kc_lock); mutex_lock(&ictx->lock); ictx->dev = dev; ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf)); ictx->rx_urb_intf0 = rx_urb; ictx->tx_urb = tx_urb; ictx->rf_device = false; init_completion(&ictx->tx.finished); ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor); ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct); /* save drive info for later accessing the panel/knob key table */ ictx->dev_descr = (struct imon_usb_dev_descr *)id->driver_info; /* default send_packet delay is 5ms but some devices need more */ ictx->send_packet_delay = ictx->dev_descr->flags & IMON_NEED_20MS_PKT_DELAY ? 20 : 5; ret = -ENODEV; iface_desc = intf->cur_altsetting; if (!imon_find_endpoints(ictx, iface_desc)) { goto find_endpoint_failed; } usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0, usb_rcvintpipe(ictx->usbdev_intf0, ictx->rx_endpoint_intf0->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf0, ictx, ictx->rx_endpoint_intf0->bInterval); ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL); if (ret) { pr_err("usb_submit_urb failed for intf0 (%d)\n", ret); goto urb_submit_failed; } ictx->idev = imon_init_idev(ictx); if (!ictx->idev) { dev_err(dev, "%s: input device setup failed\n", __func__); goto idev_setup_failed; } ictx->rdev = imon_init_rdev(ictx); if (!ictx->rdev) { dev_err(dev, "%s: rc device setup failed\n", __func__); goto rdev_setup_failed; } ictx->dev_present_intf0 = true; mutex_unlock(&ictx->lock); return ictx; rdev_setup_failed: input_unregister_device(ictx->idev); idev_setup_failed: usb_kill_urb(ictx->rx_urb_intf0); urb_submit_failed: find_endpoint_failed: usb_put_dev(ictx->usbdev_intf0); mutex_unlock(&ictx->lock); usb_free_urb(tx_urb); tx_urb_alloc_failed: usb_free_urb(rx_urb); rx_urb_alloc_failed: kfree(ictx); exit: dev_err(dev, "unable to initialize intf0, err %d\n", ret); return NULL; } static struct imon_context *imon_init_intf1(struct usb_interface *intf, struct imon_context *ictx) { struct urb *rx_urb; struct usb_host_interface *iface_desc; int ret = -ENOMEM; rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rx_urb) goto rx_urb_alloc_failed; mutex_lock(&ictx->lock); if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { timer_setup(&ictx->ttimer, imon_touch_display_timeout, 0); } ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf)); ictx->rx_urb_intf1 = rx_urb; ret = -ENODEV; iface_desc = intf->cur_altsetting; if (!imon_find_endpoints(ictx, iface_desc)) goto find_endpoint_failed; if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { ictx->touch = imon_init_touch(ictx); if (!ictx->touch) goto touch_setup_failed; } else ictx->touch = NULL; 
usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1, usb_rcvintpipe(ictx->usbdev_intf1, ictx->rx_endpoint_intf1->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf1, ictx, ictx->rx_endpoint_intf1->bInterval); ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL); if (ret) { pr_err("usb_submit_urb failed for intf1 (%d)\n", ret); goto urb_submit_failed; } ictx->dev_present_intf1 = true; mutex_unlock(&ictx->lock); return ictx; urb_submit_failed: if (ictx->touch) input_unregister_device(ictx->touch); touch_setup_failed: find_endpoint_failed: usb_put_dev(ictx->usbdev_intf1); ictx->usbdev_intf1 = NULL; mutex_unlock(&ictx->lock); usb_free_urb(rx_urb); ictx->rx_urb_intf1 = NULL; rx_urb_alloc_failed: dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret); return NULL; } static void imon_init_display(struct imon_context *ictx, struct usb_interface *intf) { int ret; dev_dbg(ictx->dev, "Registering iMON display with sysfs\n"); /* set up sysfs entry for built-in clock */ ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group); if (ret) dev_err(ictx->dev, "Could not create display sysfs entries(%d)", ret); if (ictx->display_type == IMON_DISPLAY_TYPE_LCD) ret = usb_register_dev(intf, &imon_lcd_class); else ret = usb_register_dev(intf, &imon_vfd_class); if (ret) /* Not a fatal error, so ignore */ dev_info(ictx->dev, "could not get a minor number for display\n"); } /* * Callback function for USB core API: Probe */ static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev = NULL; struct usb_host_interface *iface_desc = NULL; struct usb_interface *first_if; struct device *dev = &interface->dev; int ifnum, sysfs_err; int ret = 0; struct imon_context *ictx = NULL; u16 vendor, product; usbdev = usb_get_dev(interface_to_usbdev(interface)); iface_desc = interface->cur_altsetting; ifnum = iface_desc->desc.bInterfaceNumber; vendor = le16_to_cpu(usbdev->descriptor.idVendor); product = le16_to_cpu(usbdev->descriptor.idProduct); dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n", __func__, vendor, product, ifnum); first_if = usb_ifnum_to_if(usbdev, 0); if (!first_if) { ret = -ENODEV; goto fail; } if (first_if->dev.driver != interface->dev.driver) { dev_err(&interface->dev, "inconsistent driver matching\n"); ret = -EINVAL; goto fail; } if (ifnum == 0) { ictx = imon_init_intf0(interface, id); if (!ictx) { pr_err("failed to initialize context!\n"); ret = -ENODEV; goto fail; } refcount_set(&ictx->users, 1); } else { /* this is the secondary interface on the device */ struct imon_context *first_if_ctx = usb_get_intfdata(first_if); /* fail early if first intf failed to register */ if (!first_if_ctx) { ret = -ENODEV; goto fail; } ictx = imon_init_intf1(interface, first_if_ctx); if (!ictx) { pr_err("failed to attach to context!\n"); ret = -ENODEV; goto fail; } refcount_inc(&ictx->users); } usb_set_intfdata(interface, ictx); if (ifnum == 0) { if (product == 0xffdc && ictx->rf_device) { sysfs_err = sysfs_create_group(&interface->dev.kobj, &imon_rf_attr_group); if (sysfs_err) pr_err("Could not create RF sysfs entries(%d)\n", sysfs_err); } if (ictx->display_supported) imon_init_display(ictx, interface); } dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n", vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); usb_put_dev(usbdev); return 0; fail: usb_put_dev(usbdev); dev_err(dev, "unable to register, err %d\n", ret); return ret; } /* * Callback function for USB core API: 
disconnect */ static void imon_disconnect(struct usb_interface *interface) { struct imon_context *ictx; struct device *dev; int ifnum; ictx = usb_get_intfdata(interface); mutex_lock(&ictx->lock); ictx->disconnected = true; mutex_unlock(&ictx->lock); dev = ictx->dev; ifnum = interface->cur_altsetting->desc.bInterfaceNumber; /* * sysfs_remove_group is safe to call even if sysfs_create_group * hasn't been called */ sysfs_remove_group(&interface->dev.kobj, &imon_display_attr_group); sysfs_remove_group(&interface->dev.kobj, &imon_rf_attr_group); usb_set_intfdata(interface, NULL); /* Abort ongoing write */ if (ictx->tx.busy) { usb_kill_urb(ictx->tx_urb); complete(&ictx->tx.finished); } if (ifnum == 0) { ictx->dev_present_intf0 = false; usb_kill_urb(ictx->rx_urb_intf0); input_unregister_device(ictx->idev); rc_unregister_device(ictx->rdev); if (ictx->display_supported) { if (ictx->display_type == IMON_DISPLAY_TYPE_LCD) usb_deregister_dev(interface, &imon_lcd_class); else if (ictx->display_type == IMON_DISPLAY_TYPE_VFD) usb_deregister_dev(interface, &imon_vfd_class); } usb_put_dev(ictx->usbdev_intf0); } else { ictx->dev_present_intf1 = false; usb_kill_urb(ictx->rx_urb_intf1); if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { timer_delete_sync(&ictx->ttimer); input_unregister_device(ictx->touch); } usb_put_dev(ictx->usbdev_intf1); } if (refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n", __func__, ifnum); } static int imon_suspend(struct usb_interface *intf, pm_message_t message) { struct imon_context *ictx = usb_get_intfdata(intf); int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (ifnum == 0) usb_kill_urb(ictx->rx_urb_intf0); else usb_kill_urb(ictx->rx_urb_intf1); return 0; } static int imon_resume(struct usb_interface *intf) { int rc = 0; struct imon_context *ictx = usb_get_intfdata(intf); int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (ifnum == 0) { usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0, usb_rcvintpipe(ictx->usbdev_intf0, ictx->rx_endpoint_intf0->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf0, ictx, ictx->rx_endpoint_intf0->bInterval); rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_NOIO); } else { usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1, usb_rcvintpipe(ictx->usbdev_intf1, ictx->rx_endpoint_intf1->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf1, ictx, ictx->rx_endpoint_intf1->bInterval); rc = usb_submit_urb(ictx->rx_urb_intf1, GFP_NOIO); } return rc; } module_usb_driver(imon_driver);
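/*
 * End of the iMON driver. As a usage illustration (not part of the
 * driver), a hedged userspace sketch that feeds imon_clock_store() the
 * seven-field date string it sscanf()s; the sysfs path is an assumption
 * and depends on where the interface was bound.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	char cmd[64];
	time_t now = time(NULL);
	struct tm tm;
	int fd;

	localtime_r(&now, &tm);
	/* "year month day dow hour minute second", as parsed by the driver */
	snprintf(cmd, sizeof(cmd), "%02d %02d %02d %d %02d %02d %02d",
		 tm.tm_year % 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_wday,
		 tm.tm_hour, tm.tm_min, tm.tm_sec);
	fd = open("/sys/bus/usb/drivers/imon/<intf>/imon_clock", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, cmd, strlen(cmd));
	close(fd);
	return 0;
}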
// SPDX-License-Identifier: GPL-2.0-only /* * linux/kernel/power/user.c * * This file provides the user space interface for software suspend/resume. * * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> */ #include <linux/suspend.h> #include <linux/reboot.h> #include <linux/string.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pm.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/console.h> #include <linux/cpu.h> #include <linux/freezer.h> #include <linux/uaccess.h> #include "power.h" static bool need_wait; static struct snapshot_data { struct snapshot_handle handle; int swap; int mode; bool frozen; bool ready; bool platform_support; bool free_bitmaps; dev_t dev; } snapshot_state; int is_hibernate_resume_dev(dev_t dev) { return hibernation_available() && snapshot_state.dev == dev; } static int snapshot_open(struct inode *inode, struct file *filp) { struct snapshot_data *data; unsigned int sleep_flags; int error; if (!hibernation_available()) return -EPERM; sleep_flags = lock_system_sleep(); if (!hibernate_acquire()) { error = -EBUSY; goto Unlock; } if ((filp->f_flags & O_ACCMODE) == O_RDWR) { hibernate_release(); error = -ENOSYS; goto Unlock; } nonseekable_open(inode, filp); data = &snapshot_state; filp->private_data = data; memset(&data->handle, 0, sizeof(struct snapshot_handle)); if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { /* Hibernating. The image device should be accessible. */ data->swap = swap_type_of(swsusp_resume_device, 0); data->mode = O_RDONLY; data->free_bitmaps = false; error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); } else { /* * Resuming.
We may need to wait for the image device to * appear. */ need_wait = true; data->swap = -1; data->mode = O_WRONLY; error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE); if (!error) { error = create_basic_memory_bitmaps(); data->free_bitmaps = !error; } } if (error) hibernate_release(); data->frozen = false; data->ready = false; data->platform_support = false; data->dev = 0; Unlock: unlock_system_sleep(sleep_flags); return error; } static int snapshot_release(struct inode *inode, struct file *filp) { struct snapshot_data *data; unsigned int sleep_flags; sleep_flags = lock_system_sleep(); swsusp_free(); data = filp->private_data; data->dev = 0; free_all_swap_pages(data->swap); if (data->frozen) { pm_restore_gfp_mask(); free_basic_memory_bitmaps(); thaw_processes(); } else if (data->free_bitmaps) { free_basic_memory_bitmaps(); } pm_notifier_call_chain(data->mode == O_RDONLY ? PM_POST_HIBERNATION : PM_POST_RESTORE); hibernate_release(); unlock_system_sleep(sleep_flags); return 0; } static ssize_t snapshot_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) { loff_t pg_offp = *offp & ~PAGE_MASK; struct snapshot_data *data; unsigned int sleep_flags; ssize_t res; sleep_flags = lock_system_sleep(); data = filp->private_data; if (!data->ready) { res = -ENODATA; goto Unlock; } if (!pg_offp) { /* on page boundary? */ res = snapshot_read_next(&data->handle); if (res <= 0) goto Unlock; } else { res = PAGE_SIZE - pg_offp; } res = simple_read_from_buffer(buf, count, &pg_offp, data_of(data->handle), res); if (res > 0) *offp += res; Unlock: unlock_system_sleep(sleep_flags); return res; } static ssize_t snapshot_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp) { loff_t pg_offp = *offp & ~PAGE_MASK; struct snapshot_data *data; unsigned long sleep_flags; ssize_t res; if (need_wait) { wait_for_device_probe(); need_wait = false; } sleep_flags = lock_system_sleep(); data = filp->private_data; if (!pg_offp) { res = snapshot_write_next(&data->handle); if (res <= 0) goto unlock; } else { res = PAGE_SIZE; } if (!data_of(data->handle)) { res = -EINVAL; goto unlock; } res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp, buf, count); if (res > 0) *offp += res; unlock: unlock_system_sleep(sleep_flags); return res; } struct compat_resume_swap_area { compat_loff_t offset; u32 dev; } __packed; static int snapshot_set_swap_area(struct snapshot_data *data, void __user *argp) { sector_t offset; dev_t swdev; if (swsusp_swap_in_use()) return -EPERM; if (in_compat_syscall()) { struct compat_resume_swap_area swap_area; if (copy_from_user(&swap_area, argp, sizeof(swap_area))) return -EFAULT; swdev = new_decode_dev(swap_area.dev); offset = swap_area.offset; } else { struct resume_swap_area swap_area; if (copy_from_user(&swap_area, argp, sizeof(swap_area))) return -EFAULT; swdev = new_decode_dev(swap_area.dev); offset = swap_area.offset; } /* * User space encodes device types as two-byte values, * so we need to recode them */ data->swap = swap_type_of(swdev, offset); if (data->swap < 0) return swdev ? 
-ENODEV : -EINVAL; data->dev = swdev; return 0; } static long snapshot_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error = 0; struct snapshot_data *data; loff_t size; sector_t offset; if (need_wait) { wait_for_device_probe(); need_wait = false; } if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR) return -ENOTTY; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!mutex_trylock(&system_transition_mutex)) return -EBUSY; lock_device_hotplug(); data = filp->private_data; switch (cmd) { case SNAPSHOT_FREEZE: if (data->frozen) break; ksys_sync_helper(); error = freeze_processes(); if (error) break; error = create_basic_memory_bitmaps(); if (error) thaw_processes(); else data->frozen = true; break; case SNAPSHOT_UNFREEZE: if (!data->frozen || data->ready) break; pm_restore_gfp_mask(); free_basic_memory_bitmaps(); data->free_bitmaps = false; thaw_processes(); data->frozen = false; break; case SNAPSHOT_CREATE_IMAGE: if (data->mode != O_RDONLY || !data->frozen || data->ready) { error = -EPERM; break; } pm_restore_gfp_mask(); error = hibernation_snapshot(data->platform_support); if (!error) { error = put_user(in_suspend, (int __user *)arg); data->ready = !freezer_test_done && !error; freezer_test_done = false; } break; case SNAPSHOT_ATOMIC_RESTORE: error = snapshot_write_finalize(&data->handle); if (error) break; if (data->mode != O_WRONLY || !data->frozen || !snapshot_image_loaded(&data->handle)) { error = -EPERM; break; } error = hibernation_restore(data->platform_support); break; case SNAPSHOT_FREE: swsusp_free(); memset(&data->handle, 0, sizeof(struct snapshot_handle)); data->ready = false; /* * It is necessary to thaw kernel threads here, because * SNAPSHOT_CREATE_IMAGE may be invoked directly after * SNAPSHOT_FREE. In that case, if kernel threads were not * thawed, the preallocation of memory carried out by * hibernation_snapshot() might run into problems (i.e. it * might fail or even deadlock). 
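 *
 * e.g. a tool may legally issue SNAPSHOT_FREEZE, SNAPSHOT_CREATE_IMAGE,
 * SNAPSHOT_FREE and then SNAPSHOT_CREATE_IMAGE again; the second
 * preallocation pass only works because of the thaw below.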
*/ thaw_kernel_threads(); break; case SNAPSHOT_PREF_IMAGE_SIZE: image_size = arg; break; case SNAPSHOT_GET_IMAGE_SIZE: if (!data->ready) { error = -ENODATA; break; } size = snapshot_get_image_size(); size <<= PAGE_SHIFT; error = put_user(size, (loff_t __user *)arg); break; case SNAPSHOT_AVAIL_SWAP_SIZE: size = count_swap_pages(data->swap, 1); size <<= PAGE_SHIFT; error = put_user(size, (loff_t __user *)arg); break; case SNAPSHOT_ALLOC_SWAP_PAGE: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } offset = alloc_swapdev_block(data->swap); if (offset) { offset <<= PAGE_SHIFT; error = put_user(offset, (loff_t __user *)arg); } else { error = -ENOSPC; } break; case SNAPSHOT_FREE_SWAP_PAGES: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } free_all_swap_pages(data->swap); break; case SNAPSHOT_S2RAM: if (!data->frozen) { error = -EPERM; break; } /* * Tasks are frozen and the notifiers have been called with * PM_HIBERNATION_PREPARE */ error = suspend_devices_and_enter(PM_SUSPEND_MEM); data->ready = false; break; case SNAPSHOT_PLATFORM_SUPPORT: data->platform_support = !!arg; break; case SNAPSHOT_POWER_OFF: if (data->platform_support) error = hibernation_platform_enter(); break; case SNAPSHOT_SET_SWAP_AREA: error = snapshot_set_swap_area(data, (void __user *)arg); break; default: error = -ENOTTY; } unlock_device_hotplug(); mutex_unlock(&system_transition_mutex); return error; } #ifdef CONFIG_COMPAT static long snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t)); switch (cmd) { case SNAPSHOT_GET_IMAGE_SIZE: case SNAPSHOT_AVAIL_SWAP_SIZE: case SNAPSHOT_ALLOC_SWAP_PAGE: case SNAPSHOT_CREATE_IMAGE: case SNAPSHOT_SET_SWAP_AREA: return snapshot_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); default: return snapshot_ioctl(file, cmd, arg); } } #endif /* CONFIG_COMPAT */ static const struct file_operations snapshot_fops = { .open = snapshot_open, .release = snapshot_release, .read = snapshot_read, .write = snapshot_write, .unlocked_ioctl = snapshot_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = snapshot_compat_ioctl, #endif }; static struct miscdevice snapshot_device = { .minor = SNAPSHOT_MINOR, .name = "snapshot", .fops = &snapshot_fops, }; static int __init snapshot_device_init(void) { return misc_register(&snapshot_device); } device_initcall(snapshot_device_init);
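/*
 * A hedged userspace sketch (not kernel code) of the image-capture side
 * of the interface above; error handling is elided, and a real tool
 * would stream the pages to swap before powering off.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	char page[4096];
	int in_suspend = 0;
	int fd = open("/dev/snapshot", O_RDONLY);	/* the hibernating side */

	if (fd < 0)
		return 1;
	ioctl(fd, SNAPSHOT_FREEZE);
	ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend);
	/* drain the snapshot one page at a time */
	while (read(fd, page, sizeof(page)) > 0)
		;
	close(fd);	/* snapshot_release() thaws and cleans up */
	return 0;
}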
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic USB GNSS receiver driver
 *
 * Copyright (C) 2021 Johan Hovold <johan@kernel.org>
 */

#include <linux/errno.h>
#include <linux/gnss.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define GNSS_USB_READ_BUF_LEN	512
#define GNSS_USB_WRITE_TIMEOUT	1000

static const struct usb_device_id gnss_usb_id_table[] = {
	{ USB_DEVICE(0x1199, 0xb000) },	/* Sierra Wireless XM1210 */
	{ }
};
MODULE_DEVICE_TABLE(usb, gnss_usb_id_table);

struct gnss_usb {
	struct usb_device *udev;
	struct usb_interface *intf;
	struct gnss_device *gdev;
	struct urb *read_urb;
	unsigned int write_pipe;
};

static void gnss_usb_rx_complete(struct urb *urb)
{
	struct gnss_usb *gusb = urb->context;
	struct gnss_device *gdev = gusb->gdev;
	int status = urb->status;
	int len;
	int ret;

	switch (status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		dev_dbg(&gdev->dev, "urb stopped: %d\n", status);
		return;
	case -EPIPE:
		dev_err(&gdev->dev, "urb stopped: %d\n", status);
		return;
	default:
		dev_dbg(&gdev->dev, "nonzero urb status: %d\n", status);
		goto resubmit;
	}

	len = urb->actual_length;
	if (len == 0)
		goto resubmit;

	ret = gnss_insert_raw(gdev, urb->transfer_buffer, len);
	if (ret < len)
		dev_dbg(&gdev->dev, "dropped %d bytes\n", len - ret);
resubmit:
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret && ret != -EPERM && ret != -ENODEV)
		dev_err(&gdev->dev, "failed to resubmit urb: %d\n", ret);
}

static int gnss_usb_open(struct gnss_device *gdev)
{
	struct gnss_usb *gusb = gnss_get_drvdata(gdev);
	int ret;

	ret = usb_submit_urb(gusb->read_urb, GFP_KERNEL);
	if (ret) {
		if (ret != -EPERM && ret != -ENODEV)
			dev_err(&gdev->dev, "failed to submit urb: %d\n", ret);
		return ret;
	}

	return 0;
}

static void gnss_usb_close(struct gnss_device *gdev)
{
	struct gnss_usb *gusb = gnss_get_drvdata(gdev);

	usb_kill_urb(gusb->read_urb);
}

static int gnss_usb_write_raw(struct gnss_device *gdev,
			      const unsigned char *buf, size_t count)
{
	struct gnss_usb *gusb = gnss_get_drvdata(gdev);
	void *tbuf;
	int ret;

	tbuf = kmemdup(buf, count, GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	ret = usb_bulk_msg(gusb->udev, gusb->write_pipe, tbuf, count, NULL,
			   GNSS_USB_WRITE_TIMEOUT);
	kfree(tbuf);
	if (ret)
		return ret;

	return count;
}

static const struct gnss_operations gnss_usb_gnss_ops = {
	.open		= gnss_usb_open,
	.close		= gnss_usb_close,
	.write_raw	= gnss_usb_write_raw,
};

static int gnss_usb_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_endpoint_descriptor *in, *out;
	struct gnss_device *gdev;
	struct gnss_usb *gusb;
	struct urb *urb;
	size_t buf_len;
	void *buf;
	int ret;

	ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL,
			NULL);
	if (ret)
		return ret;

	gusb = kzalloc(sizeof(*gusb), GFP_KERNEL);
	if (!gusb)
		return -ENOMEM;

	gdev = gnss_allocate_device(&intf->dev);
	if (!gdev) {
		ret = -ENOMEM;
		goto err_free_gusb;
	}

	gdev->ops = &gnss_usb_gnss_ops;
	gdev->type = GNSS_TYPE_NMEA;
	gnss_set_drvdata(gdev, gusb);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		ret = -ENOMEM;
		goto err_put_gdev;
	}

	buf_len = max(usb_endpoint_maxp(in), GNSS_USB_READ_BUF_LEN);

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_urb;
	}

	usb_fill_bulk_urb(urb, udev,
			  usb_rcvbulkpipe(udev, usb_endpoint_num(in)),
			  buf, buf_len, gnss_usb_rx_complete, gusb);

	gusb->intf = intf;
	gusb->udev = udev;
	gusb->gdev = gdev;
	gusb->read_urb = urb;
	gusb->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out));

	ret = gnss_register_device(gdev);
	if (ret)
		goto err_free_buf;

	usb_set_intfdata(intf, gusb);

	return 0;

err_free_buf:
	kfree(buf);
err_free_urb:
	usb_free_urb(urb);
err_put_gdev:
	gnss_put_device(gdev);
err_free_gusb:
	kfree(gusb);

	return ret;
}

static void gnss_usb_disconnect(struct usb_interface *intf)
{
	struct gnss_usb *gusb = usb_get_intfdata(intf);

	gnss_deregister_device(gusb->gdev);

	kfree(gusb->read_urb->transfer_buffer);
	usb_free_urb(gusb->read_urb);
	gnss_put_device(gusb->gdev);
	kfree(gusb);
}

static struct usb_driver gnss_usb_driver = {
	.name		= "gnss-usb",
	.probe		= gnss_usb_probe,
	.disconnect	= gnss_usb_disconnect,
	.id_table	= gnss_usb_id_table,
};
module_usb_driver(gnss_usb_driver);

MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
MODULE_DESCRIPTION("Generic USB GNSS receiver driver");
MODULE_LICENSE("GPL v2");
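/*
 * Usage sketch (not part of the driver): the GNSS core registers each
 * receiver as a character device, so the raw NMEA stream can be read
 * directly from userspace. The /dev/gnss0 path is an assumption for
 * illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd;

	fd = open("/dev/gnss0", O_RDONLY);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* raw NMEA sentences */
	close(fd);
	return 0;
}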
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIMENS_H
#define _LINUX_TIMENS_H

#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
#include <linux/time64.h>

struct user_namespace;
extern struct user_namespace init_user_ns;

struct seq_file;
struct vm_area_struct;

struct timens_offsets {
	struct timespec64 monotonic;
	struct timespec64 boottime;
};

struct time_namespace {
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct ns_common	ns;
	struct timens_offsets	offsets;
	struct page		*vvar_page;
	/* If set prevents changing offsets after any task joined namespace. */
	bool			frozen_offsets;
} __randomize_layout;

extern struct time_namespace init_time_ns;

#ifdef CONFIG_TIME_NS
static inline struct time_namespace *to_time_ns(struct ns_common *ns)
{
	return container_of(ns, struct time_namespace, ns);
}

void __init time_ns_init(void);

extern int vdso_join_timens(struct task_struct *task,
			    struct time_namespace *ns);
extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);

static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
	ns_ref_inc(ns);
	return ns;
}

struct time_namespace *copy_time_ns(u64 flags,
				    struct user_namespace *user_ns,
				    struct time_namespace *old_ns);
void free_time_ns(struct time_namespace *ns);
void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
struct page *find_timens_vvar_page(struct vm_area_struct *vma);

static inline void put_time_ns(struct time_namespace *ns)
{
	if (ns_ref_put(ns))
		free_time_ns(ns);
}

void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m);

struct proc_timens_offset {
	int			clockid;
	struct timespec64	val;
};

int proc_timens_set_offset(struct file *file, struct task_struct *p,
			   struct proc_timens_offset *offsets, int n);

static inline void timens_add_monotonic(struct timespec64 *ts)
{
	struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;

	*ts = timespec64_add(*ts, ns_offsets->monotonic);
}

static inline void timens_add_boottime(struct timespec64 *ts)
{
	struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;

	*ts = timespec64_add(*ts, ns_offsets->boottime);
}

static inline u64 timens_add_boottime_ns(u64 nsec)
{
	struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;

	return nsec + timespec64_to_ns(&ns_offsets->boottime);
}

static inline void timens_sub_boottime(struct timespec64 *ts)
{
	struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;

	*ts = timespec64_sub(*ts, ns_offsets->boottime);
}

ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim,
				struct timens_offsets *offsets);

static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
{
	struct time_namespace *ns = current->nsproxy->time_ns;

	if (likely(ns == &init_time_ns))
		return tim;

	return do_timens_ktime_to_host(clockid, tim, &ns->offsets);
}
#else
static inline void __init time_ns_init(void)
{
}

static inline int vdso_join_timens(struct task_struct *task,
				   struct time_namespace *ns)
{
	return 0;
}

static inline void timens_commit(struct task_struct *tsk,
				 struct time_namespace *ns)
{
}

static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
	return NULL;
}

static inline void put_time_ns(struct time_namespace *ns)
{
}

static inline struct time_namespace *copy_time_ns(u64 flags,
						  struct user_namespace *user_ns,
						  struct time_namespace *old_ns)
{
	if (flags & CLONE_NEWTIME)
		return ERR_PTR(-EINVAL);

	return old_ns;
}

static inline void timens_on_fork(struct nsproxy *nsproxy,
				  struct task_struct *tsk)
{
}

static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void timens_add_monotonic(struct timespec64 *ts)
{
}

static inline void timens_add_boottime(struct timespec64 *ts)
{
}

static inline u64 timens_add_boottime_ns(u64 nsec)
{
	return nsec;
}

static inline void timens_sub_boottime(struct timespec64 *ts)
{
}

static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
{
	return tim;
}
#endif

#endif /* _LINUX_TIMENS_H */
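/*
 * Usage sketch (hypothetical caller, not part of this header): on the read
 * side a clock getter fetches host time and then shifts it into the caller's
 * time namespace; timens_ktime_to_host() is the inverse conversion, used when
 * an absolute expiry supplied by userspace must be turned back into host time
 * before arming a timer. do_boottime_in_ns() is an illustrative name only and
 * assumes <linux/timekeeping.h>.
 */
#include <linux/timekeeping.h>

static void do_boottime_in_ns(struct timespec64 *ts)
{
	ktime_get_boottime_ts64(ts);	/* host CLOCK_BOOTTIME */
	timens_add_boottime(ts);	/* apply the per-namespace offset */
}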
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include <linux/fiemap.h>
#include <linux/pagemap.h>

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		const struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static int iomap_fiemap_iter(struct iomap_iter *iter,
		struct fiemap_extent_info *fi, struct iomap *prev)
{
	int ret;

	if (iter->iomap.type == IOMAP_HOLE)
		goto advance;

	ret = iomap_to_fiemap(fi, prev, 0);
	*prev = iter->iomap;
	if (ret < 0)
		return ret;
	if (ret == 1)	/* extent array full */
		return 0;

advance:
	return iomap_iter_advance_full(iter);
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		u64 start, u64 len, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= start,
		.len	= len,
		.flags	= IOMAP_REPORT,
	};
	struct iomap prev = {
		.type	= IOMAP_HOLE,
	};
	int ret;

	ret = fiemap_prep(inode, fi, start, &iter.len, 0);
	if (ret)
		return ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_fiemap_iter(&iter, fi, &prev);

	if (prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	/* inode with no (attribute) mapping will give ENOENT */
	if (ret < 0 && ret != -ENOENT)
		return ret;
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= mapping->host,
		.pos	= (loff_t)bno << mapping->host->i_blkbits,
		.len	= i_blocksize(mapping->host),
		.flags	= IOMAP_REPORT,
	};
	const unsigned int blkshift = mapping->host->i_blkbits - SECTOR_SHIFT;
	int ret;

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	while ((ret = iomap_iter(&iter, ops)) > 0) {
		if (iter.iomap.type == IOMAP_MAPPED)
			bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift;
		/* leave iter.status unset to abort loop */
	}
	if (ret)
		return 0;
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
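/*
 * Wiring sketch (illustrative): a filesystem built on iomap typically
 * implements its ->fiemap inode operation by delegating to iomap_fiemap()
 * with its own iomap_ops, much like the legacy ->bmap path above. "myfs"
 * and myfs_iomap_ops are placeholder names, not a real filesystem.
 */
extern const struct iomap_ops myfs_iomap_ops;	/* filesystem-provided */

static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		       u64 start, u64 len)
{
	return iomap_fiemap(inode, fi, start, len, &myfs_iomap_ops);
}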
// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/

/*
 *	devio.c  --  User space communication with USB devices.
 *
 *	Copyright (C) 1999-2000  Thomas Sailer (sailer@ife.ee.ethz.ch)
 *
 *  This file implements the usbfs/x/y files, where
 *  x is the bus number and y the device number.
 *
 *  It allows user space programs/"drivers" to communicate directly
 *  with USB devices without an intervening kernel driver.
* * Revision history * 22.12.1999 0.1 Initial release (split from proc_usb.c) * 04.01.2000 0.2 Turned into its own filesystem * 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery * (CAN-2005-3055) */ /*****************************************************************************/ #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <linux/usb/quirks.h> #include <linux/cdev.h> #include <linux/notifier.h> #include <linux/security.h> #include <linux/user_namespace.h> #include <linux/scatterlist.h> #include <linux/uaccess.h> #include <linux/dma-mapping.h> #include <asm/byteorder.h> #include <linux/moduleparam.h> #include "usb.h" #ifdef CONFIG_PM #define MAYBE_CAP_SUSPEND USBDEVFS_CAP_SUSPEND #else #define MAYBE_CAP_SUSPEND 0 #endif #define USB_MAXBUS 64 #define USB_DEVICE_MAX (USB_MAXBUS * 128) #define USB_SG_SIZE 16384 /* split-size for large txs */ /* Mutual exclusion for ps->list in resume vs. release and remove */ static DEFINE_MUTEX(usbfs_mutex); struct usb_dev_state { struct list_head list; /* state list */ struct usb_device *dev; struct file *file; spinlock_t lock; /* protects the async urb lists */ struct list_head async_pending; struct list_head async_completed; struct list_head memory_list; wait_queue_head_t wait; /* wake up if a request completed */ wait_queue_head_t wait_for_resume; /* wake up upon runtime resume */ unsigned int discsignr; struct pid *disc_pid; const struct cred *cred; sigval_t disccontext; unsigned long ifclaimed; u32 disabled_bulk_eps; unsigned long interface_allowed_mask; int not_yet_resumed; bool suspend_allowed; bool privileges_dropped; }; struct usb_memory { struct list_head memlist; int vma_use_count; int urb_use_count; u32 size; void *mem; dma_addr_t dma_handle; unsigned long vm_start; struct usb_dev_state *ps; }; struct async { struct list_head asynclist; struct usb_dev_state *ps; struct pid *pid; const struct cred *cred; unsigned int signr; unsigned int ifnum; void __user *userbuffer; void __user *userurb; sigval_t userurb_sigval; struct urb *urb; struct usb_memory *usbm; unsigned int mem_usage; int status; u8 bulk_addr; u8 bulk_status; }; static bool usbfs_snoop; module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic"); static unsigned usbfs_snoop_max = 65536; module_param(usbfs_snoop_max, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop_max, "maximum number of bytes to print while snooping"); #define snoop(dev, format, arg...) 
\ do { \ if (usbfs_snoop) \ dev_info(dev, format, ## arg); \ } while (0) enum snoop_when { SUBMIT, COMPLETE }; #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) /* Limit on the total amount of memory we can allocate for transfers */ static u32 usbfs_memory_mb = 16; module_param(usbfs_memory_mb, uint, 0644); MODULE_PARM_DESC(usbfs_memory_mb, "maximum MB allowed for usbfs buffers (0 = no limit)"); /* Hard limit, necessary to avoid arithmetic overflow */ #define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) static DEFINE_SPINLOCK(usbfs_memory_usage_lock); static u64 usbfs_memory_usage; /* Total memory currently allocated */ /* Check whether it's okay to allocate more memory for a transfer */ static int usbfs_increase_memory_usage(u64 amount) { u64 lim, total_mem; unsigned long flags; int ret; lim = READ_ONCE(usbfs_memory_mb); lim <<= 20; ret = 0; spin_lock_irqsave(&usbfs_memory_usage_lock, flags); total_mem = usbfs_memory_usage + amount; if (lim > 0 && total_mem > lim) ret = -ENOMEM; else usbfs_memory_usage = total_mem; spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags); return ret; } /* Memory for a transfer is being deallocated */ static void usbfs_decrease_memory_usage(u64 amount) { unsigned long flags; spin_lock_irqsave(&usbfs_memory_usage_lock, flags); if (amount > usbfs_memory_usage) usbfs_memory_usage = 0; else usbfs_memory_usage -= amount; spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags); } static int connected(struct usb_dev_state *ps) { return (!list_empty(&ps->list) && ps->dev->state != USB_STATE_NOTATTACHED); } static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) { struct usb_dev_state *ps = usbm->ps; struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); unsigned long flags; spin_lock_irqsave(&ps->lock, flags); --*count; if (usbm->urb_use_count == 0 && usbm->vma_use_count == 0) { list_del(&usbm->memlist); spin_unlock_irqrestore(&ps->lock, flags); hcd_buffer_free_pages(hcd, usbm->size, usbm->mem, usbm->dma_handle); usbfs_decrease_memory_usage( usbm->size + sizeof(struct usb_memory)); kfree(usbm); } else { spin_unlock_irqrestore(&ps->lock, flags); } } static void usbdev_vm_open(struct vm_area_struct *vma) { struct usb_memory *usbm = vma->vm_private_data; unsigned long flags; spin_lock_irqsave(&usbm->ps->lock, flags); ++usbm->vma_use_count; spin_unlock_irqrestore(&usbm->ps->lock, flags); } static void usbdev_vm_close(struct vm_area_struct *vma) { struct usb_memory *usbm = vma->vm_private_data; dec_usb_memory_use_count(usbm, &usbm->vma_use_count); } static const struct vm_operations_struct usbdev_vm_ops = { .open = usbdev_vm_open, .close = usbdev_vm_close }; static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) { struct usb_memory *usbm = NULL; struct usb_dev_state *ps = file->private_data; struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; dma_addr_t dma_handle = DMA_MAPPING_ERROR; int ret; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); if (ret) goto error; usbm = kzalloc(sizeof(struct usb_memory), GFP_KERNEL); if (!usbm) { ret = -ENOMEM; goto error_decrease_mem; } mem = hcd_buffer_alloc_pages(hcd, size, GFP_USER | __GFP_NOWARN, &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; } memset(mem, 0, size); usbm->mem = mem; usbm->dma_handle = dma_handle; usbm->size = size; usbm->ps = ps; usbm->vm_start = vma->vm_start; usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); /* * In DMA-unavailable 
cases, hcd_buffer_alloc_pages allocates * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check * whether we are in such cases, and then use remap_pfn_range (or * dma_mmap_coherent) to map normal (or DMA) pages into the user * space, respectively. */ if (dma_handle == DMA_MAPPING_ERROR) { if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(usbm->mem) >> PAGE_SHIFT, size, vma->vm_page_prot) < 0) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); return -EAGAIN; } } else { if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); return -EAGAIN; } } vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); vma->vm_ops = &usbdev_vm_ops; vma->vm_private_data = usbm; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&usbm->memlist, &ps->memory_list); spin_unlock_irqrestore(&ps->lock, flags); return 0; error_free_usbm: kfree(usbm); error_decrease_mem: usbfs_decrease_memory_usage(size + sizeof(struct usb_memory)); error: return ret; } static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct usb_dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; ssize_t ret = 0; unsigned len; loff_t pos; int i; pos = *ppos; usb_lock_device(dev); if (!connected(ps)) { ret = -ENODEV; goto err; } else if (pos < 0) { ret = -EINVAL; goto err; } if (pos < sizeof(struct usb_device_descriptor)) { /* 18 bytes - fits on the stack */ struct usb_device_descriptor temp_desc; memcpy(&temp_desc, &dev->descriptor, sizeof(dev->descriptor)); le16_to_cpus(&temp_desc.bcdUSB); le16_to_cpus(&temp_desc.idVendor); le16_to_cpus(&temp_desc.idProduct); le16_to_cpus(&temp_desc.bcdDevice); len = sizeof(struct usb_device_descriptor) - pos; if (len > nbytes) len = nbytes; if (copy_to_user(buf, ((char *)&temp_desc) + pos, len)) { ret = -EFAULT; goto err; } *ppos += len; buf += len; nbytes -= len; ret += len; } pos = sizeof(struct usb_device_descriptor); for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) { struct usb_config_descriptor *config = (struct usb_config_descriptor *)dev->rawdescriptors[i]; unsigned int length = le16_to_cpu(config->wTotalLength); if (*ppos < pos + length) { /* The descriptor may claim to be longer than it * really is. Here is the actual allocated length. 
*/ unsigned alloclen = le16_to_cpu(dev->config[i].desc.wTotalLength); len = length - (*ppos - pos); if (len > nbytes) len = nbytes; /* Simply don't write (skip over) unallocated parts */ if (alloclen > (*ppos - pos)) { alloclen -= (*ppos - pos); if (copy_to_user(buf, dev->rawdescriptors[i] + (*ppos - pos), min(len, alloclen))) { ret = -EFAULT; goto err; } } *ppos += len; buf += len; nbytes -= len; ret += len; } pos += length; } err: usb_unlock_device(dev); return ret; } /* * async list handling */ static struct async *alloc_async(unsigned int numisoframes) { struct async *as; as = kzalloc(sizeof(struct async), GFP_KERNEL); if (!as) return NULL; as->urb = usb_alloc_urb(numisoframes, GFP_KERNEL); if (!as->urb) { kfree(as); return NULL; } return as; } static void free_async(struct async *as) { int i; put_pid(as->pid); if (as->cred) put_cred(as->cred); for (i = 0; i < as->urb->num_sgs; i++) { if (sg_page(&as->urb->sg[i])) kfree(sg_virt(&as->urb->sg[i])); } kfree(as->urb->sg); if (as->usbm == NULL) kfree(as->urb->transfer_buffer); else dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count); kfree(as->urb->setup_packet); usb_free_urb(as->urb); usbfs_decrease_memory_usage(as->mem_usage); kfree(as); } static void async_newpending(struct async *as) { struct usb_dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&as->asynclist, &ps->async_pending); spin_unlock_irqrestore(&ps->lock, flags); } static void async_removepending(struct async *as) { struct usb_dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_del_init(&as->asynclist); spin_unlock_irqrestore(&ps->lock, flags); } static struct async *async_getcompleted(struct usb_dev_state *ps) { unsigned long flags; struct async *as = NULL; spin_lock_irqsave(&ps->lock, flags); if (!list_empty(&ps->async_completed)) { as = list_entry(ps->async_completed.next, struct async, asynclist); list_del_init(&as->asynclist); } spin_unlock_irqrestore(&ps->lock, flags); return as; } static struct async *async_getpending(struct usb_dev_state *ps, void __user *userurb) { struct async *as; list_for_each_entry(as, &ps->async_pending, asynclist) if (as->userurb == userurb) { list_del_init(&as->asynclist); return as; } return NULL; } static void snoop_urb(struct usb_device *udev, void __user *userurb, int pipe, unsigned length, int timeout_or_status, enum snoop_when when, unsigned char *data, unsigned data_len) { static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; static const char *dirs[] = {"out", "in"}; int ep; const char *t, *d; if (!usbfs_snoop) return; ep = usb_pipeendpoint(pipe); t = types[usb_pipetype(pipe)]; d = dirs[!!usb_pipein(pipe)]; if (userurb) { /* Async */ if (when == SUBMIT) dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "length %u\n", userurb, ep, t, d, length); else dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "actual_length %u status %d\n", userurb, ep, t, d, length, timeout_or_status); } else { if (when == SUBMIT) dev_info(&udev->dev, "ep%d %s-%s, length %u, " "timeout %d\n", ep, t, d, length, timeout_or_status); else dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, " "status %d\n", ep, t, d, length, timeout_or_status); } data_len = min(data_len, usbfs_snoop_max); if (data && data_len > 0) { print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, data, data_len, 1); } } static void snoop_urb_data(struct urb *urb, unsigned len) { int i, size; len = min(len, usbfs_snoop_max); if (!usbfs_snoop || len == 0) return; if (urb->num_sgs == 0) { 
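		/* Contiguous buffer: dump straight from transfer_buffer. */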
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, urb->transfer_buffer, len, 1); return; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len; print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, sg_virt(&urb->sg[i]), size, 1); len -= size; } } static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb) { unsigned i, len, size; if (urb->number_of_packets > 0) /* Isochronous */ len = urb->transfer_buffer_length; else /* Non-Isoc */ len = urb->actual_length; if (urb->num_sgs == 0) { if (copy_to_user(userbuffer, urb->transfer_buffer, len)) return -EFAULT; return 0; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len; if (copy_to_user(userbuffer, sg_virt(&urb->sg[i]), size)) return -EFAULT; userbuffer += size; len -= size; } return 0; } #define AS_CONTINUATION 1 #define AS_UNLINK 2 static void cancel_bulk_urbs(struct usb_dev_state *ps, unsigned bulk_addr) __releases(ps->lock) __acquires(ps->lock) { struct urb *urb; struct async *as; /* Mark all the pending URBs that match bulk_addr, up to but not * including the first one without AS_CONTINUATION. If such an * URB is encountered then a new transfer has already started so * the endpoint doesn't need to be disabled; otherwise it does. */ list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_addr == bulk_addr) { if (as->bulk_status != AS_CONTINUATION) goto rescan; as->bulk_status = AS_UNLINK; as->bulk_addr = 0; } } ps->disabled_bulk_eps |= (1 << bulk_addr); /* Now carefully unlink all the marked pending URBs */ rescan: list_for_each_entry_reverse(as, &ps->async_pending, asynclist) { if (as->bulk_status == AS_UNLINK) { as->bulk_status = 0; /* Only once */ urb = as->urb; usb_get_urb(urb); spin_unlock(&ps->lock); /* Allow completions */ usb_unlink_urb(urb); usb_put_urb(urb); spin_lock(&ps->lock); goto rescan; } } } static void async_completed(struct urb *urb) { struct async *as = urb->context; struct usb_dev_state *ps = as->ps; struct pid *pid = NULL; const struct cred *cred = NULL; unsigned long flags; sigval_t addr; int signr, errno; spin_lock_irqsave(&ps->lock, flags); list_move_tail(&as->asynclist, &ps->async_completed); as->status = urb->status; signr = as->signr; if (signr) { errno = as->status; addr = as->userurb_sigval; pid = get_pid(as->pid); cred = get_cred(as->cred); } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, as->status, COMPLETE, NULL, 0); if (usb_urb_dir_in(urb)) snoop_urb_data(urb, urb->actual_length); if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && as->status != -ENOENT) cancel_bulk_urbs(ps, as->bulk_addr); wake_up(&ps->wait); spin_unlock_irqrestore(&ps->lock, flags); if (signr) { kill_pid_usb_asyncio(signr, errno, addr, pid, cred); put_pid(pid); put_cred(cred); } } static void destroy_async(struct usb_dev_state *ps, struct list_head *list) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); while (!list_empty(list)) { as = list_last_entry(list, struct async, asynclist); list_del_init(&as->asynclist); urb = as->urb; usb_get_urb(urb); /* drop the spinlock so the completion handler can run */ spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); spin_lock_irqsave(&ps->lock, flags); } spin_unlock_irqrestore(&ps->lock, flags); } static void destroy_async_on_interface(struct usb_dev_state *ps, unsigned int ifnum) { struct list_head *p, *q, hitlist; unsigned long 
flags; INIT_LIST_HEAD(&hitlist); spin_lock_irqsave(&ps->lock, flags); list_for_each_safe(p, q, &ps->async_pending) if (ifnum == list_entry(p, struct async, asynclist)->ifnum) list_move_tail(p, &hitlist); spin_unlock_irqrestore(&ps->lock, flags); destroy_async(ps, &hitlist); } static void destroy_all_async(struct usb_dev_state *ps) { destroy_async(ps, &ps->async_pending); } /* * interface claims are made only at the request of user level code, * which can also release them (explicitly or by closing files). * they're also undone when devices disconnect. */ static int driver_probe(struct usb_interface *intf, const struct usb_device_id *id) { return -ENODEV; } static void driver_disconnect(struct usb_interface *intf) { struct usb_dev_state *ps = usb_get_intfdata(intf); unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber; if (!ps) return; /* NOTE: this relies on usbcore having canceled and completed * all pending I/O requests; 2.6 does that. */ if (likely(ifnum < 8*sizeof(ps->ifclaimed))) clear_bit(ifnum, &ps->ifclaimed); else dev_warn(&intf->dev, "interface number %u out of range\n", ifnum); usb_set_intfdata(intf, NULL); /* force async requests to complete */ destroy_async_on_interface(ps, ifnum); } /* We don't care about suspend/resume of claimed interfaces */ static int driver_suspend(struct usb_interface *intf, pm_message_t msg) { return 0; } static int driver_resume(struct usb_interface *intf) { return 0; } #ifdef CONFIG_PM /* The following routines apply to the entire device, not interfaces */ void usbfs_notify_suspend(struct usb_device *udev) { /* We don't need to handle this */ } void usbfs_notify_resume(struct usb_device *udev) { struct usb_dev_state *ps; /* Protect against simultaneous remove or release */ mutex_lock(&usbfs_mutex); list_for_each_entry(ps, &udev->filelist, list) { WRITE_ONCE(ps->not_yet_resumed, 0); wake_up_all(&ps->wait_for_resume); } mutex_unlock(&usbfs_mutex); } #endif struct usb_driver usbfs_driver = { .name = "usbfs", .probe = driver_probe, .disconnect = driver_disconnect, .suspend = driver_suspend, .resume = driver_resume, .supports_autosuspend = 1, }; static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev = ps->dev; struct usb_interface *intf; int err; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; /* already claimed */ if (test_bit(ifnum, &ps->ifclaimed)) return 0; if (ps->privileges_dropped && !test_bit(ifnum, &ps->interface_allowed_mask)) return -EACCES; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else { unsigned int old_suppress; /* suppress uevents while claiming interface */ old_suppress = dev_get_uevent_suppress(&intf->dev); dev_set_uevent_suppress(&intf->dev, 1); err = usb_driver_claim_interface(&usbfs_driver, intf, ps); dev_set_uevent_suppress(&intf->dev, old_suppress); } if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; } static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev; struct usb_interface *intf; int err; err = -EINVAL; if (ifnum >= 8*sizeof(ps->ifclaimed)) return err; dev = ps->dev; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) { unsigned int old_suppress; /* suppress uevents while releasing interface */ old_suppress = dev_get_uevent_suppress(&intf->dev); dev_set_uevent_suppress(&intf->dev, 1); usb_driver_release_interface(&usbfs_driver, intf); dev_set_uevent_suppress(&intf->dev, old_suppress); err = 0; } return err; } static int checkintf(struct usb_dev_state 
*ps, unsigned int ifnum) { if (ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; if (test_bit(ifnum, &ps->ifclaimed)) return 0; /* if not yet claimed, claim it for the driver */ dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim " "interface %u before use\n", task_pid_nr(current), current->comm, ifnum); return claimintf(ps, ifnum); } static int findintfep(struct usb_device *dev, unsigned int ep) { unsigned int i, j, e; struct usb_interface *intf; struct usb_host_interface *alts; struct usb_endpoint_descriptor *endpt; if (ep & ~(USB_DIR_IN|0xf)) return -EINVAL; if (!dev->actconfig) return -ESRCH; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { intf = dev->actconfig->interface[i]; for (j = 0; j < intf->num_altsetting; j++) { alts = &intf->altsetting[j]; for (e = 0; e < alts->desc.bNumEndpoints; e++) { endpt = &alts->endpoint[e].desc; if (endpt->bEndpointAddress == ep) return alts->desc.bInterfaceNumber; } } } return -ENOENT; } static int check_ctrlrecip(struct usb_dev_state *ps, unsigned int requesttype, unsigned int request, unsigned int index) { int ret = 0; struct usb_host_interface *alt_setting; if (ps->dev->state != USB_STATE_UNAUTHENTICATED && ps->dev->state != USB_STATE_ADDRESS && ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype)) return 0; /* * check for the special corner case 'get_device_id' in the printer * class specification, which we always want to allow as it is used * to query things like ink level, etc. */ if (requesttype == 0xa1 && request == 0) { alt_setting = usb_find_alt_setting(ps->dev->actconfig, index >> 8, index & 0xff); if (alt_setting && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER) return 0; } index &= 0xff; switch (requesttype & USB_RECIP_MASK) { case USB_RECIP_ENDPOINT: if ((index & ~USB_DIR_IN) == 0) return 0; ret = findintfep(ps->dev, index); if (ret < 0) { /* * Some not fully compliant Win apps seem to get * index wrong and have the endpoint number here * rather than the endpoint address (with the * correct direction). Win does let this through, * so we'll not reject it here but leave it to * the device to not break KVM. But we warn. 
*/ ret = findintfep(ps->dev, index ^ 0x80); if (ret >= 0) dev_info(&ps->dev->dev, "%s: process %i (%s) requesting ep %02x but needs %02x\n", __func__, task_pid_nr(current), current->comm, index, index ^ 0x80); } if (ret >= 0) ret = checkintf(ps, ret); break; case USB_RECIP_INTERFACE: ret = checkintf(ps, index); break; } return ret; } static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev, unsigned char ep) { if (ep & USB_ENDPOINT_DIR_MASK) return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK]; else return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK]; } static int parse_usbdevfs_streams(struct usb_dev_state *ps, struct usbdevfs_streams __user *streams, unsigned int *num_streams_ret, unsigned int *num_eps_ret, struct usb_host_endpoint ***eps_ret, struct usb_interface **intf_ret) { unsigned int i, num_streams, num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf = NULL; unsigned char ep; int ifnum, ret; if (get_user(num_streams, &streams->num_streams) || get_user(num_eps, &streams->num_eps)) return -EFAULT; if (num_eps < 1 || num_eps > USB_MAXENDPOINTS) return -EINVAL; /* The XHCI controller allows max 2 ^ 16 streams */ if (num_streams_ret && (num_streams < 2 || num_streams > 65536)) return -EINVAL; eps = kmalloc_array(num_eps, sizeof(*eps), GFP_KERNEL); if (!eps) return -ENOMEM; for (i = 0; i < num_eps; i++) { if (get_user(ep, &streams->eps[i])) { ret = -EFAULT; goto error; } eps[i] = ep_to_host_endpoint(ps->dev, ep); if (!eps[i]) { ret = -EINVAL; goto error; } /* usb_alloc/free_streams operate on an usb_interface */ ifnum = findintfep(ps->dev, ep); if (ifnum < 0) { ret = ifnum; goto error; } if (i == 0) { ret = checkintf(ps, ifnum); if (ret < 0) goto error; intf = usb_ifnum_to_if(ps->dev, ifnum); } else { /* Verify all eps belong to the same interface */ if (ifnum != intf->altsetting->desc.bInterfaceNumber) { ret = -EINVAL; goto error; } } } if (num_streams_ret) *num_streams_ret = num_streams; *num_eps_ret = num_eps; *eps_ret = eps; *intf_ret = intf; return 0; error: kfree(eps); return ret; } static struct usb_device *usbdev_lookup_by_devt(dev_t devt) { struct device *dev; dev = bus_find_device_by_devt(&usb_bus_type, devt); if (!dev) return NULL; return to_usb_device(dev); } /* * file operations */ static int usbdev_open(struct inode *inode, struct file *file) { struct usb_device *dev = NULL; struct usb_dev_state *ps; int ret; ret = -ENOMEM; ps = kzalloc(sizeof(struct usb_dev_state), GFP_KERNEL); if (!ps) goto out_free_ps; ret = -ENODEV; /* usbdev device-node */ if (imajor(inode) == USB_DEVICE_MAJOR) dev = usbdev_lookup_by_devt(inode->i_rdev); if (!dev) goto out_free_ps; usb_lock_device(dev); if (dev->state == USB_STATE_NOTATTACHED) goto out_unlock_device; ret = usb_autoresume_device(dev); if (ret) goto out_unlock_device; ps->dev = dev; ps->file = file; ps->interface_allowed_mask = 0xFFFFFFFF; /* 32 bits */ spin_lock_init(&ps->lock); INIT_LIST_HEAD(&ps->list); INIT_LIST_HEAD(&ps->async_pending); INIT_LIST_HEAD(&ps->async_completed); INIT_LIST_HEAD(&ps->memory_list); init_waitqueue_head(&ps->wait); init_waitqueue_head(&ps->wait_for_resume); ps->disc_pid = get_pid(task_pid(current)); ps->cred = get_current_cred(); smp_wmb(); /* Can't race with resume; the device is already active */ list_add_tail(&ps->list, &dev->filelist); file->private_data = ps; usb_unlock_device(dev); snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), current->comm); return ret; out_unlock_device: usb_unlock_device(dev); usb_put_dev(dev); out_free_ps: kfree(ps); return ret; } 
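/*
 * Usage sketch (not part of this file): a minimal usbfs client exercising
 * the open/claim/control paths above. The bus/device node path is an
 * assumption for illustration; the request layout comes from the UAPI
 * header <linux/usbdevice_fs.h>.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

static int usbfs_get_status(const char *node)
{
	struct usbdevfs_ctrltransfer ctrl;
	unsigned short status = 0;
	unsigned int ifnum = 0;
	int fd;

	fd = open(node, O_RDWR);	/* e.g. "/dev/bus/usb/001/002" */
	if (fd < 0)
		return -1;
	ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum);

	ctrl.bRequestType = 0x80;	/* IN | standard | device */
	ctrl.bRequest = 0x00;		/* GET_STATUS */
	ctrl.wValue = 0;
	ctrl.wIndex = 0;
	ctrl.wLength = sizeof(status);
	ctrl.timeout = 1000;		/* milliseconds */
	ctrl.data = &status;
	ioctl(fd, USBDEVFS_CONTROL, &ctrl);

	ioctl(fd, USBDEVFS_RELEASEINTERFACE, &ifnum);
	close(fd);
	return status;
}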
static int usbdev_release(struct inode *inode, struct file *file) { struct usb_dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; unsigned int ifnum; struct async *as; usb_lock_device(dev); usb_hub_release_all_ports(dev, ps); /* Protect against simultaneous resume */ mutex_lock(&usbfs_mutex); list_del_init(&ps->list); mutex_unlock(&usbfs_mutex); for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed); ifnum++) { if (test_bit(ifnum, &ps->ifclaimed)) releaseintf(ps, ifnum); } destroy_all_async(ps); if (!ps->suspend_allowed) usb_autosuspend_device(dev); usb_unlock_device(dev); usb_put_dev(dev); put_pid(ps->disc_pid); put_cred(ps->cred); as = async_getcompleted(ps); while (as) { free_async(as); as = async_getcompleted(ps); } kfree(ps); return 0; } static void usbfs_blocking_completion(struct urb *urb) { complete((struct completion *) urb->context); } /* * Much like usb_start_wait_urb, but returns status separately from * actual_length and uses a killable wait. */ static int usbfs_start_wait_urb(struct urb *urb, int timeout, unsigned int *actlen) { DECLARE_COMPLETION_ONSTACK(ctx); unsigned long expire; int rc; urb->context = &ctx; urb->complete = usbfs_blocking_completion; *actlen = 0; rc = usb_submit_urb(urb, GFP_KERNEL); if (unlikely(rc)) return rc; expire = (timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT); rc = wait_for_completion_killable_timeout(&ctx, expire); if (rc <= 0) { usb_kill_urb(urb); *actlen = urb->actual_length; if (urb->status != -ENOENT) ; /* Completed before it was killed */ else if (rc < 0) return -EINTR; else return -ETIMEDOUT; } *actlen = urb->actual_length; return urb->status; } static int do_proc_control(struct usb_dev_state *ps, struct usbdevfs_ctrltransfer *ctrl) { struct usb_device *dev = ps->dev; unsigned int tmo; unsigned char *tbuf; unsigned int wLength, actlen; int i, pipe, ret; struct urb *urb = NULL; struct usb_ctrlrequest *dr = NULL; ret = check_ctrlrecip(ps, ctrl->bRequestType, ctrl->bRequest, ctrl->wIndex); if (ret) return ret; wLength = ctrl->wLength; /* To suppress 64k PAGE_SIZE warning */ if (wLength > PAGE_SIZE) return -EINVAL; ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); if (ret) return ret; ret = -ENOMEM; tbuf = (unsigned char *)__get_free_page(GFP_KERNEL); if (!tbuf) goto done; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) goto done; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) goto done; dr->bRequestType = ctrl->bRequestType; dr->bRequest = ctrl->bRequest; dr->wValue = cpu_to_le16(ctrl->wValue); dr->wIndex = cpu_to_le16(ctrl->wIndex); dr->wLength = cpu_to_le16(ctrl->wLength); tmo = ctrl->timeout; snoop(&dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", ctrl->bRequestType, ctrl->bRequest, ctrl->wValue, ctrl->wIndex, ctrl->wLength); if ((ctrl->bRequestType & USB_DIR_IN) && wLength) { pipe = usb_rcvctrlpipe(dev, 0); usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf, wLength, NULL, NULL); snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); /* Linger a bit, prior to the next control message. 
*/ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen); if (!i && actlen) { if (copy_to_user(ctrl->data, tbuf, actlen)) { ret = -EFAULT; goto done; } } } else { if (wLength) { if (copy_from_user(tbuf, ctrl->data, wLength)) { ret = -EFAULT; goto done; } } pipe = usb_sndctrlpipe(dev, 0); usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf, wLength, NULL, NULL); snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, tbuf, wLength); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); /* Linger a bit, prior to the next control message. */ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0); } if (i < 0 && i != -EPIPE) { dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL " "failed cmd %s rqt %u rq %u len %u ret %d\n", current->comm, ctrl->bRequestType, ctrl->bRequest, ctrl->wLength, i); } ret = (i < 0 ? i : actlen); done: kfree(dr); usb_free_urb(urb); free_page((unsigned long) tbuf); usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); return ret; } static int proc_control(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_ctrltransfer ctrl; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; return do_proc_control(ps, &ctrl); } static int do_proc_bulk(struct usb_dev_state *ps, struct usbdevfs_bulktransfer *bulk) { struct usb_device *dev = ps->dev; unsigned int tmo, len1, len2, pipe; unsigned char *tbuf; int i, ret; struct urb *urb = NULL; struct usb_host_endpoint *ep; ret = findintfep(ps->dev, bulk->ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; len1 = bulk->len; if (len1 >= (INT_MAX - sizeof(struct urb))) return -EINVAL; if (bulk->ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(dev, bulk->ep & 0x7f); else pipe = usb_sndbulkpipe(dev, bulk->ep & 0x7f); ep = usb_pipe_endpoint(dev, pipe); if (!ep || !usb_endpoint_maxp(&ep->desc)) return -EINVAL; ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) return ret; /* * len1 can be almost arbitrarily large. Don't WARN if it's * too big, just fail the request. */ ret = -ENOMEM; tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN); if (!tbuf) goto done; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) goto done; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, dev, pipe, tbuf, len1, NULL, NULL, ep->desc.bInterval); } else { usb_fill_bulk_urb(urb, dev, pipe, tbuf, len1, NULL, NULL); } tmo = bulk->timeout; if (bulk->ep & 0x80) { snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &len2); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2); if (!i && len2) { if (copy_to_user(bulk->data, tbuf, len2)) { ret = -EFAULT; goto done; } } } else { if (len1) { if (copy_from_user(tbuf, bulk->data, len1)) { ret = -EFAULT; goto done; } } snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &len2); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0); } ret = (i < 0 ? 
i : len2); done: usb_free_urb(urb); kfree(tbuf); usbfs_decrease_memory_usage(len1 + sizeof(struct urb)); return ret; } static int proc_bulk(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_bulktransfer bulk; if (copy_from_user(&bulk, arg, sizeof(bulk))) return -EFAULT; return do_proc_bulk(ps, &bulk); } static void check_reset_of_active_ep(struct usb_device *udev, unsigned int epnum, char *ioctl_name) { struct usb_host_endpoint **eps; struct usb_host_endpoint *ep; eps = (epnum & USB_DIR_IN) ? udev->ep_in : udev->ep_out; ep = eps[epnum & 0x0f]; if (ep && !list_empty(&ep->urb_list)) dev_warn(&udev->dev, "Process %d (%s) called USBDEVFS_%s for active endpoint 0x%02x\n", task_pid_nr(current), current->comm, ioctl_name, epnum); } static int proc_resetep(struct usb_dev_state *ps, void __user *arg) { unsigned int ep; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; check_reset_of_active_ep(ps->dev, ep, "RESETEP"); usb_reset_endpoint(ps->dev, ep); return 0; } static int proc_clearhalt(struct usb_dev_state *ps, void __user *arg) { unsigned int ep; int pipe; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT"); if (ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f); else pipe = usb_sndbulkpipe(ps->dev, ep & 0x7f); return usb_clear_halt(ps->dev, pipe); } static int proc_getdriver(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_getdriver gd; struct usb_interface *intf; int ret; if (copy_from_user(&gd, arg, sizeof(gd))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, gd.interface); if (!intf || !intf->dev.driver) ret = -ENODATA; else { strscpy(gd.driver, intf->dev.driver->name, sizeof(gd.driver)); ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0); } return ret; } static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_connectinfo ci; memset(&ci, 0, sizeof(ci)); ci.devnum = ps->dev->devnum; ci.slow = ps->dev->speed == USB_SPEED_LOW; if (copy_to_user(arg, &ci, sizeof(ci))) return -EFAULT; return 0; } static int proc_conninfo_ex(struct usb_dev_state *ps, void __user *arg, size_t size) { struct usbdevfs_conninfo_ex ci; struct usb_device *udev = ps->dev; if (size < sizeof(ci.size)) return -EINVAL; memset(&ci, 0, sizeof(ci)); ci.size = sizeof(ci); ci.busnum = udev->bus->busnum; ci.devnum = udev->devnum; ci.speed = udev->speed; while (udev && udev->portnum != 0) { if (++ci.num_ports <= ARRAY_SIZE(ci.ports)) ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports] = udev->portnum; udev = udev->parent; } if (ci.num_ports < ARRAY_SIZE(ci.ports)) memmove(&ci.ports[0], &ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports], ci.num_ports); if (copy_to_user(arg, &ci, min(sizeof(ci), size))) return -EFAULT; return 0; } static int proc_resetdevice(struct usb_dev_state *ps) { struct usb_host_config *actconfig = ps->dev->actconfig; struct usb_interface *interface; int i, number; /* Don't allow a device reset if the process has dropped the * privilege to do such things and any of the interfaces are * currently claimed. 
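* For example, if another driver such as usbhid still owns one of the * interfaces, a deprivileged usbfs client gets -EACCES here instead of * being able to reset the device out from under that driver.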
*/ if (ps->privileges_dropped && actconfig) { for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) { interface = actconfig->interface[i]; number = interface->cur_altsetting->desc.bInterfaceNumber; if (usb_interface_claimed(interface) && !test_bit(number, &ps->ifclaimed)) { dev_warn(&ps->dev->dev, "usbfs: interface %d claimed by %s while '%s' resets device\n", number, interface->dev.driver->name, current->comm); return -EACCES; } } } return usb_reset_device(ps->dev); } static int proc_setintf(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_setinterface setintf; int ret; if (copy_from_user(&setintf, arg, sizeof(setintf))) return -EFAULT; ret = checkintf(ps, setintf.interface); if (ret) return ret; destroy_async_on_interface(ps, setintf.interface); return usb_set_interface(ps->dev, setintf.interface, setintf.altsetting); } static int proc_setconfig(struct usb_dev_state *ps, void __user *arg) { int u; int status = 0; struct usb_host_config *actconfig; if (get_user(u, (int __user *)arg)) return -EFAULT; actconfig = ps->dev->actconfig; /* Don't touch the device if any interfaces are claimed. * It could interfere with other drivers' operations, and if * an interface is claimed by usbfs it could easily deadlock. */ if (actconfig) { int i; for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) { if (usb_interface_claimed(actconfig->interface[i])) { dev_warn(&ps->dev->dev, "usbfs: interface %d claimed by %s " "while '%s' sets config #%d\n", actconfig->interface[i] ->cur_altsetting ->desc.bInterfaceNumber, actconfig->interface[i] ->dev.driver->name, current->comm, u); status = -EBUSY; break; } } } /* SET_CONFIGURATION is often abused as a "cheap" driver reset, * so avoid usb_set_configuration()'s kick to sysfs */ if (status == 0) { if (actconfig && actconfig->desc.bConfigurationValue == u) status = usb_reset_configuration(ps->dev); else status = usb_set_configuration(ps->dev, u); } return status; } static struct usb_memory * find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb) { struct usb_memory *usbm = NULL, *iter; unsigned long flags; unsigned long uurb_start = (unsigned long)uurb->buffer; spin_lock_irqsave(&ps->lock, flags); list_for_each_entry(iter, &ps->memory_list, memlist) { if (uurb_start >= iter->vm_start && uurb_start < iter->vm_start + iter->size) { if (uurb->buffer_length > iter->vm_start + iter->size - uurb_start) { usbm = ERR_PTR(-EINVAL); } else { usbm = iter; usbm->urb_use_count++; } break; } } spin_unlock_irqrestore(&ps->lock, flags); return usbm; } static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb, struct usbdevfs_iso_packet_desc __user *iso_frame_desc, void __user *arg, sigval_t userurb_sigval) { struct usbdevfs_iso_packet_desc *isopkt = NULL; struct usb_host_endpoint *ep; struct async *as = NULL; struct usb_ctrlrequest *dr = NULL; unsigned int u, totlen, isofrmlen; int i, ret, num_sgs = 0, ifnum = -1; int number_of_packets = 0; unsigned int stream_id = 0; void *buf; bool is_in; bool allow_short = false; bool allow_zero = false; unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_NO_INTERRUPT; /* USBDEVFS_URB_ISO_ASAP is a special case */ if (uurb->type == USBDEVFS_URB_TYPE_ISO) mask |= USBDEVFS_URB_ISO_ASAP; if (uurb->flags & ~mask) return -EINVAL; if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) return -EINVAL; if (!(uurb->type == 
USBDEVFS_URB_TYPE_CONTROL && (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) { ifnum = findintfep(ps->dev, uurb->endpoint); if (ifnum < 0) return ifnum; ret = checkintf(ps, ifnum); if (ret) return ret; } ep = ep_to_host_endpoint(ps->dev, uurb->endpoint); if (!ep) return -ENOENT; is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0; u = 0; switch (uurb->type) { case USBDEVFS_URB_TYPE_CONTROL: if (!usb_endpoint_xfer_control(&ep->desc)) return -EINVAL; /* min 8 byte setup packet */ if (uurb->buffer_length < 8) return -EINVAL; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!dr) return -ENOMEM; if (copy_from_user(dr, uurb->buffer, 8)) { ret = -EFAULT; goto error; } if (uurb->buffer_length < (le16_to_cpu(dr->wLength) + 8)) { ret = -EINVAL; goto error; } ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest, le16_to_cpu(dr->wIndex)); if (ret) goto error; uurb->buffer_length = le16_to_cpu(dr->wLength); uurb->buffer += 8; if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) { is_in = true; uurb->endpoint |= USB_DIR_IN; } else { is_in = false; uurb->endpoint &= ~USB_DIR_IN; } if (is_in) allow_short = true; snoop(&ps->dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", dr->bRequestType, dr->bRequest, __le16_to_cpu(dr->wValue), __le16_to_cpu(dr->wIndex), __le16_to_cpu(dr->wLength)); u = sizeof(struct usb_ctrlrequest); break; case USBDEVFS_URB_TYPE_BULK: if (!is_in) allow_zero = true; else allow_short = true; switch (usb_endpoint_type(&ep->desc)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_ISOC: return -EINVAL; case USB_ENDPOINT_XFER_INT: /* allow single-shot interrupt transfers */ uurb->type = USBDEVFS_URB_TYPE_INTERRUPT; goto interrupt_urb; } num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE); if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize) num_sgs = 0; if (ep->streams) stream_id = uurb->stream_id; break; case USBDEVFS_URB_TYPE_INTERRUPT: if (!usb_endpoint_xfer_int(&ep->desc)) return -EINVAL; interrupt_urb: if (!is_in) allow_zero = true; else allow_short = true; break; case USBDEVFS_URB_TYPE_ISO: /* arbitrary limit */ if (uurb->number_of_packets < 1 || uurb->number_of_packets > 128) return -EINVAL; if (!usb_endpoint_xfer_isoc(&ep->desc)) return -EINVAL; number_of_packets = uurb->number_of_packets; isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) * number_of_packets; isopkt = memdup_user(iso_frame_desc, isofrmlen); if (IS_ERR(isopkt)) { ret = PTR_ERR(isopkt); isopkt = NULL; goto error; } for (totlen = u = 0; u < number_of_packets; u++) { /* * arbitrary limit needed for USB 3.1 Gen2 * sizemax: 96 DPs at SSP, 96 * 1024 = 98304 */ if (isopkt[u].length > 98304) { ret = -EINVAL; goto error; } totlen += isopkt[u].length; } u *= sizeof(struct usb_iso_packet_descriptor); uurb->buffer_length = totlen; break; default: return -EINVAL; } if (uurb->buffer_length > 0 && !access_ok(uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } as = alloc_async(number_of_packets); if (!as) { ret = -ENOMEM; goto error; } as->usbm = find_memory_area(ps, uurb); if (IS_ERR(as->usbm)) { ret = PTR_ERR(as->usbm); as->usbm = NULL; goto error; } /* do not use SG buffers when memory mapped segments * are in use */ if (as->usbm) num_sgs = 0; u += sizeof(struct async) + sizeof(struct urb) + (as->usbm ?
0 : uurb->buffer_length) + num_sgs * sizeof(struct scatterlist); ret = usbfs_increase_memory_usage(u); if (ret) goto error; as->mem_usage = u; if (num_sgs) { as->urb->sg = kmalloc_array(num_sgs, sizeof(struct scatterlist), GFP_KERNEL | __GFP_NOWARN); if (!as->urb->sg) { ret = -ENOMEM; goto error; } as->urb->num_sgs = num_sgs; sg_init_table(as->urb->sg, as->urb->num_sgs); totlen = uurb->buffer_length; for (i = 0; i < as->urb->num_sgs; i++) { u = (totlen > USB_SG_SIZE) ? USB_SG_SIZE : totlen; buf = kmalloc(u, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto error; } sg_set_buf(&as->urb->sg[i], buf, u); if (!is_in) { if (copy_from_user(buf, uurb->buffer, u)) { ret = -EFAULT; goto error; } uurb->buffer += u; } totlen -= u; } } else if (uurb->buffer_length > 0) { if (as->usbm) { unsigned long uurb_start = (unsigned long)uurb->buffer; as->urb->transfer_buffer = as->usbm->mem + (uurb_start - as->usbm->vm_start); } else { as->urb->transfer_buffer = kmalloc(uurb->buffer_length, GFP_KERNEL | __GFP_NOWARN); if (!as->urb->transfer_buffer) { ret = -ENOMEM; goto error; } if (!is_in) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } } else if (uurb->type == USBDEVFS_URB_TYPE_ISO) { /* * Isochronous input data may end up being * discontiguous if some of the packets are * short. Clear the buffer so that the gaps * don't leak kernel data to userspace. */ memset(as->urb->transfer_buffer, 0, uurb->buffer_length); } } } as->urb->dev = ps->dev; as->urb->pipe = (uurb->type << 30) | __create_pipe(ps->dev, uurb->endpoint & 0xf) | (uurb->endpoint & USB_DIR_IN); /* This tedious sequence is necessary because the URB_* flags * are internal to the kernel and subject to change, whereas * the USBDEVFS_URB_* flags are a user API and must not be changed. */ u = (is_in ? 
URB_DIR_IN : URB_DIR_OUT); if (uurb->flags & USBDEVFS_URB_ISO_ASAP) u |= URB_ISO_ASAP; if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) u |= URB_SHORT_NOT_OK; if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) u |= URB_ZERO_PACKET; if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) u |= URB_NO_INTERRUPT; as->urb->transfer_flags = u; if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); as->urb->transfer_buffer_length = uurb->buffer_length; as->urb->setup_packet = (unsigned char *)dr; dr = NULL; as->urb->start_frame = uurb->start_frame; as->urb->number_of_packets = number_of_packets; as->urb->stream_id = stream_id; if (ep->desc.bInterval) { if (uurb->type == USBDEVFS_URB_TYPE_ISO || ps->dev->speed == USB_SPEED_HIGH || ps->dev->speed >= USB_SPEED_SUPER) as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); else as->urb->interval = ep->desc.bInterval; } as->urb->context = as; as->urb->complete = async_completed; for (totlen = u = 0; u < number_of_packets; u++) { as->urb->iso_frame_desc[u].offset = totlen; as->urb->iso_frame_desc[u].length = isopkt[u].length; totlen += isopkt[u].length; } kfree(isopkt); isopkt = NULL; as->ps = ps; as->userurb = arg; as->userurb_sigval = userurb_sigval; if (as->usbm) { unsigned long uurb_start = (unsigned long)uurb->buffer; as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; as->urb->transfer_dma = as->usbm->dma_handle + (uurb_start - as->usbm->vm_start); } else if (is_in && uurb->buffer_length > 0) as->userbuffer = uurb->buffer; as->signr = uurb->signr; as->ifnum = ifnum; as->pid = get_pid(task_pid(current)); as->cred = get_current_cred(); snoop_urb(ps->dev, as->userurb, as->urb->pipe, as->urb->transfer_buffer_length, 0, SUBMIT, NULL, 0); if (!is_in) snoop_urb_data(as->urb, as->urb->transfer_buffer_length); async_newpending(as); if (usb_endpoint_xfer_bulk(&ep->desc)) { spin_lock_irq(&ps->lock); /* Not exactly the endpoint address; the direction bit is * shifted to the 0x10 position so that the value will be * between 0 and 31. */ as->bulk_addr = usb_endpoint_num(&ep->desc) | ((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) >> 3); /* If this bulk URB is the start of a new transfer, re-enable * the endpoint. Otherwise mark it as a continuation URB. */ if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION) as->bulk_status = AS_CONTINUATION; else ps->disabled_bulk_eps &= ~(1 << as->bulk_addr); /* Don't accept continuation URBs if the endpoint is * disabled because of an earlier error. 
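* (The bit gets set when an earlier URB of the same transfer fails, so * the remaining continuation URBs of a broken scatter-gather operation * are rejected with -EREMOTEIO instead of being submitted.)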
*/ if (ps->disabled_bulk_eps & (1 << as->bulk_addr)) ret = -EREMOTEIO; else ret = usb_submit_urb(as->urb, GFP_ATOMIC); spin_unlock_irq(&ps->lock); } else { ret = usb_submit_urb(as->urb, GFP_KERNEL); } if (ret) { dev_printk(KERN_DEBUG, &ps->dev->dev, "usbfs: usb_submit_urb returned %d\n", ret); snoop_urb(ps->dev, as->userurb, as->urb->pipe, 0, ret, COMPLETE, NULL, 0); async_removepending(as); goto error; } return 0; error: kfree(isopkt); kfree(dr); if (as) free_async(as); return ret; } static int proc_submiturb(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; sigval_t userurb_sigval; if (copy_from_user(&uurb, arg, sizeof(uurb))) return -EFAULT; memset(&userurb_sigval, 0, sizeof(userurb_sigval)); userurb_sigval.sival_ptr = arg; return proc_do_submiturb(ps, &uurb, (((struct usbdevfs_urb __user *)arg)->iso_frame_desc), arg, userurb_sigval); } static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); as = async_getpending(ps, arg); if (!as) { spin_unlock_irqrestore(&ps->lock, flags); return -EINVAL; } urb = as->urb; usb_get_urb(urb); spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); return 0; } static void compute_isochronous_actual_length(struct urb *urb) { unsigned int i; if (urb->number_of_packets > 0) { urb->actual_length = 0; for (i = 0; i < urb->number_of_packets; i++) urb->actual_length += urb->iso_frame_desc[i].actual_length; } } static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; } if (put_user(as->status, &userurb->status)) goto err_out; if (put_user(urb->actual_length, &userurb->actual_length)) goto err_out; if (put_user(urb->error_count, &userurb->error_count)) goto err_out; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) goto err_out; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) goto err_out; } } if (put_user(addr, (void __user * __user *)arg)) return -EFAULT; return 0; err_out: return -EFAULT; } static struct async *reap_as(struct usb_dev_state *ps) { DECLARE_WAITQUEUE(wait, current); struct async *as = NULL; struct usb_device *dev = ps->dev; add_wait_queue(&ps->wait, &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); as = async_getcompleted(ps); if (as || !connected(ps)) break; if (signal_pending(current)) break; usb_unlock_device(dev); schedule(); usb_lock_device(dev); } remove_wait_queue(&ps->wait, &wait); set_current_state(TASK_RUNNING); return as; } static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval; snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -ENODEV; } static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) { int retval; struct async *as; as = async_getcompleted(ps); if (as) { snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); } else { retval = 
(connected(ps) ? -EAGAIN : -ENODEV); } return retval; } #ifdef CONFIG_COMPAT static int proc_control_compat(struct usb_dev_state *ps, struct usbdevfs_ctrltransfer32 __user *p32) { struct usbdevfs_ctrltransfer ctrl; u32 udata; if (copy_from_user(&ctrl, p32, sizeof(*p32) - sizeof(compat_caddr_t)) || get_user(udata, &p32->data)) return -EFAULT; ctrl.data = compat_ptr(udata); return do_proc_control(ps, &ctrl); } static int proc_bulk_compat(struct usb_dev_state *ps, struct usbdevfs_bulktransfer32 __user *p32) { struct usbdevfs_bulktransfer bulk; compat_caddr_t addr; if (get_user(bulk.ep, &p32->ep) || get_user(bulk.len, &p32->len) || get_user(bulk.timeout, &p32->timeout) || get_user(addr, &p32->data)) return -EFAULT; bulk.data = compat_ptr(addr); return do_proc_bulk(ps, &bulk); } static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal32 ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext.sival_int = ds.context; return 0; } static int get_urb32(struct usbdevfs_urb *kurb, struct usbdevfs_urb32 __user *uurb) { struct usbdevfs_urb32 urb32; if (copy_from_user(&urb32, uurb, sizeof(*uurb))) return -EFAULT; kurb->type = urb32.type; kurb->endpoint = urb32.endpoint; kurb->status = urb32.status; kurb->flags = urb32.flags; kurb->buffer = compat_ptr(urb32.buffer); kurb->buffer_length = urb32.buffer_length; kurb->actual_length = urb32.actual_length; kurb->start_frame = urb32.start_frame; kurb->number_of_packets = urb32.number_of_packets; kurb->error_count = urb32.error_count; kurb->signr = urb32.signr; kurb->usercontext = compat_ptr(urb32.usercontext); return 0; } static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; sigval_t userurb_sigval; if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg)) return -EFAULT; memset(&userurb_sigval, 0, sizeof(userurb_sigval)); userurb_sigval.sival_int = ptr_to_compat(arg); return proc_do_submiturb(ps, &uurb, ((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc, arg, userurb_sigval); } static int processcompl_compat(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb32 __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; } if (put_user(as->status, &userurb->status)) return -EFAULT; if (put_user(urb->actual_length, &userurb->actual_length)) return -EFAULT; if (put_user(urb->error_count, &userurb->error_count)) return -EFAULT; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) return -EFAULT; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) return -EFAULT; } } if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) return -EFAULT; return 0; } static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval; snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -ENODEV; } static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *arg) { int retval; struct async *as; as = async_getcompleted(ps); if (as) { 
snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); } else { retval = (connected(ps) ? -EAGAIN : -ENODEV); } return retval; } #endif static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext.sival_ptr = ds.context; return 0; } static int proc_claiminterface(struct usb_dev_state *ps, void __user *arg) { unsigned int ifnum; if (get_user(ifnum, (unsigned int __user *)arg)) return -EFAULT; return claimintf(ps, ifnum); } static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg) { unsigned int ifnum; int ret; if (get_user(ifnum, (unsigned int __user *)arg)) return -EFAULT; ret = releaseintf(ps, ifnum); if (ret < 0) return ret; destroy_async_on_interface(ps, ifnum); return 0; } static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl) { int size; void *buf = NULL; int retval = 0; struct usb_interface *intf = NULL; struct usb_driver *driver = NULL; if (ps->privileges_dropped) return -EACCES; if (!connected(ps)) return -ENODEV; /* alloc buffer */ size = _IOC_SIZE(ctl->ioctl_code); if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) { if (copy_from_user(buf, ctl->data, size)) { kfree(buf); return -EFAULT; } } else { memset(buf, 0, size); } } if (ps->dev->state != USB_STATE_CONFIGURED) retval = -EHOSTUNREACH; else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno))) retval = -EINVAL; else switch (ctl->ioctl_code) { /* disconnect kernel driver from interface */ case USBDEVFS_DISCONNECT: if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } else retval = -ENODATA; break; /* let kernel drivers try to (re)bind to the interface */ case USBDEVFS_CONNECT: if (!intf->dev.driver) retval = device_attach(&intf->dev); else retval = -EBUSY; break; /* talk directly to the interface's driver */ default: if (intf->dev.driver) driver = to_usb_driver(intf->dev.driver); if (driver == NULL || driver->unlocked_ioctl == NULL) { retval = -ENOTTY; } else { retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); if (retval == -ENOIOCTLCMD) retval = -ENOTTY; } } /* cleanup and return */ if (retval >= 0 && (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0 && size > 0 && copy_to_user(ctl->data, buf, size) != 0) retval = -EFAULT; kfree(buf); return retval; } static int proc_ioctl_default(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_ioctl ctrl; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; return proc_ioctl(ps, &ctrl); } #ifdef CONFIG_COMPAT static int proc_ioctl_compat(struct usb_dev_state *ps, compat_uptr_t arg) { struct usbdevfs_ioctl32 ioc32; struct usbdevfs_ioctl ctrl; if (copy_from_user(&ioc32, compat_ptr(arg), sizeof(ioc32))) return -EFAULT; ctrl.ifno = ioc32.ifno; ctrl.ioctl_code = ioc32.ioctl_code; ctrl.data = compat_ptr(ioc32.data); return proc_ioctl(ps, &ctrl); } #endif static int proc_claim_port(struct usb_dev_state *ps, void __user *arg) { unsigned portnum; int rc; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; rc = usb_hub_claim_port(ps->dev, portnum, ps); if (rc == 0) snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n", portnum, task_pid_nr(current), current->comm); return rc; } static int proc_release_port(struct 
usb_dev_state *ps, void __user *arg) { unsigned portnum; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; return usb_hub_release_port(ps->dev, portnum, ps); } static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg) { __u32 caps; caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM | USBDEVFS_CAP_REAP_AFTER_DISCONNECT | USBDEVFS_CAP_MMAP | USBDEVFS_CAP_DROP_PRIVILEGES | USBDEVFS_CAP_CONNINFO_EX | MAYBE_CAP_SUSPEND; if (!ps->dev->bus->no_stop_on_short) caps |= USBDEVFS_CAP_BULK_CONTINUATION; if (ps->dev->bus->sg_tablesize) caps |= USBDEVFS_CAP_BULK_SCATTER_GATHER; if (put_user(caps, (__u32 __user *)arg)) return -EFAULT; return 0; } static int proc_disconnect_claim(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnect_claim dc; struct usb_interface *intf; if (copy_from_user(&dc, arg, sizeof(dc))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, dc.interface); if (!intf) return -EINVAL; if (intf->dev.driver) { struct usb_driver *driver = to_usb_driver(intf->dev.driver); if (ps->privileges_dropped) return -EACCES; if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) != 0) return -EBUSY; if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) == 0) return -EBUSY; dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } return claimintf(ps, dc.interface); } static int proc_alloc_streams(struct usb_dev_state *ps, void __user *arg) { unsigned num_streams, num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf; int r; r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps, &eps, &intf); if (r) return r; destroy_async_on_interface(ps, intf->altsetting[0].desc.bInterfaceNumber); r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL); kfree(eps); return r; } static int proc_free_streams(struct usb_dev_state *ps, void __user *arg) { unsigned num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf; int r; r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf); if (r) return r; destroy_async_on_interface(ps, intf->altsetting[0].desc.bInterfaceNumber); r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL); kfree(eps); return r; } static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg) { u32 data; if (copy_from_user(&data, arg, sizeof(data))) return -EFAULT; /* This is a one way operation. Once privileges are * dropped, you cannot regain them. You may however reissue * this ioctl to shrink the allowed interfaces mask. 
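* * Illustrative userspace usage, with fd being a usbfs file descriptor: * * __u32 mask = 0x1; ioctl(fd, USBDEVFS_DROP_PRIVILEGES, &mask); * * leaves the process able to claim only interface 0; claiming any other * interface then fails with -EACCES, and privileged operations such as * USBDEVFS_IOCTL are refused outright.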
*/ ps->interface_allowed_mask &= data; ps->privileges_dropped = true; return 0; } static int proc_forbid_suspend(struct usb_dev_state *ps) { int ret = 0; if (ps->suspend_allowed) { ret = usb_autoresume_device(ps->dev); if (ret == 0) ps->suspend_allowed = false; else if (ret != -ENODEV) ret = -EIO; } return ret; } static int proc_allow_suspend(struct usb_dev_state *ps) { if (!connected(ps)) return -ENODEV; WRITE_ONCE(ps->not_yet_resumed, 1); if (!ps->suspend_allowed) { usb_autosuspend_device(ps->dev); ps->suspend_allowed = true; } return 0; } static int proc_wait_for_resume(struct usb_dev_state *ps) { int ret; usb_unlock_device(ps->dev); ret = wait_event_interruptible(ps->wait_for_resume, READ_ONCE(ps->not_yet_resumed) == 0); usb_lock_device(ps->dev); if (ret != 0) return -EINTR; return proc_forbid_suspend(ps); } /* * NOTE: All requests here that have interface numbers as parameters * are assuming that somehow the configuration has been prevented from * changing. But there's no mechanism to ensure that... */ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, void __user *p) { struct usb_dev_state *ps = file->private_data; struct inode *inode = file_inode(file); struct usb_device *dev = ps->dev; int ret = -ENOTTY; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; usb_lock_device(dev); /* Reap operations are allowed even after disconnection */ switch (cmd) { case USBDEVFS_REAPURB: snoop(&dev->dev, "%s: REAPURB\n", __func__); ret = proc_reapurb(ps, p); goto done; case USBDEVFS_REAPURBNDELAY: snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__); ret = proc_reapurbnonblock(ps, p); goto done; #ifdef CONFIG_COMPAT case USBDEVFS_REAPURB32: snoop(&dev->dev, "%s: REAPURB32\n", __func__); ret = proc_reapurb_compat(ps, p); goto done; case USBDEVFS_REAPURBNDELAY32: snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__); ret = proc_reapurbnonblock_compat(ps, p); goto done; #endif } if (!connected(ps)) { usb_unlock_device(dev); return -ENODEV; } switch (cmd) { case USBDEVFS_CONTROL: snoop(&dev->dev, "%s: CONTROL\n", __func__); ret = proc_control(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_BULK: snoop(&dev->dev, "%s: BULK\n", __func__); ret = proc_bulk(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_RESETEP: snoop(&dev->dev, "%s: RESETEP\n", __func__); ret = proc_resetep(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_RESET: snoop(&dev->dev, "%s: RESET\n", __func__); ret = proc_resetdevice(ps); break; case USBDEVFS_CLEAR_HALT: snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__); ret = proc_clearhalt(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_GETDRIVER: snoop(&dev->dev, "%s: GETDRIVER\n", __func__); ret = proc_getdriver(ps, p); break; case USBDEVFS_CONNECTINFO: snoop(&dev->dev, "%s: CONNECTINFO\n", __func__); ret = proc_connectinfo(ps, p); break; case USBDEVFS_SETINTERFACE: snoop(&dev->dev, "%s: SETINTERFACE\n", __func__); ret = proc_setintf(ps, p); break; case USBDEVFS_SETCONFIGURATION: snoop(&dev->dev, "%s: SETCONFIGURATION\n", __func__); ret = proc_setconfig(ps, p); break; case USBDEVFS_SUBMITURB: snoop(&dev->dev, "%s: SUBMITURB\n", __func__); ret = proc_submiturb(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; #ifdef CONFIG_COMPAT case USBDEVFS_CONTROL32: snoop(&dev->dev, "%s: CONTROL32\n", __func__); ret = 
proc_control_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_BULK32: snoop(&dev->dev, "%s: BULK32\n", __func__); ret = proc_bulk_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_DISCSIGNAL32: snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__); ret = proc_disconnectsignal_compat(ps, p); break; case USBDEVFS_SUBMITURB32: snoop(&dev->dev, "%s: SUBMITURB32\n", __func__); ret = proc_submiturb_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_IOCTL32: snoop(&dev->dev, "%s: IOCTL32\n", __func__); ret = proc_ioctl_compat(ps, ptr_to_compat(p)); break; #endif case USBDEVFS_DISCARDURB: snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p); ret = proc_unlinkurb(ps, p); break; case USBDEVFS_DISCSIGNAL: snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__); ret = proc_disconnectsignal(ps, p); break; case USBDEVFS_CLAIMINTERFACE: snoop(&dev->dev, "%s: CLAIMINTERFACE\n", __func__); ret = proc_claiminterface(ps, p); break; case USBDEVFS_RELEASEINTERFACE: snoop(&dev->dev, "%s: RELEASEINTERFACE\n", __func__); ret = proc_releaseinterface(ps, p); break; case USBDEVFS_IOCTL: snoop(&dev->dev, "%s: IOCTL\n", __func__); ret = proc_ioctl_default(ps, p); break; case USBDEVFS_CLAIM_PORT: snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__); ret = proc_claim_port(ps, p); break; case USBDEVFS_RELEASE_PORT: snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__); ret = proc_release_port(ps, p); break; case USBDEVFS_GET_CAPABILITIES: ret = proc_get_capabilities(ps, p); break; case USBDEVFS_DISCONNECT_CLAIM: ret = proc_disconnect_claim(ps, p); break; case USBDEVFS_ALLOC_STREAMS: ret = proc_alloc_streams(ps, p); break; case USBDEVFS_FREE_STREAMS: ret = proc_free_streams(ps, p); break; case USBDEVFS_DROP_PRIVILEGES: ret = proc_drop_privileges(ps, p); break; case USBDEVFS_GET_SPEED: ret = ps->dev->speed; break; case USBDEVFS_FORBID_SUSPEND: ret = proc_forbid_suspend(ps); break; case USBDEVFS_ALLOW_SUSPEND: ret = proc_allow_suspend(ps); break; case USBDEVFS_WAIT_FOR_RESUME: ret = proc_wait_for_resume(ps); break; } /* Handle variable-length commands */ switch (cmd & ~IOCSIZE_MASK) { case USBDEVFS_CONNINFO_EX(0): ret = proc_conninfo_ex(ps, p, _IOC_SIZE(cmd)); break; } done: usb_unlock_device(dev); if (ret >= 0) inode_set_atime_to_ts(inode, current_time(inode)); return ret; } static long usbdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); return ret; } /* No kernel lock - fine */ static __poll_t usbdev_poll(struct file *file, struct poll_table_struct *wait) { struct usb_dev_state *ps = file->private_data; __poll_t mask = 0; poll_wait(file, &ps->wait, wait); if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) mask |= EPOLLOUT | EPOLLWRNORM; if (!connected(ps)) mask |= EPOLLHUP; if (list_empty(&ps->list)) mask |= EPOLLERR; return mask; } const struct file_operations usbdev_file_operations = { .owner = THIS_MODULE, .llseek = no_seek_end_llseek, .read = usbdev_read, .poll = usbdev_poll, .unlocked_ioctl = usbdev_ioctl, .compat_ioctl = compat_ptr_ioctl, .mmap = usbdev_mmap, .open = usbdev_open, .release = usbdev_release, }; static void usbdev_remove(struct usb_device *udev) { struct usb_dev_state *ps; /* Protect against simultaneous resume */ mutex_lock(&usbfs_mutex); while (!list_empty(&udev->filelist)) { ps = list_entry(udev->filelist.next, struct usb_dev_state, list); 
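/* * Cancel this file's outstanding URBs and wake every waiter (pollers, * blocked reapers, and resume waiters) so they can observe the * disconnect, then notify the process if it asked for a disconnect * signal. */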
destroy_all_async(ps); wake_up_all(&ps->wait); WRITE_ONCE(ps->not_yet_resumed, 0); wake_up_all(&ps->wait_for_resume); list_del_init(&ps->list); if (ps->discsignr) kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext, ps->disc_pid, ps->cred); } mutex_unlock(&usbfs_mutex); } static int usbdev_notify(struct notifier_block *self, unsigned long action, void *dev) { switch (action) { case USB_DEVICE_ADD: break; case USB_DEVICE_REMOVE: usbdev_remove(dev); break; } return NOTIFY_OK; } static struct notifier_block usbdev_nb = { .notifier_call = usbdev_notify, }; static struct cdev usb_device_cdev; int __init usb_devio_init(void) { int retval; retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX, "usb_device"); if (retval) { printk(KERN_ERR "Unable to register minors for usb_device\n"); goto out; } cdev_init(&usb_device_cdev, &usbdev_file_operations); retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX); if (retval) { printk(KERN_ERR "Unable to get usb_device major %d\n", USB_DEVICE_MAJOR); goto error_cdev; } usb_register_notify(&usbdev_nb); out: return retval; error_cdev: unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); goto out; } void usb_devio_cleanup(void) { usb_unregister_notify(&usbdev_nb); cdev_del(&usb_device_cdev); unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); }
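The file above is consumed from userspace through ioctl(2) on the /dev/bus/usb character nodes it implements. As a minimal sketch of the USBDEVFS_CONTROL path handled by do_proc_control() (not part of the kernel source; the bus/device numbers are hypothetical and error handling is trimmed), a standalone program fetching the device descriptor could look like this:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/ch9.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	unsigned char desc[18];	/* a device descriptor is 18 bytes */
	int fd, len;

	/* Hypothetical bus/device numbers; adjust to a real device. */
	fd = open("/dev/bus/usb/001/002", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct usbdevfs_ctrltransfer ctrl = {
		.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		.bRequest = USB_REQ_GET_DESCRIPTOR,
		.wValue = USB_DT_DEVICE << 8,	/* descriptor type in the high byte */
		.wIndex = 0,
		.wLength = sizeof(desc),
		.timeout = 1000,		/* milliseconds */
		.data = desc,
	};

	/* Returns the number of bytes transferred on success. */
	len = ioctl(fd, USBDEVFS_CONTROL, &ctrl);
	if (len < 0)
		perror("USBDEVFS_CONTROL");
	else
		printf("got %d bytes, idVendor=%02x%02x\n", len, desc[9], desc[8]);

	close(fd);
	return 0;
}

On success the ioctl returns the number of bytes actually transferred, matching the ret = (i < 0 ? i : actlen) result in do_proc_control().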
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/indirect.c * * from * * linux/fs/ext4/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Goal-directed block allocation by Stephen Tweedie * (sct@redhat.com), 1993, 1998 */ #include "ext4_jbd2.h" #include "truncate.h" #include <linux/dax.h> #include <linux/uio.h> #include <trace/events/ext4.h> typedef struct { __le32 *p; __le32 key; struct
buffer_head *bh; } Indirect; static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) { p->key = *(p->p = v); p->bh = bh; } /** * ext4_block_to_path - parse the block number into array of offsets * @inode: inode in question (we are only interested in its superblock) * @i_block: block number to be parsed * @offsets: array to store the offsets in * @boundary: set this non-zero if the referred-to block is likely to be * followed (on disk) by an indirect block. * * To store the locations of file's data ext4 uses a data structure common * for UNIX filesystems - tree of pointers anchored in the inode, with * data blocks at leaves and indirect blocks in intermediate nodes. * This function translates the block number into path in that tree - * return value is the path length and @offsets[n] is the offset of * pointer to (n+1)th node in the nth one. If @block is out of range * (negative or too large) warning is printed and zero returned. * * Note: function doesn't find node addresses, so no IO is needed. All * we need to know is the capacity of indirect blocks (taken from the * inode->i_sb). */ /* * Portability note: the last comparison (check that we fit into triple * indirect block) is spelled differently, because otherwise on an * architecture with 32-bit longs and 8Kb pages we might get into trouble * if our filesystem had 8Kb blocks. We might use long long, but that would * kill us on x86. Oh, well, at least the sign propagation does not matter - * i_block would have to be negative in the very beginning, so we would not * get there at all. */ static int ext4_block_to_path(struct inode *inode, ext4_lblk_t i_block, ext4_lblk_t offsets[4], int *boundary) { int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb); int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb); const long direct_blocks = EXT4_NDIR_BLOCKS, indirect_blocks = ptrs, double_blocks = (1 << (ptrs_bits * 2)); int n = 0; int final = 0; if (i_block < direct_blocks) { offsets[n++] = i_block; final = direct_blocks; } else if ((i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = EXT4_IND_BLOCK; offsets[n++] = i_block; final = ptrs; } else if ((i_block -= indirect_blocks) < double_blocks) { offsets[n++] = EXT4_DIND_BLOCK; offsets[n++] = i_block >> ptrs_bits; offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { offsets[n++] = EXT4_TIND_BLOCK; offsets[n++] = i_block >> (ptrs_bits * 2); offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else { ext4_warning(inode->i_sb, "block %lu > max in inode %lu", i_block + direct_blocks + indirect_blocks + double_blocks, inode->i_ino); } if (boundary) *boundary = final - 1 - (i_block & (ptrs - 1)); return n; } /** * ext4_get_branch - read the chain of indirect blocks leading to data * @inode: inode in question * @depth: depth of the chain (1 - direct pointer, etc.) * @offsets: offsets of pointers in inode/indirect blocks * @chain: place to store the result * @err: here we store the error value * * Function fills the array of triples <key, p, bh> and returns %NULL * if everything went OK or the pointer to the last filled triple * (incomplete one) otherwise. Upon the return chain[i].key contains * the number of (i+1)-th block in the chain (as it is stored in memory, * i.e. 
little-endian 32-bit), chain[i].p contains the address of that * number (it points into struct inode for i==0 and into the bh->b_data * for i>0) and chain[i].bh points to the buffer_head of the i-th indirect * block for i>0 and NULL for i==0. In other words, it holds the block * numbers of the chain, the addresses they were taken from (and where we can * verify that the chain did not change) and the buffer_heads hosting these * numbers. * * Function stops when it stumbles upon a zero pointer (absent block) * (pointer to last triple returned, *@err == 0) * or when it gets an IO error reading an indirect block * (ditto, *@err == -EIO) * or when it reads all @depth-1 indirect blocks successfully and finds * the whole chain, all the way to the data (returns %NULL, *err == 0). * * Needs to be called with * down_read(&EXT4_I(inode)->i_data_sem) */ static Indirect *ext4_get_branch(struct inode *inode, int depth, ext4_lblk_t *offsets, Indirect chain[4], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; unsigned int key; int ret = -EIO; *err = 0; /* i_data is not going away, no lock needed */ add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets); if (!p->key) goto no_block; while (--depth) { key = le32_to_cpu(p->key); if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) { /* the block was out of range */ ret = -EFSCORRUPTED; goto failure; } bh = sb_getblk(sb, key); if (unlikely(!bh)) { ret = -ENOMEM; goto failure; } if (!bh_uptodate_or_lock(bh)) { if (ext4_read_bh(bh, 0, NULL, false) < 0) { put_bh(bh); goto failure; } /* validate block references */ if (ext4_check_indirect_blockref(inode, bh)) { put_bh(bh); goto failure; } } add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets); /* Reader: end */ if (!p->key) goto no_block; } return NULL; failure: *err = ret; no_block: return p; } /** * ext4_find_near - find a place for allocation with sufficient locality * @inode: owner * @ind: descriptor of indirect block. * * This function returns the preferred place for block allocation. * It is used when the heuristic for sequential allocation fails. * Rules are: * + if there is a block to the left of our position - allocate near it. * + if the pointer will live in an indirect block - allocate near that block. * + if the pointer will live in the inode - allocate in the same * cylinder group. * * In the latter case we colour the starting block by the caller's PID to * prevent it from clashing with concurrent allocations for a different inode * in the same block group. The PID is used here so that functionally related * files will be close-by on-disk. * * Caller must make sure that @ind is valid and will stay that way. */ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) { struct ext4_inode_info *ei = EXT4_I(inode); __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; __le32 *p; /* Try to find previous block */ for (p = ind->p - 1; p >= start; p--) { if (*p) return le32_to_cpu(*p); } /* No such thing, so let's try location of indirect block */ if (ind->bh) return ind->bh->b_blocknr; /* * It is going to be referred to from the inode itself? OK, just put it * into the same cylinder group then. */ return ext4_inode_to_goal_block(inode); } /** * ext4_find_goal - find a preferred place for allocation. * @inode: owner * @block: block we want * @partial: pointer to the last triple within a chain * * Normally this function finds the preferred place for block allocation * and returns it. * Because this is only used for non-extent files, we limit the block nr * to 32 bits.
*/ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, Indirect *partial) { ext4_fsblk_t goal; /* * XXX need to get goal block from mballoc's data structures */ goal = ext4_find_near(inode, partial); goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; return goal; } /** * ext4_blks_to_allocate - Look up the block map and count the number * of direct blocks that need to be allocated for the given branch. * * @branch: chain of indirect blocks * @k: number of blocks needed for indirect blocks * @blks: number of data blocks to be mapped. * @blocks_to_boundary: the offset in the indirect block * * return the total number of blocks to be allocated, including the * direct and indirect blocks. */ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, int blocks_to_boundary) { unsigned int count = 0; /* * Simple case: the [t,d]indirect block(s) have not been allocated yet, * so it's clear that blocks on that path have not been allocated either */ if (k > 0) { /* right now we don't handle cross boundary allocation */ if (blks < blocks_to_boundary + 1) count += blks; else count += blocks_to_boundary + 1; return count; } count++; while (count < blks && count <= blocks_to_boundary && le32_to_cpu(*(branch[0].p + count)) == 0) { count++; } return count; } /** * ext4_alloc_branch() - allocate and set up a chain of blocks * @handle: handle for this transaction * @ar: structure describing the allocation request * @indirect_blks: number of allocated indirect blocks * @offsets: offsets (in the blocks) to store the pointers to next. * @branch: place to store the chain in. * * This function allocates blocks, zeroes out all but the last one, * links them into a chain and (if we are synchronous) writes them to disk. * In other words, it prepares a branch that can be spliced onto the * inode. It stores the information about that chain in the branch[], in * the same format as ext4_get_branch() would do. We are calling it after * we have read the existing part of the chain and partial points to the last * triple of that (one with zero ->key). Upon exit we have the same * picture as after a successful ext4_get_block(), except that in one * place the chain is disconnected - *branch->p is still zero (we did not * set the last link), but branch->key contains the number that should * be placed into *branch->p to fill that gap. * * If allocation fails we free all blocks we've allocated (and forget * their buffer_heads) and return the error value from the failed * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain * as described above and return 0. */ static int ext4_alloc_branch(handle_t *handle, struct ext4_allocation_request *ar, int indirect_blks, ext4_lblk_t *offsets, Indirect *branch) { struct buffer_head * bh; ext4_fsblk_t b, new_blocks[4]; __le32 *p; int i, j, err, len = 1; for (i = 0; i <= indirect_blks; i++) { if (i == indirect_blks) { new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err); } else { ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle, ar->inode, ar->goal, ar->flags & EXT4_MB_DELALLOC_RESERVED, NULL, &err); /* Simplify error cleanup...
*/ branch[i+1].bh = NULL; } if (err) { i--; goto failed; } branch[i].key = cpu_to_le32(new_blocks[i]); if (i == 0) continue; bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]); if (unlikely(!bh)) { err = -ENOMEM; goto failed; } lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); err = ext4_journal_get_create_access(handle, ar->inode->i_sb, bh, EXT4_JTR_NONE); if (err) { unlock_buffer(bh); goto failed; } memset(bh->b_data, 0, bh->b_size); p = branch[i].p = (__le32 *) bh->b_data + offsets[i]; b = new_blocks[i]; if (i == indirect_blks) len = ar->len; for (j = 0; j < len; j++) *p++ = cpu_to_le32(b++); BUFFER_TRACE(bh, "marking uptodate"); set_buffer_uptodate(bh); unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, ar->inode, bh); if (err) goto failed; } return 0; failed: if (i == indirect_blks) { /* Free data blocks */ ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i], ar->len, 0); i--; } for (; i >= 0; i--) { /* * We want to ext4_forget() only freshly allocated indirect * blocks. Buffer for new_blocks[i] is at branch[i+1].bh * (buffer at branch[0].bh is indirect block / inode already * existing before ext4_alloc_branch() was called). Also * because blocks are freshly allocated, we don't need to * revoke them which is why we don't set * EXT4_FREE_BLOCKS_METADATA. */ ext4_free_blocks(handle, ar->inode, branch[i+1].bh, new_blocks[i], 1, branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0); } return err; } /** * ext4_splice_branch() - splice the allocated branch onto the inode. * @handle: handle for this transaction * @ar: structure describing the allocation request * @where: location of missing link * @num: number of indirect blocks we are adding * * This function fills the missing link and does all housekeeping needed in * the inode (->i_blocks, etc.). In case of success we end up with the full * chain to the new block and return 0. */ static int ext4_splice_branch(handle_t *handle, struct ext4_allocation_request *ar, Indirect *where, int num) { int i; int err = 0; ext4_fsblk_t current_block; /* * If we're splicing into a [td]indirect block (as opposed to the * inode) then we need to get write access to the [td]indirect block * before the splice. */ if (where->bh) { BUFFER_TRACE(where->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, ar->inode->i_sb, where->bh, EXT4_JTR_NONE); if (err) goto err_out; } /* That's it */ *where->p = where->key; /* * Update the host buffer_head or inode to point to the just * allocated direct blocks */ if (num == 0 && ar->len > 1) { current_block = le32_to_cpu(where->key) + 1; for (i = 1; i < ar->len; i++) *(where->p + i) = cpu_to_le32(current_block++); } /* We are done with atomic stuff, now do the rest of housekeeping */ /* had we spliced it onto an indirect block? */ if (where->bh) { /* * If we spliced it onto an indirect block, we haven't * altered the inode. Note however that if it is being spliced * onto an indirect block at the very end of the file (the * file is growing) then we *will* alter the inode to reflect * the new i_size. But that is not done here - it is done in * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. */ ext4_debug("splicing indirect only\n"); BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh); if (err) goto err_out; } else { /* * OK, we spliced it into the inode itself on a direct block.
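* The missing link in that case lived in EXT4_I(inode)->i_data[], so * marking the inode dirty is what propagates the update to the journal.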
*/ err = ext4_mark_inode_dirty(handle, ar->inode); if (unlikely(err)) goto err_out; ext4_debug("splicing direct\n"); } return err; err_out: for (i = 1; i <= num; i++) { /* * branch[i].bh is newly allocated, so there is no * need to revoke the block, which is why we don't * need to set EXT4_FREE_BLOCKS_METADATA. */ ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1, EXT4_FREE_BLOCKS_FORGET); } ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key), ar->len, 0); return err; } /* * The ext4_ind_map_blocks() function handles non-extent inodes * (i.e., using the traditional indirect/double-indirect i_blocks * scheme) for ext4_map_blocks(). * * Allocation strategy is simple: if we have to allocate something, we will * have to go the whole way to leaf. So let's do it before attaching anything * to tree, set linkage between the newborn blocks, write them if sync is * required, recheck the path, free and repeat if check fails, otherwise * set the last missing link (that will protect us from any truncate-generated * removals - all blocks on the path are immune now) and possibly force the * write on the parent block. * That has a nice additional property: no special recovery from the failed * allocations is needed - we simply release blocks and do not touch anything * reachable from inode. * * `handle' can be NULL if create == 0. * * return > 0, # of blocks mapped or allocated. * return = 0, if plain lookup failed. * return < 0, error case. * * The ext4_ind_map_blocks() function should be called with * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system * blocks. */ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) { struct ext4_allocation_request ar; int err = -EIO; ext4_lblk_t offsets[4]; Indirect chain[4]; Indirect *partial; int indirect_blks; int blocks_to_boundary = 0; int depth; u64 count = 0; ext4_fsblk_t first_block = 0; trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); depth = ext4_block_to_path(inode, map->m_lblk, offsets, &blocks_to_boundary); if (depth == 0) goto out; partial = ext4_get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { first_block = le32_to_cpu(chain[depth - 1].key); count++; /* map more blocks */ while (count < map->m_len && count <= blocks_to_boundary) { ext4_fsblk_t blk; blk = le32_to_cpu(*(chain[depth-1].p + count)); if (blk == first_block + count) count++; else break; } goto got_it; } /* Next simple case - plain lookup failed */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { unsigned epb = inode->i_sb->s_blocksize / sizeof(u32); int i; /* * Count the number of blocks in the subtree under 'partial'. At each * level we count the number of complete empty subtrees beyond the * current offset and then descend into the subtree that is only * partially beyond the current offset. */ count = 0; for (i = partial - chain + 1; i < depth; i++) count = count * epb + (epb - offsets[i] - 1); count++; /* Fill in size of a hole we found */ map->m_pblk = 0; map->m_len = umin(map->m_len, count); goto cleanup; } /* Failed read of indirect block */ if (err == -EIO) goto cleanup; /* * Okay, we need to do block allocation. 
*/ if (ext4_has_feature_bigalloc(inode->i_sb)) { EXT4_ERROR_INODE(inode, "Can't allocate blocks for " "non-extent mapped inodes with bigalloc"); err = -EFSCORRUPTED; goto out; } /* Set up for the direct block allocation */ memset(&ar, 0, sizeof(ar)); ar.inode = inode; ar.logical = map->m_lblk; if (S_ISREG(inode->i_mode)) ar.flags = EXT4_MB_HINT_DATA; if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) ar.flags |= EXT4_MB_DELALLOC_RESERVED; if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) ar.flags |= EXT4_MB_USE_RESERVED; ar.goal = ext4_find_goal(inode, map->m_lblk, partial); /* the number of blocks that need to be allocated for [d,t]indirect blocks */ indirect_blks = (chain + depth) - partial - 1; /* * Next look up the indirect map to count the total number of * direct blocks to allocate for this branch. */ ar.len = ext4_blks_to_allocate(partial, indirect_blks, map->m_len, blocks_to_boundary); /* * Block out ext4_truncate while we alter the tree */ err = ext4_alloc_branch(handle, &ar, indirect_blks, offsets + (partial - chain), partial); /* * The ext4_splice_branch call will free and forget any buffers * on the new chain if there is a failure, but that risks using * up transaction credits, especially for bitmaps where the * credits cannot be returned. Can we handle this somehow? We * may need to return -EAGAIN upwards in the worst case. --sct */ if (!err) err = ext4_splice_branch(handle, &ar, partial, indirect_blks); if (err) goto cleanup; map->m_flags |= EXT4_MAP_NEW; ext4_update_inode_fsync_trans(handle, inode, 1); count = ar.len; got_it: map->m_flags |= EXT4_MAP_MAPPED; map->m_pblk = le32_to_cpu(chain[depth-1].key); map->m_len = count; if (count > blocks_to_boundary) map->m_flags |= EXT4_MAP_BOUNDARY; err = count; /* Clean up and exit */ partial = chain + depth - 1; /* the whole chain */ cleanup: while (partial > chain) { BUFFER_TRACE(partial->bh, "call brelse"); brelse(partial->bh); partial--; } out: trace_ext4_ind_map_blocks_exit(inode, flags, map, err); return err; } /* * Calculate the number of indirect blocks touched by mapping @nrblocks logically * contiguous blocks */ int ext4_ind_trans_blocks(struct inode *inode, int nrblocks) { /* * With N contiguous data blocks, we need at most * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, * 2 dindirect blocks, and 1 tindirect block */ return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; } static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode, struct buffer_head *bh, int *dropped) { int err; if (bh) { BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, bh); if (unlikely(err)) return err; } err = ext4_mark_inode_dirty(handle, inode); if (unlikely(err)) return err; /* * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this * moment, get_block can be called only for blocks inside i_size since * the page cache has already been dropped and writes are blocked by * i_rwsem. So we can safely drop the i_data_sem here. */ BUG_ON(EXT4_JOURNAL(inode) == NULL); ext4_discard_preallocations(inode); up_write(&EXT4_I(inode)->i_data_sem); *dropped = 1; return 0; } /* * Truncate transactions can be complex and absolutely huge. So we need to * be able to restart the transaction at a convenient checkpoint to make * sure we don't overflow the journal. * * Try to extend this transaction for the purposes of truncation. If * extend fails, we restart the transaction. 
*/ static int ext4_ind_truncate_ensure_credits(handle_t *handle, struct inode *inode, struct buffer_head *bh, int revoke_creds) { int ret; int dropped = 0; ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS, ext4_blocks_for_truncate(inode), revoke_creds, ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped)); if (dropped) down_write(&EXT4_I(inode)->i_data_sem); if (ret <= 0) return ret; if (bh) { BUFFER_TRACE(bh, "retaking write access"); ret = ext4_journal_get_write_access(handle, inode->i_sb, bh, EXT4_JTR_NONE); if (unlikely(ret)) return ret; } return 0; } /* * Probably it should be a library function... search for first non-zero word * or memcmp with zero_page, whatever is better for particular architecture. * Linus? */ static inline int all_zeroes(__le32 *p, __le32 *q) { while (p < q) if (*p++) return 0; return 1; } /** * ext4_find_shared - find the indirect blocks for partial truncation. * @inode: inode in question * @depth: depth of the affected branch * @offsets: offsets of pointers in that branch (see ext4_block_to_path) * @chain: place to store the pointers to partial indirect blocks * @top: place to the (detached) top of branch * * This is a helper function used by ext4_truncate(). * * When we do truncate() we may have to clean the ends of several * indirect blocks but leave the blocks themselves alive. Block is * partially truncated if some data below the new i_size is referred * from it (and it is on the path to the first completely truncated * data block, indeed). We have to free the top of that path along * with everything to the right of the path. Since no allocation * past the truncation point is possible until ext4_truncate() * finishes, we may safely do the latter, but top of branch may * require special attention - pageout below the truncation point * might try to populate it. * * We atomically detach the top of branch from the tree, store the * block number of its root in *@top, pointers to buffer_heads of * partially truncated blocks - in @chain[].bh and pointers to * their last elements that should not be removed - in * @chain[].p. Return value is the pointer to last filled element * of @chain. * * The work left to caller to do the actual freeing of subtrees: * a) free the subtree starting from *@top * b) free the subtrees whose roots are stored in * (@chain[i].p+1 .. end of @chain[i].bh->b_data) * c) free the subtrees growing from the inode past the @chain[0]. * (no partially truncated stuff there). */ static Indirect *ext4_find_shared(struct inode *inode, int depth, ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top) { Indirect *partial, *p; int k, err; *top = 0; /* Make k index the deepest non-null offset + 1 */ for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = ext4_get_branch(inode, k, offsets, chain, &err); /* Writer: pointers */ if (!partial) partial = chain + k-1; /* * If the branch acquired continuation since we've looked at it - * fine, it should all survive and (new) top doesn't belong to us. */ if (!partial->key && *partial->p) /* Writer: end */ goto no_top; for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) ; /* * OK, we've found the last block that must survive. The rest of our * branch should be detached before unlocking. However, if that rest * of branch is all ours and does not grow immediately from the inode * it's easier to cheat and just decrement partial->p. */ if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; /* Nope, don't do this in ext4. 
Must leave the tree intact */ #if 0 *p->p = 0; #endif } /* Writer: end */ while (partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } /* * Zero a number of block pointers in either an inode or an indirect block. * If we restart the transaction we must again get write access to the * indirect block for further modification. * * We release `count' blocks on disk, but (last - first) may be greater * than `count' because there can be holes in there. * * Return 0 on success, 1 on invalid block range * and < 0 on fatal error. */ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block_to_free, unsigned long count, __le32 *first, __le32 *last) { __le32 *p; int flags = EXT4_FREE_BLOCKS_VALIDATED; int err; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA; else if (ext4_should_journal_data(inode)) flags |= EXT4_FREE_BLOCKS_FORGET; if (!ext4_inode_block_valid(inode, block_to_free, count)) { EXT4_ERROR_INODE(inode, "attempt to clear invalid " "blocks %llu len %lu", (unsigned long long) block_to_free, count); return 1; } err = ext4_ind_truncate_ensure_credits(handle, inode, bh, ext4_free_data_revoke_credits(inode, count)); if (err < 0) goto out_err; for (p = first; p < last; p++) *p = 0; ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags); return 0; out_err: ext4_std_error(inode->i_sb, err); return err; } /** * ext4_free_data - free a list of data blocks * @handle: handle for this transaction * @inode: inode we are dealing with * @this_bh: indirect buffer_head which contains *@first and *@last * @first: array of block numbers * @last: points immediately past the end of array * * We are freeing all blocks referred from that array (numbers are stored as * little-endian 32-bit) and updating @inode->i_blocks appropriately. * * We accumulate contiguous runs of blocks to free. Conveniently, if these * blocks are contiguous then releasing them at one time will only affect one * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't * actually use a lot of journal space. * * @this_bh will be %NULL if @first and @last point into the inode's direct * block pointers. */ static void ext4_free_data(handle_t *handle, struct inode *inode, struct buffer_head *this_bh, __le32 *first, __le32 *last) { ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ unsigned long count = 0; /* Number of blocks in the run */ __le32 *block_to_free_p = NULL; /* Pointer into inode/ind corresponding to block_to_free */ ext4_fsblk_t nr; /* Current block # */ __le32 *p; /* Pointer into inode/ind for current block */ int err = 0; if (this_bh) { /* For indirect block */ BUFFER_TRACE(this_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode->i_sb, this_bh, EXT4_JTR_NONE); /* Important: if we can't update the indirect pointers * to the blocks, we can't free them. 
*/ if (err) return; } for (p = first; p < last; p++) { nr = le32_to_cpu(*p); if (nr) { /* accumulate blocks to free if they're contiguous */ if (count == 0) { block_to_free = nr; block_to_free_p = p; count = 1; } else if (nr == block_to_free + count) { count++; } else { err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, count, block_to_free_p, p); if (err) break; block_to_free = nr; block_to_free_p = p; count = 1; } } } if (!err && count > 0) err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, count, block_to_free_p, p); if (err < 0) /* fatal error */ return; if (this_bh) { BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); /* * The buffer head should have an attached journal head at this * point. However, if the data is corrupted and an indirect * block pointed to itself, it would have been detached when * the block was cleared. Check for this instead of OOPSing. */ if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) ext4_handle_dirty_metadata(handle, inode, this_bh); else EXT4_ERROR_INODE(inode, "circular indirect block detected at " "block %llu", (unsigned long long) this_bh->b_blocknr); } } /** * ext4_free_branches - free an array of branches * @handle: JBD handle for this transaction * @inode: inode we are dealing with * @parent_bh: the buffer_head which contains *@first and *@last * @first: array of block numbers * @last: pointer immediately past the end of array * @depth: depth of the branches to free * * We are freeing all blocks referred from these branches (numbers are * stored as little-endian 32-bit) and updating @inode->i_blocks * appropriately. */ static void ext4_free_branches(handle_t *handle, struct inode *inode, struct buffer_head *parent_bh, __le32 *first, __le32 *last, int depth) { ext4_fsblk_t nr; __le32 *p; if (ext4_handle_is_aborted(handle)) return; if (depth--) { struct buffer_head *bh; int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); p = last; while (--p >= first) { nr = le32_to_cpu(*p); if (!nr) continue; /* A hole */ if (!ext4_inode_block_valid(inode, nr, 1)) { EXT4_ERROR_INODE(inode, "invalid indirect mapped " "block %lu (level %d)", (unsigned long) nr, depth); break; } /* Go read the buffer for the next level down */ bh = ext4_sb_bread_nofail(inode->i_sb, nr); /* * A read failure? Report error and clear slot * (should be rare). */ if (IS_ERR(bh)) { ext4_error_inode_block(inode, nr, -PTR_ERR(bh), "Read failure"); continue; } /* This zaps the entire block. Bottom up. */ BUFFER_TRACE(bh, "free child branches"); ext4_free_branches(handle, inode, bh, (__le32 *) bh->b_data, (__le32 *) bh->b_data + addr_per_block, depth); brelse(bh); /* * Everything below this pointer has been * released. Now let this top-of-subtree go. * * We want the freeing of this indirect block to be * atomic in the journal with the updating of the * bitmap block which owns it. So make some room in * the journal. * * We zero the parent pointer *after* freeing its * pointee in the bitmaps, so if extend_transaction() * for some reason fails to put the bitmap changes and * the release into the same transaction, recovery * will merely complain about releasing a free block, * rather than leaking blocks. 
*/ if (ext4_handle_is_aborted(handle)) return; if (ext4_ind_truncate_ensure_credits(handle, inode, NULL, ext4_free_metadata_revoke_credits( inode->i_sb, 1)) < 0) return; /* * The forget flag here is critical because if * we are journaling (and not doing data * journaling), we have to make sure a revoke * record is written to prevent the journal * replay from overwriting the (former) * indirect block if it gets reallocated as a * data block. This must happen in the same * transaction where the data blocks are * actually freed. */ ext4_free_blocks(handle, inode, NULL, nr, 1, EXT4_FREE_BLOCKS_METADATA| EXT4_FREE_BLOCKS_FORGET); if (parent_bh) { /* * The block which we have just freed is * pointed to by an indirect block: journal it */ BUFFER_TRACE(parent_bh, "get_write_access"); if (!ext4_journal_get_write_access(handle, inode->i_sb, parent_bh, EXT4_JTR_NONE)) { *p = 0; BUFFER_TRACE(parent_bh, "call ext4_handle_dirty_metadata"); ext4_handle_dirty_metadata(handle, inode, parent_bh); } } } } else { /* We have reached the bottom of the tree. */ BUFFER_TRACE(parent_bh, "free data blocks"); ext4_free_data(handle, inode, parent_bh, first, last); } } void ext4_ind_truncate(handle_t *handle, struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); __le32 *i_data = ei->i_data; int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); ext4_lblk_t offsets[4]; Indirect chain[4]; Indirect *partial; __le32 nr = 0; int n = 0; ext4_lblk_t last_block, max_block; unsigned blocksize = inode->i_sb->s_blocksize; last_block = (inode->i_size + blocksize-1) >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); if (last_block != max_block) { n = ext4_block_to_path(inode, last_block, offsets, NULL); if (n == 0) return; } ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); /* * The orphan list entry will now protect us from any crash which * occurs before the truncate completes, so it is now safe to propagate * the new, shorter inode size (held for now in i_size) into the * on-disk inode. We do this via i_disksize, which is the value which * ext4 *really* writes onto the disk inode. */ ei->i_disksize = inode->i_size; if (last_block == max_block) { /* * It is unnecessary to free any data blocks if last_block is * equal to the indirect block limit. */ return; } else if (n == 1) { /* direct blocks */ ext4_free_data(handle, inode, NULL, i_data+offsets[0], i_data + EXT4_NDIR_BLOCKS); goto do_indirects; } partial = ext4_find_shared(inode, n, offsets, chain, &nr); /* Kill the top of shared branch (not detached) */ if (nr) { if (partial == chain) { /* Shared branch grows from the inode */ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, (chain+n-1) - partial); *partial->p = 0; /* * We mark the inode dirty prior to restart, * and prior to stop. No need for it here. 
*/ } else { /* Shared branch grows from an indirect block */ BUFFER_TRACE(partial->bh, "get_write_access"); ext4_free_branches(handle, inode, partial->bh, partial->p, partial->p+1, (chain+n-1) - partial); } } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { ext4_free_branches(handle, inode, partial->bh, partial->p + 1, (__le32*)partial->bh->b_data+addr_per_block, (chain+n-1) - partial); BUFFER_TRACE(partial->bh, "call brelse"); brelse(partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees */ switch (offsets[0]) { default: nr = i_data[EXT4_IND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); i_data[EXT4_IND_BLOCK] = 0; } fallthrough; case EXT4_IND_BLOCK: nr = i_data[EXT4_DIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); i_data[EXT4_DIND_BLOCK] = 0; } fallthrough; case EXT4_DIND_BLOCK: nr = i_data[EXT4_TIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); i_data[EXT4_TIND_BLOCK] = 0; } fallthrough; case EXT4_TIND_BLOCK: ; } } /** * ext4_ind_remove_space - remove space from the range * @handle: JBD handle for this transaction * @inode: inode we are dealing with * @start: First block to remove * @end: One block after the last block to remove (exclusive) * * Free the blocks in the defined range (end is exclusive endpoint of * range). This is used by ext4_punch_hole(). */ int ext4_ind_remove_space(handle_t *handle, struct inode *inode, ext4_lblk_t start, ext4_lblk_t end) { struct ext4_inode_info *ei = EXT4_I(inode); __le32 *i_data = ei->i_data; int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); ext4_lblk_t offsets[4], offsets2[4]; Indirect chain[4], chain2[4]; Indirect *partial, *partial2; Indirect *p = NULL, *p2 = NULL; ext4_lblk_t max_block; __le32 nr = 0, nr2 = 0; int n = 0, n2 = 0; unsigned blocksize = inode->i_sb->s_blocksize; max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); if (end >= max_block) end = max_block; if ((start >= end) || (start > max_block)) return 0; n = ext4_block_to_path(inode, start, offsets, NULL); n2 = ext4_block_to_path(inode, end, offsets2, NULL); BUG_ON(n > n2); if ((n == 1) && (n == n2)) { /* We're punching only within the direct block range */ ext4_free_data(handle, inode, NULL, i_data + offsets[0], i_data + offsets2[0]); return 0; } else if (n2 > n) { /* * Start and end are on different levels, so we're going to * free the partial block at the start and the partial block * at the end of the range. If there are some levels in * between then the do_indirects label will take care of that. */ if (n == 1) { /* * Start is at the direct block level, free * everything to the end of the level. 
*/ ext4_free_data(handle, inode, NULL, i_data + offsets[0], i_data + EXT4_NDIR_BLOCKS); goto end_range; } partial = p = ext4_find_shared(inode, n, offsets, chain, &nr); if (nr) { if (partial == chain) { /* Shared branch grows from the inode */ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, (chain+n-1) - partial); *partial->p = 0; } else { /* Shared branch grows from an indirect block */ BUFFER_TRACE(partial->bh, "get_write_access"); ext4_free_branches(handle, inode, partial->bh, partial->p, partial->p+1, (chain+n-1) - partial); } } /* * Clear the ends of indirect blocks on the shared branch * at the start of the range */ while (partial > chain) { ext4_free_branches(handle, inode, partial->bh, partial->p + 1, (__le32 *)partial->bh->b_data+addr_per_block, (chain+n-1) - partial); partial--; } end_range: partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); if (nr2) { if (partial2 == chain2) { /* * Remember, end is exclusive so here we're at * the start of the next level we're not going * to free. Everything was covered by the start * of the range. */ goto do_indirects; } } else { /* * ext4_find_shared returns Indirect structure which * points to the last element which should not be * removed by truncate. But this is end of the range * in punch_hole so we need to point to the next element */ partial2->p++; } /* * Clear the ends of indirect blocks on the shared branch * at the end of the range */ while (partial2 > chain2) { ext4_free_branches(handle, inode, partial2->bh, (__le32 *)partial2->bh->b_data, partial2->p, (chain2+n2-1) - partial2); partial2--; } goto do_indirects; } /* Punch happened within the same level (n == n2) */ partial = p = ext4_find_shared(inode, n, offsets, chain, &nr); partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); /* Free top, but only if partial2 isn't its subtree. */ if (nr) { int level = min(partial - chain, partial2 - chain2); int i; int subtree = 1; for (i = 0; i <= level; i++) { if (offsets[i] != offsets2[i]) { subtree = 0; break; } } if (!subtree) { if (partial == chain) { /* Shared branch grows from the inode */ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, (chain+n-1) - partial); *partial->p = 0; } else { /* Shared branch grows from an indirect block */ BUFFER_TRACE(partial->bh, "get_write_access"); ext4_free_branches(handle, inode, partial->bh, partial->p, partial->p+1, (chain+n-1) - partial); } } } if (!nr2) { /* * ext4_find_shared returns Indirect structure which * points to the last element which should not be * removed by truncate. But this is end of the range * in punch_hole so we need to point to the next element */ partial2->p++; } while (partial > chain || partial2 > chain2) { int depth = (chain+n-1) - partial; int depth2 = (chain2+n2-1) - partial2; if (partial > chain && partial2 > chain2 && partial->bh->b_blocknr == partial2->bh->b_blocknr) { /* * We've converged on the same block. Clear the range, * then we're done. */ ext4_free_branches(handle, inode, partial->bh, partial->p + 1, partial2->p, (chain+n-1) - partial); goto cleanup; } /* * The start and end partial branches may not be at the same * level even though the punch happened within one level. So, we * give them a chance to arrive at the same level, then walk * them in step with each other until we converge on the same * block. 
*/ if (partial > chain && depth <= depth2) { ext4_free_branches(handle, inode, partial->bh, partial->p + 1, (__le32 *)partial->bh->b_data+addr_per_block, (chain+n-1) - partial); partial--; } if (partial2 > chain2 && depth2 <= depth) { ext4_free_branches(handle, inode, partial2->bh, (__le32 *)partial2->bh->b_data, partial2->p, (chain2+n2-1) - partial2); partial2--; } } cleanup: while (p && p > chain) { BUFFER_TRACE(p->bh, "call brelse"); brelse(p->bh); p--; } while (p2 && p2 > chain2) { BUFFER_TRACE(p2->bh, "call brelse"); brelse(p2->bh); p2--; } return 0; do_indirects: /* Kill the remaining (whole) subtrees */ switch (offsets[0]) { default: if (++n >= n2) break; nr = i_data[EXT4_IND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); i_data[EXT4_IND_BLOCK] = 0; } fallthrough; case EXT4_IND_BLOCK: if (++n >= n2) break; nr = i_data[EXT4_DIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); i_data[EXT4_DIND_BLOCK] = 0; } fallthrough; case EXT4_DIND_BLOCK: if (++n >= n2) break; nr = i_data[EXT4_TIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); i_data[EXT4_TIND_BLOCK] = 0; } fallthrough; case EXT4_TIND_BLOCK: ; } goto cleanup; }
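/*
 * A minimal userspace sketch of the logical-block -> offsets decomposition
 * that ext4_block_to_path() performs for the classic indirect scheme used
 * above: 12 direct pointers in the inode, then one indirect, one double
 * indirect and one triple indirect tree. It assumes a 4 KiB block size
 * (1024 __le32 pointers per map block); NDIR_BLOCKS, ADDR_PER_BLK and
 * block_to_path() are illustrative stand-ins, not the kernel API, and the
 * boundary bookkeeping is omitted.
 */
#include <stdio.h>

#define NDIR_BLOCKS  12		/* direct pointers held in the inode */
#define ADDR_PER_BLK 1024	/* __le32 pointers per 4 KiB map block */

static int block_to_path(unsigned long block, unsigned int offsets[4])
{
	const unsigned long ptrs = ADDR_PER_BLK;
	int n = 0;

	if (block < NDIR_BLOCKS) {				/* direct */
		offsets[n++] = block;
	} else if ((block -= NDIR_BLOCKS) < ptrs) {		/* indirect */
		offsets[n++] = NDIR_BLOCKS;			/* EXT4_IND_BLOCK */
		offsets[n++] = block;
	} else if ((block -= ptrs) < ptrs * ptrs) {		/* double indirect */
		offsets[n++] = NDIR_BLOCKS + 1;			/* EXT4_DIND_BLOCK */
		offsets[n++] = block / ptrs;
		offsets[n++] = block % ptrs;
	} else if ((block -= ptrs * ptrs) < ptrs * ptrs * ptrs) { /* triple */
		offsets[n++] = NDIR_BLOCKS + 2;			/* EXT4_TIND_BLOCK */
		offsets[n++] = block / (ptrs * ptrs);
		offsets[n++] = (block / ptrs) % ptrs;
		offsets[n++] = block % ptrs;
	}
	return n;	/* depth of the chain; 0 means block is out of range */
}

int main(void)
{
	unsigned int offsets[4] = { 0 };
	/* Logical block 1041 lands in the double indirect tree: depth 3. */
	int depth = block_to_path(12 + 1024 + 5, offsets);

	printf("depth %d: [%u %u %u]\n", depth, offsets[0], offsets[1], offsets[2]);
	return 0;
}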
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/atomic.h> #include <linux/inetdevice.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_nat_masquerade.h> struct masq_dev_work { struct work_struct work; struct net *net; netns_tracker ns_tracker; union nf_inet_addr addr; int ifindex; int (*iter)(struct nf_conn *i, void *data); }; #define MAX_MASQ_WORKER_COUNT 16 static DEFINE_MUTEX(masq_mutex); static unsigned int masq_refcnt __read_mostly; static atomic_t masq_worker_count __read_mostly; unsigned int nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, const struct nf_nat_range2 *range, const struct net_device *out) { struct nf_conn *ct; struct nf_conn_nat *nat; enum ip_conntrack_info ctinfo; struct nf_nat_range2 newrange; const struct rtable *rt; __be32 newsrc, nh; WARN_ON(hooknum != NF_INET_POST_ROUTING); ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); /* Source address is 0.0.0.0 - locally generated packet that is * probably not supposed to be masqueraded. */ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) return NF_ACCEPT; rt = skb_rtable(skb); nh = rt_nexthop(rt, ip_hdr(skb)->daddr); newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE); if (!newsrc) { pr_info("%s ate my IP address\n", out->name); return NF_DROP; } nat = nf_ct_nat_ext_add(ct); if (nat) nat->masq_index = out->ifindex; /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newsrc; newrange.max_addr.ip = newsrc; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; /* Hand modified range to generic setup. 
*/ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); static void iterate_cleanup_work(struct work_struct *work) { struct nf_ct_iter_data iter_data = {}; struct masq_dev_work *w; w = container_of(work, struct masq_dev_work, work); iter_data.net = w->net; iter_data.data = (void *)w; nf_ct_iterate_cleanup_net(w->iter, &iter_data); put_net_track(w->net, &w->ns_tracker); kfree(w); atomic_dec(&masq_worker_count); module_put(THIS_MODULE); } /* Iterate conntrack table in the background and remove conntrack entries * that use the device/address being removed. * * In case too many work items have been queued already or memory allocation * fails iteration is skipped, conntrack entries will time out eventually. */ static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr, int ifindex, int (*iter)(struct nf_conn *i, void *data), gfp_t gfp_flags) { struct masq_dev_work *w; if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT) return; net = maybe_get_net(net); if (!net) return; if (!try_module_get(THIS_MODULE)) goto err_module; w = kzalloc(sizeof(*w), gfp_flags); if (w) { /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */ atomic_inc(&masq_worker_count); INIT_WORK(&w->work, iterate_cleanup_work); w->ifindex = ifindex; w->net = net; netns_tracker_alloc(net, &w->ns_tracker, gfp_flags); w->iter = iter; if (addr) w->addr = *addr; schedule_work(&w->work); return; } module_put(THIS_MODULE); err_module: put_net(net); } static int device_cmp(struct nf_conn *i, void *arg) { const struct nf_conn_nat *nat = nfct_nat(i); const struct masq_dev_work *w = arg; if (!nat) return 0; return nat->masq_index == w->ifindex; } static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) { /* Device was downed. Search entire table for * conntracks which were associated with that device, * and forget them. */ nf_nat_masq_schedule(net, NULL, dev->ifindex, device_cmp, GFP_KERNEL); } return NOTIFY_DONE; } static int inet_cmp(struct nf_conn *ct, void *ptr) { struct nf_conntrack_tuple *tuple; struct masq_dev_work *w = ptr; if (!device_cmp(ct, ptr)) return 0; tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3); } static int masq_inet_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct in_ifaddr *ifa = ptr; const struct in_device *idev; const struct net_device *dev; union nf_inet_addr addr; if (event != NETDEV_DOWN) return NOTIFY_DONE; /* The masq_dev_notifier will catch the case of the device going * down. So if the inetdev is dead and being destroyed we have * no work to do. Otherwise this is an individual address removal * and we have to perform the flush. 
*/ idev = ifa->ifa_dev; if (idev->dead) return NOTIFY_DONE; memset(&addr, 0, sizeof(addr)); addr.ip = ifa->ifa_address; dev = idev->dev; nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex, inet_cmp, GFP_KERNEL); return NOTIFY_DONE; } static struct notifier_block masq_dev_notifier = { .notifier_call = masq_device_event, }; static struct notifier_block masq_inet_notifier = { .notifier_call = masq_inet_event, }; #if IS_ENABLED(CONFIG_IPV6) static int nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr) { #ifdef CONFIG_IPV6_MODULE const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return -EHOSTUNREACH; return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr); #else return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr); #endif } unsigned int nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, const struct net_device *out) { enum ip_conntrack_info ctinfo; struct nf_conn_nat *nat; struct in6_addr src; struct nf_conn *ct; struct nf_nat_range2 newrange; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out, &ipv6_hdr(skb)->daddr, 0, &src) < 0) return NF_DROP; nat = nf_ct_nat_ext_add(ct); if (nat) nat->masq_index = out->ifindex; newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.in6 = src; newrange.max_addr.in6 = src; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6); /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep). * * Defer it to the system workqueue. * * As we can have 'a lot' of inet_events (depending on amount of ipv6 * addresses being deleted), we also need to limit work item queue. 
*/ static int masq_inet6_event(struct notifier_block *this, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = ptr; const struct net_device *dev; union nf_inet_addr addr; if (event != NETDEV_DOWN) return NOTIFY_DONE; dev = ifa->idev->dev; memset(&addr, 0, sizeof(addr)); addr.in6 = ifa->addr; nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp, GFP_ATOMIC); return NOTIFY_DONE; } static struct notifier_block masq_inet6_notifier = { .notifier_call = masq_inet6_event, }; static int nf_nat_masquerade_ipv6_register_notifier(void) { return register_inet6addr_notifier(&masq_inet6_notifier); } #else static inline int nf_nat_masquerade_ipv6_register_notifier(void) { return 0; } #endif int nf_nat_masquerade_inet_register_notifiers(void) { int ret = 0; mutex_lock(&masq_mutex); if (WARN_ON_ONCE(masq_refcnt == UINT_MAX)) { ret = -EOVERFLOW; goto out_unlock; } /* check if the notifier was already set */ if (++masq_refcnt > 1) goto out_unlock; /* Register for device down reports */ ret = register_netdevice_notifier(&masq_dev_notifier); if (ret) goto err_dec; /* Register IP address change reports */ ret = register_inetaddr_notifier(&masq_inet_notifier); if (ret) goto err_unregister; ret = nf_nat_masquerade_ipv6_register_notifier(); if (ret) goto err_unreg_inet; mutex_unlock(&masq_mutex); return ret; err_unreg_inet: unregister_inetaddr_notifier(&masq_inet_notifier); err_unregister: unregister_netdevice_notifier(&masq_dev_notifier); err_dec: masq_refcnt--; out_unlock: mutex_unlock(&masq_mutex); return ret; } EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_register_notifiers); void nf_nat_masquerade_inet_unregister_notifiers(void) { mutex_lock(&masq_mutex); /* check if the notifiers still have clients */ if (--masq_refcnt > 0) goto out_unlock; unregister_netdevice_notifier(&masq_dev_notifier); unregister_inetaddr_notifier(&masq_inet_notifier); #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&masq_inet6_notifier); #endif out_unlock: mutex_unlock(&masq_mutex); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_unregister_notifiers);
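/*
 * A minimal sketch of the refcount-guarded registration pattern used by
 * nf_nat_masquerade_inet_register_notifiers() and its unregister
 * counterpart above: the first caller performs the real registration,
 * later callers only bump the count, and the last unregister tears
 * everything down. A pthread mutex stands in for masq_mutex, and
 * do_register()/do_unregister() are hypothetical stand-ins for the
 * notifier calls; this illustrates the pattern, it is not kernel code.
 */
#include <limits.h>
#include <pthread.h>

static pthread_mutex_t masq_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int masq_refcnt;

static int do_register(void) { return 0; }	/* stand-in: may fail */
static void do_unregister(void) { }		/* stand-in */

int masq_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&masq_mutex);
	if (masq_refcnt == UINT_MAX) {		/* overflow guard */
		ret = -1;
		goto out;
	}
	if (++masq_refcnt > 1)			/* already registered */
		goto out;
	ret = do_register();
	if (ret)
		masq_refcnt--;			/* undo the count on failure */
out:
	pthread_mutex_unlock(&masq_mutex);
	return ret;
}

void masq_unregister(void)
{
	pthread_mutex_lock(&masq_mutex);
	if (--masq_refcnt == 0)			/* last user unregisters */
		do_unregister();
	pthread_mutex_unlock(&masq_mutex);
}

int main(void)
{
	if (masq_register() == 0)
		masq_unregister();
	return 0;
}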
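/*
 * A sketch of the capped deferred-cleanup pattern from
 * nf_nat_masq_schedule() above: an atomic counter bounds how many cleanup
 * items may be in flight, and when the cap is hit the work is simply
 * skipped, since stale conntrack entries would time out on their own.
 * C11 atomics stand in for the kernel's atomic_t; queue_item() is a
 * hypothetical stand-in for schedule_work() and, in this sketch, just
 * runs the item inline.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

#define MAX_WORKER_COUNT 16

static atomic_int worker_count;

struct cleanup_work {
	int ifindex;
	void (*fn)(struct cleanup_work *w);
};

static void queue_item(struct cleanup_work *w) { w->fn(w); }	/* stand-in */

static void do_cleanup(struct cleanup_work *w)
{
	/* ... walk the table and drop entries that match w->ifindex ... */
	atomic_fetch_sub(&worker_count, 1);	/* release our slot */
	free(w);
}

bool schedule_cleanup(int ifindex)
{
	struct cleanup_work *w;

	/* Racy check is fine: a small overshoot past the cap is harmless. */
	if (atomic_load(&worker_count) > MAX_WORKER_COUNT)
		return false;

	w = calloc(1, sizeof(*w));
	if (!w)
		return false;	/* skip; entries will time out eventually */

	atomic_fetch_add(&worker_count, 1);
	w->ifindex = ifindex;
	w->fn = do_cleanup;
	queue_item(w);
	return true;
}

int main(void)
{
	schedule_cleanup(2);
	return 0;
}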
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * iSCSI transport class definitions
 *
 * Copyright (C) IBM Corporation, 2004
 * Copyright (C) Mike Christie, 2004 - 2005
 * Copyright (C) Dmitry Yusupov, 2004 - 2005
 * Copyright (C) Alex Aizman, 2004 - 2005
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/bsg-lib.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_bsg_iscsi.h>

#define ISCSI_TRANSPORT_VERSION "2.0-870"

#define ISCSI_SEND_MAX_ALLOWED	10

#define CREATE_TRACE_POINTS
#include <trace/events/iscsi.h>

/*
 * Export tracepoint symbols to be used by other modules.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_conn);
EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_eh);
EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_session);
EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_tcp);
EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_sw_tcp);

static int dbg_session;
module_param_named(debug_session, dbg_session, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_session,
		 "Turn on debugging for sessions in scsi_transport_iscsi "
		 "module. Set to 1 to turn on, and zero to turn off. Default "
		 "is off.");

static int dbg_conn;
module_param_named(debug_conn, dbg_conn, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_conn,
		 "Turn on debugging for connections in scsi_transport_iscsi "
		 "module. Set to 1 to turn on, and zero to turn off. Default "
		 "is off.");

#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...)		\
	do {								\
		if (dbg_session)					\
			iscsi_cls_session_printk(KERN_INFO, _session,	\
						 "%s: " dbg_fmt,	\
						 __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_trans_session,		\
				&(_session)->dev,			\
				"%s " dbg_fmt, __func__, ##arg);	\
	} while (0)

#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...)			\
	do {								\
		if (dbg_conn)						\
			iscsi_cls_conn_printk(KERN_INFO, _conn,		\
					      "%s: " dbg_fmt,		\
					      __func__, ##arg);		\
		iscsi_dbg_trace(trace_iscsi_dbg_trans_conn,		\
				&(_conn)->dev,				\
				"%s " dbg_fmt, __func__, ##arg);	\
	} while (0)

struct iscsi_internal {
	struct scsi_transport_template t;
	struct iscsi_transport *iscsi_transport;
	struct list_head list;
	struct device dev;

	struct transport_container conn_cont;
	struct transport_container session_cont;
};

static DEFINE_IDR(iscsi_ep_idr);
static DEFINE_MUTEX(iscsi_ep_idr_mutex);

static atomic_t iscsi_session_nr;	/* sysfs session id for next new session */

static struct workqueue_struct *iscsi_conn_cleanup_workq;

static DEFINE_IDA(iscsi_sess_ida);

/*
 * list of registered transports and lock that must
 * be held while accessing list. The iscsi_transport_lock must
 * be acquired after the rx_queue_mutex.
 */
static LIST_HEAD(iscsi_transports);
static DEFINE_SPINLOCK(iscsi_transport_lock);

#define to_iscsi_internal(tmpl) \
	container_of(tmpl, struct iscsi_internal, t)

#define dev_to_iscsi_internal(_dev) \
	container_of(_dev, struct iscsi_internal, dev)

static void iscsi_transport_release(struct device *dev)
{
	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
	kfree(priv);
}
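/*
 * Roughly how this plugs together (an illustrative sketch, not code from
 * this file): an LLD fills in a struct iscsi_transport and registers it
 * with iscsi_register_transport(), which wraps it in the iscsi_internal
 * structure above and adds it to the iscsi_transports list. The names
 * my_iscsi_transport, my_iscsi_init() and the capability mask below are
 * hypothetical:
 *
 *	static struct iscsi_transport my_iscsi_transport = {
 *		.owner	= THIS_MODULE,
 *		.name	= "my_iscsi",
 *		.caps	= CAP_RECOVERY_L0 | CAP_TEXT_NEGO,
 *		// ... remaining callbacks ...
 *	};
 *
 *	static struct scsi_transport_template *my_scsi_transport;
 *
 *	static int __init my_iscsi_init(void)
 *	{
 *		my_scsi_transport = iscsi_register_transport(&my_iscsi_transport);
 *		return my_scsi_transport ? 0 : -ENOMEM;
 *	}
 */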
/*
 * iscsi_transport_class represents the iscsi_transports that are
 * registered.
 */
static struct class iscsi_transport_class = {
	.name = "iscsi_transport",
	.dev_release = iscsi_transport_release,
};

static ssize_t
show_transport_handle(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	return sysfs_emit(buf, "%llu\n",
			  (unsigned long long)iscsi_handle(priv->iscsi_transport));
}
static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);

#define show_transport_attr(name, format)				\
static ssize_t								\
show_transport_##name(struct device *dev,				\
		      struct device_attribute *attr, char *buf)		\
{									\
	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);	\
	return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);

show_transport_attr(caps, "0x%x");

static struct attribute *iscsi_transport_attrs[] = {
	&dev_attr_handle.attr,
	&dev_attr_caps.attr,
	NULL,
};

static struct attribute_group iscsi_transport_group = {
	.attrs = iscsi_transport_attrs,
};

/*
 * iSCSI endpoint attrs
 */
#define iscsi_dev_to_endpoint(_dev) \
	container_of(_dev, struct iscsi_endpoint, dev)

#define ISCSI_ATTR(_prefix, _name, _mode, _show, _store)	\
struct device_attribute dev_attr_##_prefix##_##_name =		\
	__ATTR(_name, _mode, _show, _store)

static void iscsi_endpoint_release(struct device *dev)
{
	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);

	mutex_lock(&iscsi_ep_idr_mutex);
	idr_remove(&iscsi_ep_idr, ep->id);
	mutex_unlock(&iscsi_ep_idr_mutex);

	kfree(ep);
}

static struct class iscsi_endpoint_class = {
	.name = "iscsi_endpoint",
	.dev_release = iscsi_endpoint_release,
};

static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);

	return sysfs_emit(buf, "%d\n", ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);

static struct attribute *iscsi_endpoint_attrs[] = {
	&dev_attr_ep_handle.attr,
	NULL,
};

static struct attribute_group iscsi_endpoint_group = {
	.attrs = iscsi_endpoint_attrs,
};

struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
	struct iscsi_endpoint *ep;
	int err, id;

	ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
	if (!ep)
		return NULL;

	mutex_lock(&iscsi_ep_idr_mutex);

	/*
	 * First endpoint id should be 1 to comply with user space
	 * applications (iscsid).
	 */
	id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
	if (id < 0) {
		mutex_unlock(&iscsi_ep_idr_mutex);
		printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
		       id);
		goto free_ep;
	}
	mutex_unlock(&iscsi_ep_idr_mutex);

	ep->id = id;
	ep->dev.class = &iscsi_endpoint_class;
	dev_set_name(&ep->dev, "ep-%d", id);
	err = device_register(&ep->dev);
	if (err)
		goto put_dev;

	err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
	if (err)
		goto unregister_dev;

	if (dd_size)
		ep->dd_data = &ep[1];
	return ep;

unregister_dev:
	device_unregister(&ep->dev);
	return NULL;

put_dev:
	mutex_lock(&iscsi_ep_idr_mutex);
	idr_remove(&iscsi_ep_idr, id);
	mutex_unlock(&iscsi_ep_idr_mutex);
	put_device(&ep->dev);
	return NULL;

free_ep:
	kfree(ep);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
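/*
 * Typical endpoint lifecycle from an LLD's ep_connect handler (an
 * illustrative sketch; struct my_ep and the surrounding driver code are
 * hypothetical). The dd_size area requested here becomes the
 * driver-private region allocated directly behind the iscsi_endpoint:
 *
 *	struct iscsi_endpoint *ep;
 *	struct my_ep *priv;
 *
 *	ep = iscsi_create_endpoint(sizeof(struct my_ep));
 *	if (!ep)
 *		return ERR_PTR(-ENOMEM);
 *	priv = ep->dd_data;
 *	// ... start connecting, hand ep->id back to userspace ...
 *	iscsi_destroy_endpoint(ep);
 */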
void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
{
	sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
	device_unregister(&ep->dev);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);

void iscsi_put_endpoint(struct iscsi_endpoint *ep)
{
	put_device(&ep->dev);
}
EXPORT_SYMBOL_GPL(iscsi_put_endpoint);

/**
 * iscsi_lookup_endpoint - get ep from handle
 * @handle: endpoint handle
 *
 * Caller must do an iscsi_put_endpoint().
 */
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
	struct iscsi_endpoint *ep;

	mutex_lock(&iscsi_ep_idr_mutex);
	ep = idr_find(&iscsi_ep_idr, handle);
	if (!ep)
		goto unlock;

	get_device(&ep->dev);
unlock:
	mutex_unlock(&iscsi_ep_idr_mutex);
	return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);

/*
 * Interface to display network param to sysfs
 */

static void iscsi_iface_release(struct device *dev)
{
	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
	struct device *parent = iface->dev.parent;

	kfree(iface);
	put_device(parent);
}

static struct class iscsi_iface_class = {
	.name = "iscsi_iface",
	.dev_release = iscsi_iface_release,
};

#define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store)	\
struct device_attribute dev_attr_##_prefix##_##_name =		\
	__ATTR(_name, _mode, _show, _store)

/* iface attrs show */
#define iscsi_iface_attr_show(type, name, param_type, param)		\
static ssize_t								\
show_##type##_##name(struct device *dev, struct device_attribute *attr, \
		     char *buf)						\
{									\
	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);		\
	struct iscsi_transport *t = iface->transport;			\
	return t->get_iface_param(iface, param_type, param, buf);	\
}									\

#define iscsi_iface_net_attr(type, name, param)				\
	iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param)	\
static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);

#define iscsi_iface_attr(type, name, param)				\
	iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param)	\
static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);

/* generic read only ipv4 attribute */
iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);
iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);
iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);
iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO);
iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN);
iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN);
iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS);
iscsi_iface_net_attr(ipv4_iface, grat_arp_en,
		     ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id,
		     ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID);
iscsi_iface_net_attr(ipv4_iface,
dhcp_req_vendor_id_en, ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en, ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id, ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID); iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en, ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN); iscsi_iface_net_attr(ipv4_iface, fragment_disable, ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE); iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en, ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN); iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL); /* generic read only ipv6 attribute */ iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR); iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL); iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER); iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg, ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG); iscsi_iface_net_attr(ipv6_iface, link_local_autocfg, ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG); iscsi_iface_net_attr(ipv6_iface, link_local_state, ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE); iscsi_iface_net_attr(ipv6_iface, router_state, ISCSI_NET_PARAM_IPV6_ROUTER_STATE); iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en, ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN); iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN); iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL); iscsi_iface_net_attr(ipv6_iface, traffic_class, ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS); iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT); iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo, ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO); iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time, ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME); iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo, ISCSI_NET_PARAM_IPV6_ND_STALE_TMO); iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt, ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT); iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu, ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU); /* common read only iface attribute */ iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE); iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID); iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY); iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED); iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU); iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT); iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE); iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN); iscsi_iface_net_attr(iface, tcp_nagle_disable, ISCSI_NET_PARAM_TCP_NAGLE_DISABLE); iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE); iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF); iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE); iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN); iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID); iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN); /* common iscsi specific settings attributes */ iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO); iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN); iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN); iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN); iscsi_iface_attr(iface, 
initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN); iscsi_iface_attr(iface, data_seq_in_order, ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN); iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN); iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL); iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH); iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST); iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T); iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST); iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN); iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN); iscsi_iface_attr(iface, discovery_auth_optional, ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL); iscsi_iface_attr(iface, discovery_logout, ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN); iscsi_iface_attr(iface, strict_login_comp_en, ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN); iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME); static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; int param = -1; if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_iface_header_digest.attr) param = ISCSI_IFACE_PARAM_HDRDGST_EN; else if (attr == &dev_attr_iface_data_digest.attr) param = ISCSI_IFACE_PARAM_DATADGST_EN; else if (attr == &dev_attr_iface_immediate_data.attr) param = ISCSI_IFACE_PARAM_IMM_DATA_EN; else if (attr == &dev_attr_iface_initial_r2t.attr) param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN; else if (attr == &dev_attr_iface_data_seq_in_order.attr) param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN; else if (attr == &dev_attr_iface_data_pdu_in_order.attr) param = ISCSI_IFACE_PARAM_PDU_INORDER_EN; else if (attr == &dev_attr_iface_erl.attr) param = ISCSI_IFACE_PARAM_ERL; else if (attr == &dev_attr_iface_max_recv_dlength.attr) param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH; else if (attr == &dev_attr_iface_first_burst_len.attr) param = ISCSI_IFACE_PARAM_FIRST_BURST; else if (attr == &dev_attr_iface_max_outstanding_r2t.attr) param = ISCSI_IFACE_PARAM_MAX_R2T; else if (attr == &dev_attr_iface_max_burst_len.attr) param = ISCSI_IFACE_PARAM_MAX_BURST; else if (attr == &dev_attr_iface_chap_auth.attr) param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN; else if (attr == &dev_attr_iface_bidi_chap.attr) param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN; else if (attr == &dev_attr_iface_discovery_auth_optional.attr) param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL; else if (attr == &dev_attr_iface_discovery_logout.attr) param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN; else if (attr == &dev_attr_iface_strict_login_comp_en.attr) param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; else if (attr == &dev_attr_iface_initiator_name.attr) param = ISCSI_IFACE_PARAM_INITIATOR_NAME; if (param != -1) return t->attr_is_visible(ISCSI_IFACE_PARAM, param); if (attr == &dev_attr_iface_enabled.attr) param = ISCSI_NET_PARAM_IFACE_ENABLE; else if (attr == &dev_attr_iface_vlan_id.attr) param = ISCSI_NET_PARAM_VLAN_ID; else if (attr == &dev_attr_iface_vlan_priority.attr) param = ISCSI_NET_PARAM_VLAN_PRIORITY; else if (attr == &dev_attr_iface_vlan_enabled.attr) param = ISCSI_NET_PARAM_VLAN_ENABLED; else if (attr == &dev_attr_iface_mtu.attr) param = ISCSI_NET_PARAM_MTU; else if (attr == &dev_attr_iface_port.attr) param = 
ISCSI_NET_PARAM_PORT; else if (attr == &dev_attr_iface_ipaddress_state.attr) param = ISCSI_NET_PARAM_IPADDR_STATE; else if (attr == &dev_attr_iface_delayed_ack_en.attr) param = ISCSI_NET_PARAM_DELAYED_ACK_EN; else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; else if (attr == &dev_attr_iface_tcp_wsf.attr) param = ISCSI_NET_PARAM_TCP_WSF; else if (attr == &dev_attr_iface_tcp_timer_scale.attr) param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; else if (attr == &dev_attr_iface_cache_id.attr) param = ISCSI_NET_PARAM_CACHE_ID; else if (attr == &dev_attr_iface_redirect_en.attr) param = ISCSI_NET_PARAM_REDIRECT_EN; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; else if (attr == &dev_attr_ipv4_iface_gateway.attr) param = ISCSI_NET_PARAM_IPV4_GW; else if (attr == &dev_attr_ipv4_iface_subnet.attr) param = ISCSI_NET_PARAM_IPV4_SUBNET; else if (attr == &dev_attr_ipv4_iface_bootproto.attr) param = ISCSI_NET_PARAM_IPV4_BOOTPROTO; else if (attr == &dev_attr_ipv4_iface_dhcp_dns_address_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN; else if (attr == &dev_attr_ipv4_iface_tos_en.attr) param = ISCSI_NET_PARAM_IPV4_TOS_EN; else if (attr == &dev_attr_ipv4_iface_tos.attr) param = ISCSI_NET_PARAM_IPV4_TOS; else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr) param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID; else if (attr == &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID; else if (attr == &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN; else if (attr == &dev_attr_ipv4_iface_fragment_disable.attr) param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE; else if (attr == &dev_attr_ipv4_iface_incoming_forwarding_en.attr) param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN; else if (attr == &dev_attr_ipv4_iface_ttl.attr) param = ISCSI_NET_PARAM_IPV4_TTL; else return 0; } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) { if (attr == &dev_attr_ipv6_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV6_ADDR; else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL; else if (attr == &dev_attr_ipv6_iface_router_addr.attr) param = ISCSI_NET_PARAM_IPV6_ROUTER; else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr) param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG; else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG; else if (attr == &dev_attr_ipv6_iface_link_local_state.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE; else if (attr == &dev_attr_ipv6_iface_router_state.attr) param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE; else if (attr == 
&dev_attr_ipv6_iface_grat_neighbor_adv_en.attr) param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN; else if (attr == &dev_attr_ipv6_iface_mld_en.attr) param = ISCSI_NET_PARAM_IPV6_MLD_EN; else if (attr == &dev_attr_ipv6_iface_flow_label.attr) param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL; else if (attr == &dev_attr_ipv6_iface_traffic_class.attr) param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS; else if (attr == &dev_attr_ipv6_iface_hop_limit.attr) param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT; else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr) param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO; else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr) param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME; else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr) param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO; else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr) param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT; else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr) param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU; else return 0; } else { WARN_ONCE(1, "Invalid iface attr"); return 0; } return t->attr_is_visible(ISCSI_NET_PARAM, param); } static struct attribute *iscsi_iface_attrs[] = { &dev_attr_iface_enabled.attr, &dev_attr_iface_vlan_id.attr, &dev_attr_iface_vlan_priority.attr, &dev_attr_iface_vlan_enabled.attr, &dev_attr_ipv4_iface_ipaddress.attr, &dev_attr_ipv4_iface_gateway.attr, &dev_attr_ipv4_iface_subnet.attr, &dev_attr_ipv4_iface_bootproto.attr, &dev_attr_ipv6_iface_ipaddress.attr, &dev_attr_ipv6_iface_link_local_addr.attr, &dev_attr_ipv6_iface_router_addr.attr, &dev_attr_ipv6_iface_ipaddr_autocfg.attr, &dev_attr_ipv6_iface_link_local_autocfg.attr, &dev_attr_iface_mtu.attr, &dev_attr_iface_port.attr, &dev_attr_iface_ipaddress_state.attr, &dev_attr_iface_delayed_ack_en.attr, &dev_attr_iface_tcp_nagle_disable.attr, &dev_attr_iface_tcp_wsf_disable.attr, &dev_attr_iface_tcp_wsf.attr, &dev_attr_iface_tcp_timer_scale.attr, &dev_attr_iface_tcp_timestamp_en.attr, &dev_attr_iface_cache_id.attr, &dev_attr_iface_redirect_en.attr, &dev_attr_iface_def_taskmgmt_tmo.attr, &dev_attr_iface_header_digest.attr, &dev_attr_iface_data_digest.attr, &dev_attr_iface_immediate_data.attr, &dev_attr_iface_initial_r2t.attr, &dev_attr_iface_data_seq_in_order.attr, &dev_attr_iface_data_pdu_in_order.attr, &dev_attr_iface_erl.attr, &dev_attr_iface_max_recv_dlength.attr, &dev_attr_iface_first_burst_len.attr, &dev_attr_iface_max_outstanding_r2t.attr, &dev_attr_iface_max_burst_len.attr, &dev_attr_iface_chap_auth.attr, &dev_attr_iface_bidi_chap.attr, &dev_attr_iface_discovery_auth_optional.attr, &dev_attr_iface_discovery_logout.attr, &dev_attr_iface_strict_login_comp_en.attr, &dev_attr_iface_initiator_name.attr, &dev_attr_ipv4_iface_dhcp_dns_address_en.attr, &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr, &dev_attr_ipv4_iface_tos_en.attr, &dev_attr_ipv4_iface_tos.attr, &dev_attr_ipv4_iface_grat_arp_en.attr, &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr, &dev_attr_ipv4_iface_dhcp_alt_client_id.attr, &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr, &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr, &dev_attr_ipv4_iface_dhcp_vendor_id.attr, &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr, &dev_attr_ipv4_iface_fragment_disable.attr, &dev_attr_ipv4_iface_incoming_forwarding_en.attr, &dev_attr_ipv4_iface_ttl.attr, &dev_attr_ipv6_iface_link_local_state.attr, &dev_attr_ipv6_iface_router_state.attr, &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr, &dev_attr_ipv6_iface_mld_en.attr, &dev_attr_ipv6_iface_flow_label.attr, 
&dev_attr_ipv6_iface_traffic_class.attr, &dev_attr_ipv6_iface_hop_limit.attr, &dev_attr_ipv6_iface_nd_reachable_tmo.attr, &dev_attr_ipv6_iface_nd_rexmit_time.attr, &dev_attr_ipv6_iface_nd_stale_tmo.attr, &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr, &dev_attr_ipv6_iface_router_adv_link_mtu.attr, NULL, }; static struct attribute_group iscsi_iface_group = { .attrs = iscsi_iface_attrs, .is_visible = iscsi_iface_attr_is_visible, }; /* convert iscsi_ipaddress_state values to ascii string name */ static const struct { enum iscsi_ipaddress_state value; char *name; } iscsi_ipaddress_state_names[] = { {ISCSI_IPDDRESS_STATE_UNCONFIGURED, "Unconfigured" }, {ISCSI_IPDDRESS_STATE_ACQUIRING, "Acquiring" }, {ISCSI_IPDDRESS_STATE_TENTATIVE, "Tentative" }, {ISCSI_IPDDRESS_STATE_VALID, "Valid" }, {ISCSI_IPDDRESS_STATE_DISABLING, "Disabling" }, {ISCSI_IPDDRESS_STATE_INVALID, "Invalid" }, {ISCSI_IPDDRESS_STATE_DEPRECATED, "Deprecated" }, }; char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state) { int i; char *state = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) { if (iscsi_ipaddress_state_names[i].value == port_state) { state = iscsi_ipaddress_state_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name); /* convert iscsi_router_state values to ascii string name */ static const struct { enum iscsi_router_state value; char *name; } iscsi_router_state_names[] = { {ISCSI_ROUTER_STATE_UNKNOWN, "Unknown" }, {ISCSI_ROUTER_STATE_ADVERTISED, "Advertised" }, {ISCSI_ROUTER_STATE_MANUAL, "Manual" }, {ISCSI_ROUTER_STATE_STALE, "Stale" }, }; char *iscsi_get_router_state_name(enum iscsi_router_state router_state) { int i; char *state = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) { if (iscsi_router_state_names[i].value == router_state) { state = iscsi_router_state_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_router_state_name); struct iscsi_iface * iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, uint32_t iface_type, uint32_t iface_num, int dd_size) { struct iscsi_iface *iface; int err; iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL); if (!iface) return NULL; iface->transport = transport; iface->iface_type = iface_type; iface->iface_num = iface_num; iface->dev.release = iscsi_iface_release; iface->dev.class = &iscsi_iface_class; /* parent reference released in iscsi_iface_release */ iface->dev.parent = get_device(&shost->shost_gendev); if (iface_type == ISCSI_IFACE_TYPE_IPV4) dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no, iface_num); else dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no, iface_num); err = device_register(&iface->dev); if (err) goto put_dev; err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group); if (err) goto unreg_iface; if (dd_size) iface->dd_data = &iface[1]; return iface; unreg_iface: device_unregister(&iface->dev); return NULL; put_dev: put_device(&iface->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_iface); void iscsi_destroy_iface(struct iscsi_iface *iface) { sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group); device_unregister(&iface->dev); } EXPORT_SYMBOL_GPL(iscsi_destroy_iface); /* * Interface to display flash node params to sysfs */ #define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name, _mode, _show, _store) /* flash node session attrs show */ #define iscsi_flashnode_sess_attr_show(type, name, param) \ 
static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_bus_flash_session *fnode_sess = \ iscsi_dev_to_flash_session(dev);\ struct iscsi_transport *t = fnode_sess->transport; \ return t->get_flashnode_param(fnode_sess, param, buf); \ } \ #define iscsi_flashnode_sess_attr(type, name, param) \ iscsi_flashnode_sess_attr_show(type, name, param) \ static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ show_##type##_##name, NULL); /* Flash node session attributes */ iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable, ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE); iscsi_flashnode_sess_attr(fnode, discovery_session, ISCSI_FLASHNODE_DISCOVERY_SESS); iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE); iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN); iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN); iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN); iscsi_flashnode_sess_attr(fnode, data_seq_in_order, ISCSI_FLASHNODE_DATASEQ_INORDER); iscsi_flashnode_sess_attr(fnode, data_pdu_in_order, ISCSI_FLASHNODE_PDU_INORDER); iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN); iscsi_flashnode_sess_attr(fnode, discovery_logout, ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN); iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN); iscsi_flashnode_sess_attr(fnode, discovery_auth_optional, ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL); iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL); iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST); iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT); iscsi_flashnode_sess_attr(fnode, def_time2retain, ISCSI_FLASHNODE_DEF_TIME2RETAIN); iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T); iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID); iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID); iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST); iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo, ISCSI_FLASHNODE_DEF_TASKMGMT_TMO); iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS); iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME); iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT); iscsi_flashnode_sess_attr(fnode, discovery_parent_idx, ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX); iscsi_flashnode_sess_attr(fnode, discovery_parent_type, ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE); iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX); iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX); iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME); iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN); iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD); iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN); iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT); static struct attribute *iscsi_flashnode_sess_attrs[] = { &dev_attr_fnode_auto_snd_tgt_disable.attr, &dev_attr_fnode_discovery_session.attr, &dev_attr_fnode_portal_type.attr, &dev_attr_fnode_entry_enable.attr, &dev_attr_fnode_immediate_data.attr, &dev_attr_fnode_initial_r2t.attr, &dev_attr_fnode_data_seq_in_order.attr, &dev_attr_fnode_data_pdu_in_order.attr, &dev_attr_fnode_chap_auth.attr, &dev_attr_fnode_discovery_logout.attr, 
&dev_attr_fnode_bidi_chap.attr, &dev_attr_fnode_discovery_auth_optional.attr, &dev_attr_fnode_erl.attr, &dev_attr_fnode_first_burst_len.attr, &dev_attr_fnode_def_time2wait.attr, &dev_attr_fnode_def_time2retain.attr, &dev_attr_fnode_max_outstanding_r2t.attr, &dev_attr_fnode_isid.attr, &dev_attr_fnode_tsid.attr, &dev_attr_fnode_max_burst_len.attr, &dev_attr_fnode_def_taskmgmt_tmo.attr, &dev_attr_fnode_targetalias.attr, &dev_attr_fnode_targetname.attr, &dev_attr_fnode_tpgt.attr, &dev_attr_fnode_discovery_parent_idx.attr, &dev_attr_fnode_discovery_parent_type.attr, &dev_attr_fnode_chap_in_idx.attr, &dev_attr_fnode_chap_out_idx.attr, &dev_attr_fnode_username.attr, &dev_attr_fnode_username_in.attr, &dev_attr_fnode_password.attr, &dev_attr_fnode_password_in.attr, &dev_attr_fnode_is_boot_target.attr, NULL, }; static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_bus_flash_session *fnode_sess = iscsi_dev_to_flash_session(dev); struct iscsi_transport *t = fnode_sess->transport; int param; if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) { param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE; } else if (attr == &dev_attr_fnode_discovery_session.attr) { param = ISCSI_FLASHNODE_DISCOVERY_SESS; } else if (attr == &dev_attr_fnode_portal_type.attr) { param = ISCSI_FLASHNODE_PORTAL_TYPE; } else if (attr == &dev_attr_fnode_entry_enable.attr) { param = ISCSI_FLASHNODE_ENTRY_EN; } else if (attr == &dev_attr_fnode_immediate_data.attr) { param = ISCSI_FLASHNODE_IMM_DATA_EN; } else if (attr == &dev_attr_fnode_initial_r2t.attr) { param = ISCSI_FLASHNODE_INITIAL_R2T_EN; } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) { param = ISCSI_FLASHNODE_DATASEQ_INORDER; } else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) { param = ISCSI_FLASHNODE_PDU_INORDER; } else if (attr == &dev_attr_fnode_chap_auth.attr) { param = ISCSI_FLASHNODE_CHAP_AUTH_EN; } else if (attr == &dev_attr_fnode_discovery_logout.attr) { param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN; } else if (attr == &dev_attr_fnode_bidi_chap.attr) { param = ISCSI_FLASHNODE_BIDI_CHAP_EN; } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) { param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL; } else if (attr == &dev_attr_fnode_erl.attr) { param = ISCSI_FLASHNODE_ERL; } else if (attr == &dev_attr_fnode_first_burst_len.attr) { param = ISCSI_FLASHNODE_FIRST_BURST; } else if (attr == &dev_attr_fnode_def_time2wait.attr) { param = ISCSI_FLASHNODE_DEF_TIME2WAIT; } else if (attr == &dev_attr_fnode_def_time2retain.attr) { param = ISCSI_FLASHNODE_DEF_TIME2RETAIN; } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) { param = ISCSI_FLASHNODE_MAX_R2T; } else if (attr == &dev_attr_fnode_isid.attr) { param = ISCSI_FLASHNODE_ISID; } else if (attr == &dev_attr_fnode_tsid.attr) { param = ISCSI_FLASHNODE_TSID; } else if (attr == &dev_attr_fnode_max_burst_len.attr) { param = ISCSI_FLASHNODE_MAX_BURST; } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) { param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO; } else if (attr == &dev_attr_fnode_targetalias.attr) { param = ISCSI_FLASHNODE_ALIAS; } else if (attr == &dev_attr_fnode_targetname.attr) { param = ISCSI_FLASHNODE_NAME; } else if (attr == &dev_attr_fnode_tpgt.attr) { param = ISCSI_FLASHNODE_TPGT; } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) { param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX; } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) { param = 
ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE; } else if (attr == &dev_attr_fnode_chap_in_idx.attr) { param = ISCSI_FLASHNODE_CHAP_IN_IDX; } else if (attr == &dev_attr_fnode_chap_out_idx.attr) { param = ISCSI_FLASHNODE_CHAP_OUT_IDX; } else if (attr == &dev_attr_fnode_username.attr) { param = ISCSI_FLASHNODE_USERNAME; } else if (attr == &dev_attr_fnode_username_in.attr) { param = ISCSI_FLASHNODE_USERNAME_IN; } else if (attr == &dev_attr_fnode_password.attr) { param = ISCSI_FLASHNODE_PASSWORD; } else if (attr == &dev_attr_fnode_password_in.attr) { param = ISCSI_FLASHNODE_PASSWORD_IN; } else if (attr == &dev_attr_fnode_is_boot_target.attr) { param = ISCSI_FLASHNODE_IS_BOOT_TGT; } else { WARN_ONCE(1, "Invalid flashnode session attr"); return 0; } return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); } static struct attribute_group iscsi_flashnode_sess_attr_group = { .attrs = iscsi_flashnode_sess_attrs, .is_visible = iscsi_flashnode_sess_attr_is_visible, }; static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = { &iscsi_flashnode_sess_attr_group, NULL, }; static void iscsi_flashnode_sess_release(struct device *dev) { struct iscsi_bus_flash_session *fnode_sess = iscsi_dev_to_flash_session(dev); kfree(fnode_sess->targetname); kfree(fnode_sess->targetalias); kfree(fnode_sess->portal_type); kfree(fnode_sess); } static const struct device_type iscsi_flashnode_sess_dev_type = { .name = "iscsi_flashnode_sess_dev_type", .groups = iscsi_flashnode_sess_attr_groups, .release = iscsi_flashnode_sess_release, }; /* flash node connection attrs show */ #define iscsi_flashnode_conn_attr_show(type, name, param) \ static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\ struct iscsi_bus_flash_session *fnode_sess = \ iscsi_flash_conn_to_flash_session(fnode_conn);\ struct iscsi_transport *t = fnode_conn->transport; \ return t->get_flashnode_param(fnode_sess, param, buf); \ } \ #define iscsi_flashnode_conn_attr(type, name, param) \ iscsi_flashnode_conn_attr_show(type, name, param) \ static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ show_##type##_##name, NULL); /* Flash node connection attributes */ iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6, ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6); iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN); iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN); iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN); iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat, ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT); iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable, ISCSI_FLASHNODE_TCP_NAGLE_DISABLE); iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable, ISCSI_FLASHNODE_TCP_WSF_DISABLE); iscsi_flashnode_conn_attr(fnode, tcp_timer_scale, ISCSI_FLASHNODE_TCP_TIMER_SCALE); iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable, ISCSI_FLASHNODE_TCP_TIMESTAMP_EN); iscsi_flashnode_conn_attr(fnode, fragment_disable, ISCSI_FLASHNODE_IP_FRAG_DISABLE); iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO); iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT); iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR); iscsi_flashnode_conn_attr(fnode, max_recv_dlength, ISCSI_FLASHNODE_MAX_RECV_DLENGTH); iscsi_flashnode_conn_attr(fnode, max_xmit_dlength, ISCSI_FLASHNODE_MAX_XMIT_DLENGTH); iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT); 
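/*
 * For orientation, each iscsi_flashnode_conn_attr() invocation above and
 * below is a macro that expands to a show routine plus a read-only sysfs
 * attribute; e.g. iscsi_flashnode_conn_attr(fnode, port,
 * ISCSI_FLASHNODE_PORT) expands (approximately) to:
 *
 *	static ssize_t show_fnode_port(struct device *dev,
 *				       struct device_attribute *attr,
 *				       char *buf)
 *	{
 *		...
 *		return t->get_flashnode_param(fnode_sess,
 *					      ISCSI_FLASHNODE_PORT, buf);
 *	}
 *	static struct device_attribute dev_attr_fnode_port =
 *		__ATTR(port, S_IRUGO, show_fnode_port, NULL);
 */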
iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS); iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC); iscsi_flashnode_conn_attr(fnode, ipv6_flow_label, ISCSI_FLASHNODE_IPV6_FLOW_LABEL); iscsi_flashnode_conn_attr(fnode, redirect_ipaddr, ISCSI_FLASHNODE_REDIRECT_IPADDR); iscsi_flashnode_conn_attr(fnode, max_segment_size, ISCSI_FLASHNODE_MAX_SEGMENT_SIZE); iscsi_flashnode_conn_attr(fnode, link_local_ipv6, ISCSI_FLASHNODE_LINK_LOCAL_IPV6); iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF); iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF); iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN); iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN); static struct attribute *iscsi_flashnode_conn_attrs[] = { &dev_attr_fnode_is_fw_assigned_ipv6.attr, &dev_attr_fnode_header_digest.attr, &dev_attr_fnode_data_digest.attr, &dev_attr_fnode_snack_req.attr, &dev_attr_fnode_tcp_timestamp_stat.attr, &dev_attr_fnode_tcp_nagle_disable.attr, &dev_attr_fnode_tcp_wsf_disable.attr, &dev_attr_fnode_tcp_timer_scale.attr, &dev_attr_fnode_tcp_timestamp_enable.attr, &dev_attr_fnode_fragment_disable.attr, &dev_attr_fnode_max_recv_dlength.attr, &dev_attr_fnode_max_xmit_dlength.attr, &dev_attr_fnode_keepalive_tmo.attr, &dev_attr_fnode_port.attr, &dev_attr_fnode_ipaddress.attr, &dev_attr_fnode_redirect_ipaddr.attr, &dev_attr_fnode_max_segment_size.attr, &dev_attr_fnode_local_port.attr, &dev_attr_fnode_ipv4_tos.attr, &dev_attr_fnode_ipv6_traffic_class.attr, &dev_attr_fnode_ipv6_flow_label.attr, &dev_attr_fnode_link_local_ipv6.attr, &dev_attr_fnode_tcp_xmit_wsf.attr, &dev_attr_fnode_tcp_recv_wsf.attr, &dev_attr_fnode_statsn.attr, &dev_attr_fnode_exp_statsn.attr, NULL, }; static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); struct iscsi_transport *t = fnode_conn->transport; int param; if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) { param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6; } else if (attr == &dev_attr_fnode_header_digest.attr) { param = ISCSI_FLASHNODE_HDR_DGST_EN; } else if (attr == &dev_attr_fnode_data_digest.attr) { param = ISCSI_FLASHNODE_DATA_DGST_EN; } else if (attr == &dev_attr_fnode_snack_req.attr) { param = ISCSI_FLASHNODE_SNACK_REQ_EN; } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) { param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT; } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) { param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE; } else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) { param = ISCSI_FLASHNODE_TCP_WSF_DISABLE; } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) { param = ISCSI_FLASHNODE_TCP_TIMER_SCALE; } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) { param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN; } else if (attr == &dev_attr_fnode_fragment_disable.attr) { param = ISCSI_FLASHNODE_IP_FRAG_DISABLE; } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) { param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH; } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) { param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH; } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) { param = ISCSI_FLASHNODE_KEEPALIVE_TMO; } else if (attr == &dev_attr_fnode_port.attr) { param = ISCSI_FLASHNODE_PORT; } else if (attr == &dev_attr_fnode_ipaddress.attr) { param = 
ISCSI_FLASHNODE_IPADDR; } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) { param = ISCSI_FLASHNODE_REDIRECT_IPADDR; } else if (attr == &dev_attr_fnode_max_segment_size.attr) { param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE; } else if (attr == &dev_attr_fnode_local_port.attr) { param = ISCSI_FLASHNODE_LOCAL_PORT; } else if (attr == &dev_attr_fnode_ipv4_tos.attr) { param = ISCSI_FLASHNODE_IPV4_TOS; } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) { param = ISCSI_FLASHNODE_IPV6_TC; } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) { param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL; } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) { param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6; } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) { param = ISCSI_FLASHNODE_TCP_XMIT_WSF; } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) { param = ISCSI_FLASHNODE_TCP_RECV_WSF; } else if (attr == &dev_attr_fnode_statsn.attr) { param = ISCSI_FLASHNODE_STATSN; } else if (attr == &dev_attr_fnode_exp_statsn.attr) { param = ISCSI_FLASHNODE_EXP_STATSN; } else { WARN_ONCE(1, "Invalid flashnode connection attr"); return 0; } return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); } static struct attribute_group iscsi_flashnode_conn_attr_group = { .attrs = iscsi_flashnode_conn_attrs, .is_visible = iscsi_flashnode_conn_attr_is_visible, }; static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = { &iscsi_flashnode_conn_attr_group, NULL, }; static void iscsi_flashnode_conn_release(struct device *dev) { struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); kfree(fnode_conn->ipaddress); kfree(fnode_conn->redirect_ipaddr); kfree(fnode_conn->link_local_ipv6_addr); kfree(fnode_conn); } static const struct device_type iscsi_flashnode_conn_dev_type = { .name = "iscsi_flashnode_conn_dev_type", .groups = iscsi_flashnode_conn_attr_groups, .release = iscsi_flashnode_conn_release, }; static const struct bus_type iscsi_flashnode_bus; int iscsi_flashnode_bus_match(struct device *dev, const struct device_driver *drv) { if (dev->bus == &iscsi_flashnode_bus) return 1; return 0; } EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); static const struct bus_type iscsi_flashnode_bus = { .name = "iscsi_flashnode", .match = &iscsi_flashnode_bus_match, }; /** * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs * @shost: pointer to host data * @index: index of flashnode to add in sysfs * @transport: pointer to transport data * @dd_size: total size to allocate * * Adds a sysfs entry for the flashnode session attributes * * Returns: * pointer to allocated flashnode sess on success * %NULL on failure */ struct iscsi_bus_flash_session * iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, struct iscsi_transport *transport, int dd_size) { struct iscsi_bus_flash_session *fnode_sess; int err; fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL); if (!fnode_sess) return NULL; fnode_sess->transport = transport; fnode_sess->target_id = index; fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type; fnode_sess->dev.bus = &iscsi_flashnode_bus; fnode_sess->dev.parent = &shost->shost_gendev; dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u", shost->host_no, index); err = device_register(&fnode_sess->dev); if (err) goto put_dev; if (dd_size) fnode_sess->dd_data = &fnode_sess[1]; return fnode_sess; put_dev: put_device(&fnode_sess->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); /** * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs * @shost: 
pointer to host data
 * @fnode_sess: pointer to the parent flashnode session entry
 * @transport: pointer to transport data
 * @dd_size: total size to allocate
 *
 * Adds a sysfs entry for the flashnode connection attributes
 *
 * Returns:
 *	pointer to allocated flashnode conn on success
 *	%NULL on failure
 */
struct iscsi_bus_flash_conn *
iscsi_create_flashnode_conn(struct Scsi_Host *shost,
			    struct iscsi_bus_flash_session *fnode_sess,
			    struct iscsi_transport *transport,
			    int dd_size)
{
	struct iscsi_bus_flash_conn *fnode_conn;
	int err;

	fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL);
	if (!fnode_conn)
		return NULL;

	fnode_conn->transport = transport;
	fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type;
	fnode_conn->dev.bus = &iscsi_flashnode_bus;
	fnode_conn->dev.parent = &fnode_sess->dev;
	dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0",
		     shost->host_no, fnode_sess->target_id);

	err = device_register(&fnode_conn->dev);
	if (err)
		goto put_dev;

	if (dd_size)
		fnode_conn->dd_data = &fnode_conn[1];

	return fnode_conn;

put_dev:
	put_device(&fnode_conn->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
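/*
 * An illustrative sketch (hypothetical LLD code) of exporting one
 * firmware flash entry, index idx, as a session plus its connection;
 * my_iscsi_transport is assumed to be the driver's registered transport:
 *
 *	fnode_sess = iscsi_create_flashnode_sess(shost, idx,
 *						 &my_iscsi_transport, 0);
 *	if (!fnode_sess)
 *		return -ENOMEM;
 *
 *	fnode_conn = iscsi_create_flashnode_conn(shost, fnode_sess,
 *						 &my_iscsi_transport, 0);
 *	if (!fnode_conn) {
 *		iscsi_destroy_flashnode_sess(fnode_sess);
 *		return -ENOMEM;
 *	}
 */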
/**
 * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn
 * @dev: device to verify
 * @data: pointer to data containing value to use for verification
 *
 * Verifies if the passed device is flashnode conn device
 *
 * Returns:
 *	1 on success
 *	0 on failure
 */
static int iscsi_is_flashnode_conn_dev(struct device *dev, const void *data)
{
	return dev->bus == &iscsi_flashnode_bus;
}

static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
{
	device_unregister(&fnode_conn->dev);
	return 0;
}

static int flashnode_match_index(struct device *dev, const void *data)
{
	struct iscsi_bus_flash_session *fnode_sess = NULL;
	int ret = 0;

	if (!iscsi_flashnode_bus_match(dev, NULL))
		goto exit_match_index;

	fnode_sess = iscsi_dev_to_flash_session(dev);
	ret = (fnode_sess->target_id == *((const int *)data)) ? 1 : 0;

exit_match_index:
	return ret;
}

/**
 * iscsi_get_flashnode_by_index - finds flashnode session entry by index
 * @shost: pointer to host data
 * @idx: index to match
 *
 * Finds the flashnode session object for the passed index
 *
 * Returns:
 *	pointer to found flashnode session object on success
 *	%NULL on failure
 */
static struct iscsi_bus_flash_session *
iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
{
	struct iscsi_bus_flash_session *fnode_sess = NULL;
	struct device *dev;

	dev = device_find_child(&shost->shost_gendev, &idx,
				flashnode_match_index);
	if (dev)
		fnode_sess = iscsi_dev_to_flash_session(dev);

	return fnode_sess;
}

/**
 * iscsi_find_flashnode_sess - finds flashnode session entry
 * @shost: pointer to host data
 * @data: pointer to data containing value to use for comparison
 * @fn: function pointer that does actual comparison
 *
 * Finds the flashnode session object comparing the data passed using logic
 * defined in passed function pointer
 *
 * Returns:
 *	pointer to found flashnode session device object on success
 *	%NULL on failure
 */
struct device *
iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data,
			  device_match_t fn)
{
	return device_find_child(&shost->shost_gendev, data, fn);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);

/**
 * iscsi_find_flashnode_conn - finds flashnode connection entry
 * @fnode_sess: pointer to parent flashnode session entry
 *
 * Finds the flashnode connection object for the passed parent flashnode
 * session entry
 *
 * Returns:
 *	pointer to found flashnode connection device object on success
 *	%NULL on failure
 */
struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
{
	return device_find_child(&fnode_sess->dev, NULL,
				 iscsi_is_flashnode_conn_dev);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);

static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
{
	if (!iscsi_is_flashnode_conn_dev(dev, NULL))
		return 0;

	return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
}

/**
 * iscsi_destroy_flashnode_sess - destroy flashnode session entry
 * @fnode_sess: pointer to flashnode session entry to be destroyed
 *
 * Deletes the flashnode session entry and all children flashnode connection
 * entries from sysfs
 */
void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
{
	int err;

	err = device_for_each_child(&fnode_sess->dev, NULL,
				    iscsi_iter_destroy_flashnode_conn_fn);
	if (err)
		pr_err("Could not delete all connections for %s. Error %d.\n",
		       fnode_sess->dev.kobj.name, err);

	device_unregister(&fnode_sess->dev);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);

static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
{
	if (!iscsi_flashnode_bus_match(dev, NULL))
		return 0;

	iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
	return 0;
}

/**
 * iscsi_destroy_all_flashnode - destroy all flashnode session entries
 * @shost: pointer to host data
 *
 * Destroys all the flashnode session entries and all corresponding children
 * flashnode connection entries from sysfs
 */
void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL,
			      iscsi_iter_destroy_flashnode_fn);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);

/*
 * BSG support
 */

/**
 * iscsi_bsg_host_dispatch - Dispatch command to LLD.
* @job: bsg job to be processed */ static int iscsi_bsg_host_dispatch(struct bsg_job *job) { struct Scsi_Host *shost = iscsi_job_to_shost(job); struct iscsi_bsg_request *req = job->request; struct iscsi_bsg_reply *reply = job->reply; struct iscsi_internal *i = to_iscsi_internal(shost->transportt); int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ int ret; /* check if we have the msgcode value at least */ if (job->request_len < sizeof(uint32_t)) { ret = -ENOMSG; goto fail_host_msg; } /* Validate the host command */ switch (req->msgcode) { case ISCSI_BSG_HST_VENDOR: cmdlen += sizeof(struct iscsi_bsg_host_vendor); if ((shost->hostt->vendor_id == 0L) || (req->rqst_data.h_vendor.vendor_id != shost->hostt->vendor_id)) { ret = -ESRCH; goto fail_host_msg; } break; default: ret = -EBADR; goto fail_host_msg; } /* check if we really have all the request data needed */ if (job->request_len < cmdlen) { ret = -ENOMSG; goto fail_host_msg; } ret = i->iscsi_transport->bsg_request(job); if (!ret) return 0; fail_host_msg: /* return the errno failure code as the only status */ BUG_ON(job->reply_len < sizeof(uint32_t)); reply->reply_payload_rcv_len = 0; reply->result = ret; job->reply_len = sizeof(uint32_t); bsg_job_done(job, ret, 0); return 0; } /** * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests * @shost: shost for iscsi_host * @ihost: iscsi_cls_host adding the structures to */ static int iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) { struct device *dev = &shost->shost_gendev; struct iscsi_internal *i = to_iscsi_internal(shost->transportt); struct queue_limits lim; struct request_queue *q; char bsg_name[20]; if (!i->iscsi_transport->bsg_request) return -ENOTSUPP; snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); scsi_init_limits(shost, &lim); q = bsg_setup_queue(dev, bsg_name, &lim, iscsi_bsg_host_dispatch, NULL, 0); if (IS_ERR(q)) { shost_printk(KERN_ERR, shost, "bsg interface failed to " "initialize - no request queue\n"); return PTR_ERR(q); } ihost->bsg_q = q; return 0; } static int iscsi_setup_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct iscsi_cls_host *ihost = shost->shost_data; memset(ihost, 0, sizeof(*ihost)); mutex_init(&ihost->mutex); iscsi_bsg_host_add(shost, ihost); /* ignore any bsg add error - we just can't do sgio */ return 0; } static int iscsi_remove_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct iscsi_cls_host *ihost = shost->shost_data; bsg_remove_queue(ihost->bsg_q); return 0; } static DECLARE_TRANSPORT_CLASS(iscsi_host_class, "iscsi_host", iscsi_setup_host, iscsi_remove_host, NULL); static DECLARE_TRANSPORT_CLASS(iscsi_session_class, "iscsi_session", NULL, NULL, NULL); static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, "iscsi_connection", NULL, NULL, NULL); static struct sock *nls; static DEFINE_MUTEX(rx_queue_mutex); static LIST_HEAD(sesslist); static DEFINE_SPINLOCK(sesslock); static LIST_HEAD(connlist); static DEFINE_SPINLOCK(connlock); static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn) { struct iscsi_cls_session *sess = iscsi_dev_to_session(conn->dev.parent); return sess->sid; } /* * Returns the matching session to a given sid */ static struct iscsi_cls_session *iscsi_session_lookup(uint32_t sid) { unsigned long flags; struct iscsi_cls_session *sess; spin_lock_irqsave(&sesslock, flags); list_for_each_entry(sess, 
&sesslist, sess_list) { if (sess->sid == sid) { spin_unlock_irqrestore(&sesslock, flags); return sess; } } spin_unlock_irqrestore(&sesslock, flags); return NULL; } /* * Returns the matching connection to a given sid / cid tuple */ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid) { unsigned long flags; struct iscsi_cls_conn *conn; spin_lock_irqsave(&connlock, flags); list_for_each_entry(conn, &connlist, conn_list) { if ((conn->cid == cid) && (iscsi_conn_get_sid(conn) == sid)) { spin_unlock_irqrestore(&connlock, flags); return conn; } } spin_unlock_irqrestore(&connlock, flags); return NULL; } /* * The following functions can be used by LLDs that allocate * their own scsi_hosts or by software iscsi LLDs */ static struct { int value; char *name; } iscsi_session_state_names[] = { { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" }, { ISCSI_SESSION_FAILED, "FAILED" }, { ISCSI_SESSION_FREE, "FREE" }, }; static const char *iscsi_session_state_name(int state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) { if (iscsi_session_state_names[i].value == state) { name = iscsi_session_state_names[i].name; break; } } return name; } static char *iscsi_session_target_state_name[] = { [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND", [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED", [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED", [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING", }; int iscsi_session_chkready(struct iscsi_cls_session *session) { int err; switch (session->state) { case ISCSI_SESSION_LOGGED_IN: err = 0; break; case ISCSI_SESSION_FAILED: err = DID_IMM_RETRY << 16; break; case ISCSI_SESSION_FREE: err = DID_TRANSPORT_FAILFAST << 16; break; default: err = DID_NO_CONNECT << 16; break; } return err; } EXPORT_SYMBOL_GPL(iscsi_session_chkready); int iscsi_is_session_online(struct iscsi_cls_session *session) { unsigned long flags; int ret = 0; spin_lock_irqsave(&session->lock, flags); if (session->state == ISCSI_SESSION_LOGGED_IN) ret = 1; spin_unlock_irqrestore(&session->lock, flags); return ret; } EXPORT_SYMBOL_GPL(iscsi_is_session_online); static void iscsi_session_release(struct device *dev) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev); struct Scsi_Host *shost; shost = iscsi_session_to_shost(session); scsi_host_put(shost); ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n"); kfree(session); } int iscsi_is_session_dev(const struct device *dev) { return dev->release == iscsi_session_release; } EXPORT_SYMBOL_GPL(iscsi_is_session_dev); static int iscsi_iter_session_fn(struct device *dev, void *data) { void (* fn) (struct iscsi_cls_session *) = data; if (!iscsi_is_session_dev(dev)) return 0; fn(iscsi_dev_to_session(dev)); return 0; } void iscsi_host_for_each_session(struct Scsi_Host *shost, void (*fn)(struct iscsi_cls_session *)) { device_for_each_child(&shost->shost_gendev, fn, iscsi_iter_session_fn); } EXPORT_SYMBOL_GPL(iscsi_host_for_each_session); struct iscsi_scan_data { unsigned int channel; unsigned int id; u64 lun; enum scsi_scan_mode rescan; }; static int iscsi_user_scan_session(struct device *dev, void *data) { struct iscsi_scan_data *scan_data = data; struct iscsi_cls_session *session; struct Scsi_Host *shost; struct iscsi_cls_host *ihost; unsigned long flags; unsigned int id; if (!iscsi_is_session_dev(dev)) return 0; session = iscsi_dev_to_session(dev); ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n"); shost = iscsi_session_to_shost(session); ihost = shost->shost_data; mutex_lock(&ihost->mutex); 
spin_lock_irqsave(&session->lock, flags); if (session->state != ISCSI_SESSION_LOGGED_IN) { spin_unlock_irqrestore(&session->lock, flags); goto user_scan_exit; } id = session->target_id; spin_unlock_irqrestore(&session->lock, flags); if (id != ISCSI_MAX_TARGET) { if ((scan_data->channel == SCAN_WILD_CARD || scan_data->channel == 0) && (scan_data->id == SCAN_WILD_CARD || scan_data->id == id)) { scsi_scan_target(&session->dev, 0, id, scan_data->lun, scan_data->rescan); spin_lock_irqsave(&session->lock, flags); session->target_state = ISCSI_SESSION_TARGET_SCANNED; spin_unlock_irqrestore(&session->lock, flags); } } user_scan_exit: mutex_unlock(&ihost->mutex); ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n"); return 0; } static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct iscsi_scan_data scan_data; scan_data.channel = channel; scan_data.id = id; scan_data.lun = lun; scan_data.rescan = SCSI_SCAN_MANUAL; return device_for_each_child(&shost->shost_gendev, &scan_data, iscsi_user_scan_session); } static void iscsi_scan_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, scan_work); struct iscsi_scan_data scan_data; scan_data.channel = 0; scan_data.id = SCAN_WILD_CARD; scan_data.lun = SCAN_WILD_CARD; scan_data.rescan = SCSI_SCAN_RESCAN; iscsi_user_scan_session(&session->dev, &scan_data); } /** * iscsi_block_scsi_eh - block scsi eh until session state has transitioned * @cmd: scsi cmd passed to scsi eh handler * * If the session is down this function will wait for the recovery * timer to fire or for the session to be logged back in. If the * recovery timer fires then FAST_IO_FAIL is returned. The caller * should pass this error value to the scsi eh.
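 *
 * Return: 0 once the session is logged back in, or FAST_IO_FAIL if the
 * session reaches ISCSI_SESSION_FREE while waiting.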
*/ int iscsi_block_scsi_eh(struct scsi_cmnd *cmd) { struct iscsi_cls_session *session = starget_to_session(scsi_target(cmd->device)); unsigned long flags; int ret = 0; spin_lock_irqsave(&session->lock, flags); while (session->state != ISCSI_SESSION_LOGGED_IN) { if (session->state == ISCSI_SESSION_FREE) { ret = FAST_IO_FAIL; break; } spin_unlock_irqrestore(&session->lock, flags); msleep(1000); spin_lock_irqsave(&session->lock, flags); } spin_unlock_irqrestore(&session->lock, flags); return ret; } EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh); static void session_recovery_timedout(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, recovery_work.work); unsigned long flags; iscsi_cls_session_printk(KERN_INFO, session, "session recovery timed out after %d secs\n", session->recovery_tmo); spin_lock_irqsave(&session->lock, flags); switch (session->state) { case ISCSI_SESSION_FAILED: session->state = ISCSI_SESSION_FREE; break; case ISCSI_SESSION_LOGGED_IN: case ISCSI_SESSION_FREE: /* we raced with the unblock's flush */ spin_unlock_irqrestore(&session->lock, flags); return; } spin_unlock_irqrestore(&session->lock, flags); ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); if (session->transport->session_recovery_timedout) session->transport->session_recovery_timedout(session); } static void __iscsi_unblock_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, unblock_work); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n"); cancel_delayed_work_sync(&session->recovery_work); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_LOGGED_IN; spin_unlock_irqrestore(&session->lock, flags); /* start IO */ scsi_target_unblock(&session->dev, SDEV_RUNNING); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n"); } /** * iscsi_unblock_session - set a session as logged in and start IO. * @session: iscsi session * * Mark a session as ready to accept IO. */ void iscsi_unblock_session(struct iscsi_cls_session *session) { if (!cancel_work_sync(&session->block_work)) cancel_delayed_work_sync(&session->recovery_work); queue_work(session->workq, &session->unblock_work); /* * Blocking the session can be done from any context so we only * queue the block work. Make sure the unblock work has completed * because it flushes/cancels the other works and updates the state. 
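 *
 * Hypothetical LLD recovery sketch (illustrative, not from this file):
 * on a connection failure the driver calls
 * iscsi_block_session(cls_session), then, once the transport has
 * logged back in, calls iscsi_unblock_session(cls_session) to
 * resume IO.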
*/ flush_work(&session->unblock_work); } EXPORT_SYMBOL_GPL(iscsi_unblock_session); static void __iscsi_block_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, block_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n"); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FAILED; spin_unlock_irqrestore(&session->lock, flags); scsi_block_targets(shost, &session->dev); ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); if (session->recovery_tmo >= 0) queue_delayed_work(session->workq, &session->recovery_work, session->recovery_tmo * HZ); } void iscsi_block_session(struct iscsi_cls_session *session) { queue_work(session->workq, &session->block_work); } EXPORT_SYMBOL_GPL(iscsi_block_session); static void __iscsi_unbind_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, unbind_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; unsigned int target_id; bool remove_target = true; ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); /* Prevent new scans and make sure scanning is not in progress */ mutex_lock(&ihost->mutex); spin_lock_irqsave(&session->lock, flags); if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) { remove_target = false; } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); ISCSI_DBG_TRANS_SESSION(session, "Skipping target unbinding: Session is unbound/unbinding.\n"); return; } session->target_state = ISCSI_SESSION_TARGET_UNBINDING; target_id = session->target_id; session->target_id = ISCSI_MAX_TARGET; spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); if (remove_target) scsi_remove_target(&session->dev); if (session->ida_used) ida_free(&iscsi_sess_ida, target_id); iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); spin_lock_irqsave(&session->lock, flags); session->target_state = ISCSI_SESSION_TARGET_UNBOUND; spin_unlock_irqrestore(&session->lock, flags); } static void __iscsi_destroy_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, destroy_work); session->transport->destroy_session(session); } struct iscsi_cls_session * iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, int dd_size) { struct iscsi_cls_session *session; session = kzalloc(sizeof(*session) + dd_size, GFP_KERNEL); if (!session) return NULL; session->transport = transport; session->creator = -1; session->recovery_tmo = 120; session->recovery_tmo_sysfs_override = false; session->state = ISCSI_SESSION_FREE; INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); INIT_LIST_HEAD(&session->sess_list); INIT_WORK(&session->unblock_work, __iscsi_unblock_session); INIT_WORK(&session->block_work, __iscsi_block_session); INIT_WORK(&session->unbind_work, __iscsi_unbind_session); INIT_WORK(&session->scan_work, iscsi_scan_session); INIT_WORK(&session->destroy_work, __iscsi_destroy_session); spin_lock_init(&session->lock); /* this is released in the dev's release function */ scsi_host_get(shost); session->dev.parent = &shost->shost_gendev; session->dev.release = iscsi_session_release; 
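	/*
	 * Descriptive note (added): dd_data must sit directly behind the
	 * session struct. The single kzalloc() above sized the allocation
	 * as sizeof(*session) + dd_size, so &session[1] below is the
	 * start of the caller's private area.
	 */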
device_initialize(&session->dev); if (dd_size) session->dd_data = &session[1]; ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n"); return session; } EXPORT_SYMBOL_GPL(iscsi_alloc_session); int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) { struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; int id = 0; int err; session->sid = atomic_add_return(1, &iscsi_session_nr); session->workq = alloc_workqueue("iscsi_ctrl_%d:%d", WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, shost->host_no, session->sid); if (!session->workq) return -ENOMEM; if (target_id == ISCSI_MAX_TARGET) { id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL); if (id < 0) { iscsi_cls_session_printk(KERN_ERR, session, "Failure in Target ID Allocation\n"); err = id; goto destroy_wq; } session->target_id = (unsigned int)id; session->ida_used = true; } else session->target_id = target_id; spin_lock_irqsave(&session->lock, flags); session->target_state = ISCSI_SESSION_TARGET_ALLOCATED; spin_unlock_irqrestore(&session->lock, flags); dev_set_name(&session->dev, "session%u", session->sid); err = device_add(&session->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register session's dev\n"); goto release_ida; } err = transport_register_device(&session->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register transport's dev\n"); goto release_dev; } spin_lock_irqsave(&sesslock, flags); list_add(&session->sess_list, &sesslist); spin_unlock_irqrestore(&sesslock, flags); iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); return 0; release_dev: device_del(&session->dev); release_ida: if (session->ida_used) ida_free(&iscsi_sess_ida, session->target_id); destroy_wq: destroy_workqueue(session->workq); return err; } EXPORT_SYMBOL_GPL(iscsi_add_session); static void iscsi_conn_release(struct device *dev) { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); struct device *parent = conn->dev.parent; ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n"); kfree(conn); put_device(parent); } static int iscsi_is_conn_dev(const struct device *dev) { return dev->release == iscsi_conn_release; } static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) { if (!iscsi_is_conn_dev(dev)) return 0; iscsi_remove_conn(iscsi_dev_to_conn(dev)); iscsi_put_conn(iscsi_dev_to_conn(dev)); return 0; } void iscsi_remove_session(struct iscsi_cls_session *session) { unsigned long flags; int err; ISCSI_DBG_TRANS_SESSION(session, "Removing session\n"); spin_lock_irqsave(&sesslock, flags); if (!list_empty(&session->sess_list)) list_del(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); if (!cancel_work_sync(&session->block_work)) cancel_delayed_work_sync(&session->recovery_work); cancel_work_sync(&session->unblock_work); /* * If we are blocked let commands flow again. The lld or iscsi * layer should set up the queuecommand to fail commands. * We assume that LLD will not be calling block/unblock while * removing the session. */ spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FREE; spin_unlock_irqrestore(&session->lock, flags); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); /* * qla4xxx can perform its own scans when it runs in kernel only * mode. Make sure to flush those scans.
*/ flush_work(&session->scan_work); /* flush running unbind operations */ flush_work(&session->unbind_work); __iscsi_unbind_session(&session->unbind_work); /* hw iscsi may not have removed all connections from session */ err = device_for_each_child(&session->dev, NULL, iscsi_iter_destroy_conn_fn); if (err) iscsi_cls_session_printk(KERN_ERR, session, "Could not delete all connections " "for session. Error %d.\n", err); transport_unregister_device(&session->dev); destroy_workqueue(session->workq); ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n"); device_del(&session->dev); } EXPORT_SYMBOL_GPL(iscsi_remove_session); static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) { ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n"); switch (flag) { case STOP_CONN_RECOVER: WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); break; case STOP_CONN_TERM: WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); break; default: iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", flag); return; } conn->transport->stop_conn(conn, flag); ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); } static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) { struct iscsi_cls_session *session = iscsi_conn_to_session(conn); struct iscsi_endpoint *ep; ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); if (!conn->ep || !session->transport->ep_disconnect) return; ep = conn->ep; conn->ep = NULL; session->transport->unbind_conn(conn, is_active); session->transport->ep_disconnect(ep); ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); } static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, struct iscsi_endpoint *ep, bool is_active) { /* Check if this was a conn error and the kernel took ownership */ spin_lock_irq(&conn->lock); if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); iscsi_ep_disconnect(conn, is_active); } else { spin_unlock_irq(&conn->lock); ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); mutex_unlock(&conn->ep_mutex); flush_work(&conn->cleanup_work); /* * Userspace is now done with the EP so we can release the ref * iscsi_cleanup_conn_work_fn took. */ iscsi_put_endpoint(ep); mutex_lock(&conn->ep_mutex); } } static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) { ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n"); /* * For offload, iscsid may not know about the ep like when iscsid is * restarted or for kernel based session shutdown iscsid is not even * up. For these cases, we do the disconnect now. */ mutex_lock(&conn->ep_mutex); if (conn->ep) iscsi_if_disconnect_bound_ep(conn, conn->ep, true); mutex_unlock(&conn->ep_mutex); /* * If this is a termination we have to call stop_conn with that flag * so the correct states get set. If we haven't run the work yet try to * avoid the extra run. */ if (flag == STOP_CONN_TERM) { cancel_work_sync(&conn->cleanup_work); iscsi_stop_conn(conn, flag); } else { /* * Figure out if it was the kernel or userspace initiating this. */ spin_lock_irq(&conn->lock); if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); iscsi_stop_conn(conn, flag); } else { spin_unlock_irq(&conn->lock); ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); flush_work(&conn->cleanup_work); } /* * Only clear for recovery to avoid extra cleanup runs during * termination. 
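 * iscsi_conn_error_event() only queues cleanup_work when it can
 * atomically set ISCSI_CLS_CONN_BIT_CLEANUP, so leaving the bit set
 * during termination stops a late conn error from requeueing cleanup
 * on a dying connection.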
*/ spin_lock_irq(&conn->lock); clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); spin_unlock_irq(&conn->lock); } ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); return 0; } static void iscsi_cleanup_conn_work_fn(struct work_struct *work) { struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, cleanup_work); struct iscsi_cls_session *session = iscsi_conn_to_session(conn); mutex_lock(&conn->ep_mutex); /* * Get a ref to the ep, so we don't release its ID until after * userspace is done referencing it in iscsi_if_disconnect_bound_ep. */ if (conn->ep) get_device(&conn->ep->dev); iscsi_ep_disconnect(conn, false); if (system_state != SYSTEM_RUNNING) { /* * If the user has set up for the session to never timeout * then hang like they wanted. For all other cases fail right * away since userspace is not going to relogin. */ if (session->recovery_tmo > 0) session->recovery_tmo = 0; } iscsi_stop_conn(conn, STOP_CONN_RECOVER); mutex_unlock(&conn->ep_mutex); ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n"); } static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data) { struct iscsi_transport *transport; struct iscsi_cls_conn *conn; if (!iscsi_is_conn_dev(dev)) return 0; conn = iscsi_dev_to_conn(dev); transport = conn->transport; if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN) iscsi_if_stop_conn(conn, STOP_CONN_TERM); transport->destroy_conn(conn); return 0; } /** * iscsi_force_destroy_session - destroy a session from the kernel * @session: session to destroy * * Force the destruction of a session from the kernel. This should only be * used when userspace is no longer running during system shutdown. */ void iscsi_force_destroy_session(struct iscsi_cls_session *session) { struct iscsi_transport *transport = session->transport; unsigned long flags; WARN_ON_ONCE(system_state == SYSTEM_RUNNING); spin_lock_irqsave(&sesslock, flags); if (list_empty(&session->sess_list)) { spin_unlock_irqrestore(&sesslock, flags); /* * Conn/ep is already freed. Session is being torn down via * async path. For shutdown we don't care about it so return. 
*/ return; } spin_unlock_irqrestore(&sesslock, flags); device_for_each_child(&session->dev, NULL, iscsi_iter_force_destroy_conn_fn); transport->destroy_session(session); } EXPORT_SYMBOL_GPL(iscsi_force_destroy_session); void iscsi_free_session(struct iscsi_cls_session *session) { ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION); put_device(&session->dev); } EXPORT_SYMBOL_GPL(iscsi_free_session); /** * iscsi_alloc_conn - alloc iscsi class connection * @session: iscsi cls session * @dd_size: private driver data size * @cid: connection id */ struct iscsi_cls_conn * iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) { struct iscsi_transport *transport = session->transport; struct iscsi_cls_conn *conn; conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL); if (!conn) return NULL; if (dd_size) conn->dd_data = &conn[1]; mutex_init(&conn->ep_mutex); spin_lock_init(&conn->lock); INIT_LIST_HEAD(&conn->conn_list); INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); conn->transport = transport; conn->cid = cid; WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); /* this is released in the dev's release function */ if (!get_device(&session->dev)) goto free_conn; dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid); device_initialize(&conn->dev); conn->dev.parent = &session->dev; conn->dev.release = iscsi_conn_release; return conn; free_conn: kfree(conn); return NULL; } EXPORT_SYMBOL_GPL(iscsi_alloc_conn); /** * iscsi_add_conn - add iscsi class connection * @conn: iscsi cls connection * * This will expose iscsi_cls_conn to sysfs so make sure the related * resources for sysfs attributes are initialized before calling this. */ int iscsi_add_conn(struct iscsi_cls_conn *conn) { int err; unsigned long flags; struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent); err = device_add(&conn->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register connection's dev\n"); return err; } err = transport_register_device(&conn->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register transport's dev\n"); device_del(&conn->dev); return err; } spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); spin_unlock_irqrestore(&connlock, flags); return 0; } EXPORT_SYMBOL_GPL(iscsi_add_conn); /** * iscsi_remove_conn - remove iscsi class connection from sysfs * @conn: iscsi cls connection * * Remove iscsi_cls_conn from sysfs, and wait for previous * read/write of iscsi_cls_conn's attributes in sysfs to finish. 
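 *
 * The waiting is provided by device_del() below: sysfs attribute
 * removal does not return while a read or write of those attributes
 * is still in flight.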
*/ void iscsi_remove_conn(struct iscsi_cls_conn *conn) { unsigned long flags; spin_lock_irqsave(&connlock, flags); list_del(&conn->conn_list); spin_unlock_irqrestore(&connlock, flags); transport_unregister_device(&conn->dev); device_del(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_remove_conn); void iscsi_put_conn(struct iscsi_cls_conn *conn) { put_device(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_put_conn); void iscsi_get_conn(struct iscsi_cls_conn *conn) { get_device(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_get_conn); /* * iscsi interface functions */ static struct iscsi_internal * iscsi_if_transport_lookup(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; spin_lock_irqsave(&iscsi_transport_lock, flags); list_for_each_entry(priv, &iscsi_transports, list) { if (tt == priv->iscsi_transport) { spin_unlock_irqrestore(&iscsi_transport_lock, flags); return priv; } } spin_unlock_irqrestore(&iscsi_transport_lock, flags); return NULL; } static int iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) { return nlmsg_multicast(nls, skb, 0, group, gfp); } static int iscsi_unicast_skb(struct sk_buff *skb, u32 portid) { return nlmsg_unicast(nls, skb, portid); } int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; char *pdu; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) + data_size); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return -EINVAL; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED); iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " "control PDU: OOM\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_RECV_PDU; ev->r.recv_req.cid = conn->cid; ev->r.recv_req.sid = iscsi_conn_get_sid(conn); pdu = (char*)ev + sizeof(*ev); memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_recv_pdu); int iscsi_offload_mesg(struct Scsi_Host *shost, struct iscsi_transport *transport, uint32_t type, char *data, uint16_t data_size) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->type = type; ev->transport_handle = iscsi_handle(transport); switch (type) { case ISCSI_KEVENT_PATH_REQ: ev->r.req_path.host_no = shost->host_no; break; case ISCSI_KEVENT_IF_DOWN: ev->r.notify_if_down.host_no = shost->host_no; break; } memcpy((char *)ev + sizeof(*ev), data, data_size); return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_offload_mesg); void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); unsigned long flags; int state; spin_lock_irqsave(&conn->lock, flags); /* * Userspace will only do a stop call if we are at least bound. 
And, we * only need to do the in kernel cleanup if in the UP state so cmds can * be released to upper layers. If in other states just wait for * userspace to avoid races that can leave the cleanup_work queued. */ state = READ_ONCE(conn->state); switch (state) { case ISCSI_CONN_BOUND: case ISCSI_CONN_UP: if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work); } break; default: ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", state); break; } spin_unlock_irqrestore(&conn->lock, flags); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " "conn error (%d)\n", error); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_ERROR; ev->r.connerror.error = error; ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", error); } EXPORT_SYMBOL_GPL(iscsi_conn_error_event); void iscsi_conn_login_event(struct iscsi_cls_conn *conn, enum iscsi_conn_state state) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " "conn login (%d)\n", state); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; ev->r.conn_login.state = state; ev->r.conn_login.cid = conn->cid; ev->r.conn_login.sid = iscsi_conn_get_sid(conn); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n", state); } EXPORT_SYMBOL_GPL(iscsi_conn_login_event); void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, enum iscsi_host_event_code code, uint32_t data_size, uint8_t *data) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", host_no, code); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_HOST_EVENT; ev->r.host_event.host_no = host_no; ev->r.host_event.code = code; ev->r.host_event.data_size = data_size; if (data_size) memcpy((char *)ev + sizeof(*ev), data, data_size); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_post_host_event); void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, uint32_t status, uint32_t pid, uint32_t data_size, uint8_t *data) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_PING_COMP; 
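	/* The ping payload, if any, is copied directly after the fixed
	 * event header (see the memcpy() below).
	 */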
ev->r.ping_comp.host_no = host_no; ev->r.ping_comp.status = status; ev->r.ping_comp.pid = pid; ev->r.ping_comp.data_size = data_size; memcpy((char *)ev + sizeof(*ev), data, data_size); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); static int iscsi_if_send_reply(u32 portid, int type, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; int len = nlmsg_total_size(size); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "Could not allocate skb to send reply.\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0); memcpy(nlmsg_data(nlh), payload, size); return iscsi_unicast_skb(skb, portid); } static int iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_stats *stats; struct sk_buff *skbstat; struct iscsi_cls_conn *conn; struct nlmsghdr *nlhstat; struct iscsi_uevent *evstat; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_stats) + sizeof(struct iscsi_stats_custom) * ISCSI_STATS_CUSTOM_MAX); int err = 0; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; conn = iscsi_conn_lookup(ev->u.get_stats.sid, ev->u.get_stats.cid); if (!conn) return -EEXIST; do { int actual_size; skbstat = alloc_skb(len, GFP_ATOMIC); if (!skbstat) { iscsi_cls_conn_printk(KERN_ERR, conn, "can not " "deliver stats: OOM\n"); return -ENOMEM; } nlhstat = __nlmsg_put(skbstat, 0, 0, 0, (len - sizeof(*nlhstat)), 0); evstat = nlmsg_data(nlhstat); memset(evstat, 0, sizeof(*evstat)); evstat->transport_handle = iscsi_handle(conn->transport); evstat->type = nlh->nlmsg_type; evstat->u.get_stats.cid = ev->u.get_stats.cid; evstat->u.get_stats.sid = ev->u.get_stats.sid; stats = (struct iscsi_stats *) ((char*)evstat + sizeof(*evstat)); memset(stats, 0, sizeof(*stats)); transport->get_stats(conn, stats); actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) + sizeof(struct iscsi_stats) + sizeof(struct iscsi_stats_custom) * stats->custom_length); actual_size -= sizeof(*nlhstat); actual_size = nlmsg_msg_size(actual_size); skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } while (err < 0 && err != -ECONNREFUSED); return err; } /** * iscsi_session_event - send session destr. 
completion event * @session: iscsi class session * @event: type of event */ int iscsi_session_event(struct iscsi_cls_session *session, enum iscsi_uevent_e event) { struct iscsi_internal *priv; struct Scsi_Host *shost; struct iscsi_uevent *ev; struct sk_buff *skb; struct nlmsghdr *nlh; int rc, len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(session->transport); if (!priv) return -EINVAL; shost = iscsi_session_to_shost(session); skb = alloc_skb(len, GFP_KERNEL); if (!skb) { iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " "event %u\n", event); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(session->transport); ev->type = event; switch (event) { case ISCSI_KEVENT_DESTROY_SESSION: ev->r.d_session.host_no = shost->host_no; ev->r.d_session.sid = session->sid; break; case ISCSI_KEVENT_CREATE_SESSION: ev->r.c_session_ret.host_no = shost->host_no; ev->r.c_session_ret.sid = session->sid; break; case ISCSI_KEVENT_UNBIND_SESSION: ev->r.unbind_session.host_no = shost->host_no; ev->r.unbind_session.sid = session->sid; break; default: iscsi_cls_session_printk(KERN_ERR, session, "Invalid event " "%u.\n", event); kfree_skb(skb); return -EINVAL; } /* * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); if (rc == -ESRCH) iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " "event %u. Check iscsi daemon\n", event); ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n", event, rc); return rc; } EXPORT_SYMBOL_GPL(iscsi_session_event); static int iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep, struct iscsi_uevent *ev, pid_t pid, uint32_t initial_cmdsn, uint16_t cmds_max, uint16_t queue_depth) { struct iscsi_transport *transport = priv->iscsi_transport; struct iscsi_cls_session *session; struct Scsi_Host *shost; session = transport->create_session(ep, cmds_max, queue_depth, initial_cmdsn); if (!session) return -ENOMEM; session->creator = pid; shost = iscsi_session_to_shost(session); ev->r.c_session_ret.host_no = shost->host_no; ev->r.c_session_ret.sid = session->sid; ISCSI_DBG_TRANS_SESSION(session, "Completed creating transport session\n"); return 0; } static int iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; session = iscsi_session_lookup(ev->u.c_conn.sid); if (!session) { printk(KERN_ERR "iscsi: invalid session %d.\n", ev->u.c_conn.sid); return -EINVAL; } conn = transport->create_conn(session, ev->u.c_conn.cid); if (!conn) { iscsi_cls_session_printk(KERN_ERR, session, "couldn't create a new connection."); return -ENOMEM; } ev->r.c_conn_ret.sid = session->sid; ev->r.c_conn_ret.cid = conn->cid; ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n"); return 0; } static int iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); if (!conn) return -EINVAL; ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n"); flush_work(&conn->cleanup_work); ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); if (transport->destroy_conn) transport->destroy_conn(conn); return 0; } static int iscsi_if_set_param(struct iscsi_transport 
*transport, struct iscsi_uevent *ev, u32 rlen) { char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; int err = 0, value = 0, state; if (ev->u.set_param.len > rlen || ev->u.set_param.len > PAGE_SIZE) return -EINVAL; session = iscsi_session_lookup(ev->u.set_param.sid); conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); if (!conn || !session) return -EINVAL; /* data will be regarded as NULL-ended string, do length check */ if (strlen(data) > ev->u.set_param.len) return -EINVAL; switch (ev->u.set_param.param) { case ISCSI_PARAM_SESS_RECOVERY_TMO: sscanf(data, "%d", &value); if (!session->recovery_tmo_sysfs_override) session->recovery_tmo = value; break; default: state = READ_ONCE(conn->state); if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); } else { return -ENOTCONN; } } return err; } static int iscsi_if_ep_connect(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) { struct iscsi_endpoint *ep; struct sockaddr *dst_addr; struct Scsi_Host *shost = NULL; int non_blocking, err = 0; if (!transport->ep_connect) return -EINVAL; if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) { shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no); if (!shost) { printk(KERN_ERR "ep connect failed. Could not find " "host no %u\n", ev->u.ep_connect_through_host.host_no); return -ENODEV; } non_blocking = ev->u.ep_connect_through_host.non_blocking; } else non_blocking = ev->u.ep_connect.non_blocking; dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); ep = transport->ep_connect(shost, dst_addr, non_blocking); if (IS_ERR(ep)) { err = PTR_ERR(ep); goto release_host; } ev->r.ep_connect_ret.handle = ep->id; release_host: if (shost) scsi_host_put(shost); return err; } static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, u64 ep_handle) { struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep; if (!transport->ep_disconnect) return -EINVAL; ep = iscsi_lookup_endpoint(ep_handle); if (!ep) return -EINVAL; conn = ep->conn; if (!conn) { /* * conn was not even bound yet, so we can't get iscsi conn * failures yet. 
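 * Tear the endpoint down directly instead of going through the
 * bound-ep cleanup path.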
*/ transport->ep_disconnect(ep); goto put_ep; } mutex_lock(&conn->ep_mutex); iscsi_if_disconnect_bound_ep(conn, ep, false); mutex_unlock(&conn->ep_mutex); put_ep: iscsi_put_endpoint(ep); return 0; } static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type, u32 rlen) { struct iscsi_endpoint *ep; int rc = 0; switch (msg_type) { case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: if (rlen < sizeof(struct sockaddr)) rc = -EINVAL; else rc = iscsi_if_ep_connect(transport, ev, msg_type); break; case ISCSI_UEVENT_TRANSPORT_EP_POLL: if (!transport->ep_poll) return -EINVAL; ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle); if (!ep) return -EINVAL; ev->r.retcode = transport->ep_poll(ep, ev->u.ep_poll.timeout_ms); iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: rc = iscsi_if_ep_disconnect(transport, ev->u.ep_disconnect.ep_handle); break; } return rc; } static int iscsi_tgt_dscvr(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct sockaddr *dst_addr; int err; if (rlen < sizeof(*dst_addr)) return -EINVAL; if (!transport->tgt_dscvr) return -EINVAL; shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); if (!shost) { printk(KERN_ERR "target discovery could not find host no %u\n", ev->u.tgt_dscvr.host_no); return -ENODEV; } dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type, ev->u.tgt_dscvr.enable, dst_addr); scsi_host_put(shost); return err; } static int iscsi_set_host_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { char *data = (char*)ev + sizeof(*ev); struct Scsi_Host *shost; int err; if (!transport->set_host_param) return -ENOSYS; if (ev->u.set_host_param.len > rlen || ev->u.set_host_param.len > PAGE_SIZE) return -EINVAL; shost = scsi_host_lookup(ev->u.set_host_param.host_no); if (!shost) { printk(KERN_ERR "set_host_param could not find host no %u\n", ev->u.set_host_param.host_no); return -ENODEV; } /* see similar check in iscsi_if_set_param() */ if (strlen(data) > ev->u.set_host_param.len) { err = -EINVAL; goto out; } err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); out: scsi_host_put(shost); return err; } static int iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct iscsi_path *params; int err; if (rlen < sizeof(*params)) return -EINVAL; if (!transport->set_path) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_path.host_no); if (!shost) { printk(KERN_ERR "set path could not find host no %u\n", ev->u.set_path.host_no); return -ENODEV; } params = (struct iscsi_path *)((char *)ev + sizeof(*ev)); err = transport->set_path(shost, params); scsi_host_put(shost); return err; } static int iscsi_session_has_conns(int sid) { struct iscsi_cls_conn *conn; unsigned long flags; int found = 0; spin_lock_irqsave(&connlock, flags); list_for_each_entry(conn, &connlist, conn_list) { if (iscsi_conn_get_sid(conn) == sid) { found = 1; break; } } spin_unlock_irqrestore(&connlock, flags); return found; } static int iscsi_set_iface_params(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int err; if (!transport->set_iface_param) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_iface_params.host_no); if (!shost) { printk(KERN_ERR "set_iface_params could not find 
host no %u\n", ev->u.set_iface_params.host_no); return -ENODEV; } err = transport->set_iface_param(shost, data, len); scsi_host_put(shost); return err; } static int iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct sockaddr *dst_addr; int err; if (rlen < sizeof(*dst_addr)) return -EINVAL; if (!transport->send_ping) return -ENOSYS; shost = scsi_host_lookup(ev->u.iscsi_ping.host_no); if (!shost) { printk(KERN_ERR "iscsi_ping could not find host no %u\n", ev->u.iscsi_ping.host_no); return -ENODEV; } dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev)); err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num, ev->u.iscsi_ping.iface_type, ev->u.iscsi_ping.payload_size, ev->u.iscsi_ping.pid, dst_addr); scsi_host_put(shost); return err; } static int iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_chap_rec *chap_rec; struct iscsi_internal *priv; struct sk_buff *skbchap; struct nlmsghdr *nlhchap; struct iscsi_uevent *evchap; uint32_t chap_buf_size; int len, err = 0; char *buf; if (!transport->get_chap) return -EINVAL; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); len = nlmsg_total_size(sizeof(*ev) + chap_buf_size); shost = scsi_host_lookup(ev->u.get_chap.host_no); if (!shost) { printk(KERN_ERR "%s: failed. Could not find host no %u\n", __func__, ev->u.get_chap.host_no); return -ENODEV; } do { int actual_size; skbchap = alloc_skb(len, GFP_KERNEL); if (!skbchap) { printk(KERN_ERR "can not deliver chap: OOM\n"); err = -ENOMEM; goto exit_get_chap; } nlhchap = __nlmsg_put(skbchap, 0, 0, 0, (len - sizeof(*nlhchap)), 0); evchap = nlmsg_data(nlhchap); memset(evchap, 0, sizeof(*evchap)); evchap->transport_handle = iscsi_handle(transport); evchap->type = nlh->nlmsg_type; evchap->u.get_chap.host_no = ev->u.get_chap.host_no; evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx; evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries; buf = (char *)evchap + sizeof(*evchap); memset(buf, 0, chap_buf_size); err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, &evchap->u.get_chap.num_entries, buf); actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size); skb_trim(skbchap, NLMSG_ALIGN(actual_size)); nlhchap->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); } while (err < 0 && err != -ECONNREFUSED); exit_get_chap: scsi_host_put(shost); return err; } static int iscsi_set_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int err = 0; if (!transport->set_chap) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_path.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.set_path.host_no); return -ENODEV; } err = transport->set_chap(shost, data, len); scsi_host_put(shost); return err; } static int iscsi_delete_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; int err = 0; if (!transport->delete_chap) return -ENOSYS; shost = scsi_host_lookup(ev->u.delete_chap.host_no); if (!shost) { printk(KERN_ERR "%s could not find host no %u\n", __func__, ev->u.delete_chap.host_no); return -ENODEV; } err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx); scsi_host_put(shost); return err; } static const struct { 
enum iscsi_discovery_parent_type value; char *name; } iscsi_discovery_parent_names[] = { {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" }, {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" }, {ISCSI_DISC_PARENT_ISNS, "isns" }, }; char *iscsi_get_discovery_parent_name(int parent_type) { int i; char *state = "Unknown!"; for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) { if (iscsi_discovery_parent_names[i].value & parent_type) { state = iscsi_discovery_parent_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name); static int iscsi_set_flashnode_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->set_flashnode_param) { err = -ENOSYS; goto exit_set_fnode; } shost = scsi_host_lookup(ev->u.set_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.set_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_set_fnode: return err; } static int iscsi_new_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int index; int err = 0; if (!transport->new_flashnode) { err = -ENOSYS; goto exit_new_fnode; } shost = scsi_host_lookup(ev->u.new_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.new_flashnode.host_no); err = -ENODEV; goto exit_new_fnode; } index = transport->new_flashnode(shost, data, len); if (index >= 0) ev->r.new_flashnode_ret.flashnode_idx = index; else err = -EIO; scsi_host_put(shost); exit_new_fnode: return err; } static int iscsi_del_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; uint32_t idx; int err = 0; if (!transport->del_flashnode) { err = -ENOSYS; goto exit_del_fnode; } shost = scsi_host_lookup(ev->u.del_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.del_flashnode.host_no); err = -ENODEV; goto exit_del_fnode; } idx = ev->u.del_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } err = transport->del_flashnode(fnode_sess); put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_del_fnode: return err; } static int iscsi_login_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->login_flashnode) { err = -ENOSYS; goto exit_login_fnode; } shost = scsi_host_lookup(ev->u.login_flashnode.host_no); 
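	/* scsi_host_lookup() pins the host; on success all exit paths
	 * below drop that reference via scsi_host_put() at put_host.
	 */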
if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.login_flashnode.host_no); err = -ENODEV; goto exit_login_fnode; } idx = ev->u.login_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->login_flashnode(fnode_sess, fnode_conn); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_login_fnode: return err; } static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->logout_flashnode) { err = -ENOSYS; goto exit_logout_fnode; } shost = scsi_host_lookup(ev->u.logout_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; goto exit_logout_fnode; } idx = ev->u.logout_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_logout_fnode: return err; } static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_cls_session *session; int err = 0; if (!transport->logout_flashnode_sid) { err = -ENOSYS; goto exit_logout_sid; } shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; goto exit_logout_sid; } session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid); if (!session) { pr_err("%s could not find session id %u\n", __func__, ev->u.logout_flashnode_sid.sid); err = -EINVAL; goto put_host; } err = transport->logout_flashnode_sid(session); put_host: scsi_host_put(shost); exit_logout_sid: return err; } static int iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_internal *priv; struct sk_buff *skbhost_stats; struct nlmsghdr *nlhhost_stats; struct iscsi_uevent *evhost_stats; int host_stats_size = 0; int len, err = 0; char *buf; if (!transport->get_host_stats) return -ENOSYS; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; host_stats_size = sizeof(struct iscsi_offload_host_stats); len = nlmsg_total_size(sizeof(*ev) + host_stats_size); shost = scsi_host_lookup(ev->u.get_host_stats.host_no); if (!shost) { pr_err("%s: failed. 
Could not find host no %u\n", __func__, ev->u.get_host_stats.host_no); return -ENODEV; } do { int actual_size; skbhost_stats = alloc_skb(len, GFP_KERNEL); if (!skbhost_stats) { pr_err("cannot deliver host stats: OOM\n"); err = -ENOMEM; goto exit_host_stats; } nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0, (len - sizeof(*nlhhost_stats)), 0); evhost_stats = nlmsg_data(nlhhost_stats); memset(evhost_stats, 0, sizeof(*evhost_stats)); evhost_stats->transport_handle = iscsi_handle(transport); evhost_stats->type = nlh->nlmsg_type; evhost_stats->u.get_host_stats.host_no = ev->u.get_host_stats.host_no; buf = (char *)evhost_stats + sizeof(*evhost_stats); memset(buf, 0, host_stats_size); err = transport->get_host_stats(shost, buf, host_stats_size); if (err) { kfree_skb(skbhost_stats); goto exit_host_stats; } actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); nlhhost_stats->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); } while (err < 0 && err != -ECONNREFUSED); exit_host_stats: scsi_host_put(shost); return err; } static int iscsi_if_transport_conn(struct iscsi_transport *transport, struct nlmsghdr *nlh, u32 pdu_len) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_cls_session *session; struct iscsi_cls_conn *conn = NULL; struct iscsi_endpoint *ep; int err = 0; switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_CONN: return iscsi_if_create_conn(transport, ev); case ISCSI_UEVENT_DESTROY_CONN: return iscsi_if_destroy_conn(transport, ev); case ISCSI_UEVENT_STOP_CONN: conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); if (!conn) return -EINVAL; return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag); } /* * The following cmds need to be run under the ep_mutex so in kernel * conn cleanup (ep_disconnect + unbind and conn) is not done while * these are running. They also must not run if we have just run a conn * cleanup because they would set the state in a way that might allow * IO or send IO themselves. 
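 * Hence ISCSI_CLS_CONN_BIT_CLEANUP is re-checked under ep_mutex below
 * before any of these commands are dispatched.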
*/ switch (nlh->nlmsg_type) { case ISCSI_UEVENT_START_CONN: conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid); break; case ISCSI_UEVENT_BIND_CONN: conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); break; case ISCSI_UEVENT_SEND_PDU: conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); break; } if (!conn) return -EINVAL; mutex_lock(&conn->ep_mutex); spin_lock_irq(&conn->lock); if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); mutex_unlock(&conn->ep_mutex); ev->r.retcode = -ENOTCONN; return 0; } spin_unlock_irq(&conn->lock); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_BIND_CONN: session = iscsi_session_lookup(ev->u.b_conn.sid); if (!session) { err = -EINVAL; break; } ev->r.retcode = transport->bind_conn(session, conn, ev->u.b_conn.transport_eph, ev->u.b_conn.is_leading); if (!ev->r.retcode) WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); if (ev->r.retcode || !transport->ep_connect) break; ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); if (ep) { ep->conn = conn; conn->ep = ep; iscsi_put_endpoint(ep); } else { err = -ENOTCONN; iscsi_cls_conn_printk(KERN_ERR, conn, "Could not set ep conn binding\n"); } break; case ISCSI_UEVENT_START_CONN: ev->r.retcode = transport->start_conn(conn); if (!ev->r.retcode) WRITE_ONCE(conn->state, ISCSI_CONN_UP); break; case ISCSI_UEVENT_SEND_PDU: if ((ev->u.send_pdu.hdr_size > pdu_len) || (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { err = -EINVAL; break; } ev->r.retcode = transport->send_pdu(conn, (struct iscsi_hdr *)((char *)ev + sizeof(*ev)), (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, ev->u.send_pdu.data_size); break; default: err = -ENOSYS; } mutex_unlock(&conn->ep_mutex); return err; } static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; struct iscsi_cls_session *session; struct iscsi_endpoint *ep = NULL; u32 rlen; if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) *group = ISCSI_NL_GRP_UIP; else *group = ISCSI_NL_GRP_ISCSID; priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); if (!priv) return -EINVAL; transport = priv->iscsi_transport; if (!try_module_get(transport->owner)) return -EINVAL; portid = NETLINK_CB(skb).portid; /* * Even though the remaining payload may not be regarded as nlattr, * (like address or something else), calculate the remaining length * here to ease following length checks. 
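 * In practice rlen is just the number of payload bytes following the
 * fixed struct iscsi_uevent header.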
*/ rlen = nlmsg_attrlen(nlh, sizeof(*ev)); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, portid, ev->u.c_session.initial_cmdsn, ev->u.c_session.cmds_max, ev->u.c_session.queue_depth); break; case ISCSI_UEVENT_CREATE_BOUND_SESSION: ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle); if (!ep) { err = -EINVAL; break; } err = iscsi_if_create_session(priv, ep, ev, portid, ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); if (!session) err = -EINVAL; else if (iscsi_session_has_conns(ev->u.d_session.sid)) err = -EBUSY; else transport->destroy_session(session); break; case ISCSI_UEVENT_DESTROY_SESSION_ASYNC: session = iscsi_session_lookup(ev->u.d_session.sid); if (!session) err = -EINVAL; else if (iscsi_session_has_conns(ev->u.d_session.sid)) err = -EBUSY; else { unsigned long flags; /* Prevent this session from being found again */ spin_lock_irqsave(&sesslock, flags); list_del_init(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); queue_work(system_dfl_wq, &session->destroy_work); } break; case ISCSI_UEVENT_UNBIND_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); if (session) queue_work(session->workq, &session->unbind_work); else err = -EINVAL; break; case ISCSI_UEVENT_SET_PARAM: err = iscsi_if_set_param(transport, ev, rlen); break; case ISCSI_UEVENT_CREATE_CONN: case ISCSI_UEVENT_DESTROY_CONN: case ISCSI_UEVENT_STOP_CONN: case ISCSI_UEVENT_START_CONN: case ISCSI_UEVENT_BIND_CONN: case ISCSI_UEVENT_SEND_PDU: err = iscsi_if_transport_conn(transport, nlh, rlen); break; case ISCSI_UEVENT_GET_STATS: err = iscsi_if_get_stats(transport, nlh); break; case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: case ISCSI_UEVENT_TRANSPORT_EP_POLL: case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen); break; case ISCSI_UEVENT_TGT_DSCVR: err = iscsi_tgt_dscvr(transport, ev, rlen); break; case ISCSI_UEVENT_SET_HOST_PARAM: err = iscsi_set_host_param(transport, ev, rlen); break; case ISCSI_UEVENT_PATH_UPDATE: err = iscsi_set_path(transport, ev, rlen); break; case ISCSI_UEVENT_SET_IFACE_PARAMS: err = iscsi_set_iface_params(transport, ev, rlen); break; case ISCSI_UEVENT_PING: err = iscsi_send_ping(transport, ev, rlen); break; case ISCSI_UEVENT_GET_CHAP: err = iscsi_get_chap(transport, nlh); break; case ISCSI_UEVENT_DELETE_CHAP: err = iscsi_delete_chap(transport, ev); break; case ISCSI_UEVENT_SET_FLASHNODE_PARAMS: err = iscsi_set_flashnode_param(transport, ev, rlen); break; case ISCSI_UEVENT_NEW_FLASHNODE: err = iscsi_new_flashnode(transport, ev, rlen); break; case ISCSI_UEVENT_DEL_FLASHNODE: err = iscsi_del_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGIN_FLASHNODE: err = iscsi_login_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGOUT_FLASHNODE: err = iscsi_logout_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: err = iscsi_logout_flashnode_sid(transport, ev); break; case ISCSI_UEVENT_SET_CHAP: err = iscsi_set_chap(transport, ev, rlen); break; case ISCSI_UEVENT_GET_HOST_STATS: err = iscsi_get_host_stats(transport, nlh); break; default: err = -ENOSYS; break; } module_put(transport->owner); return err; } /* * Get message from skb. Each message is processed by iscsi_if_recv_msg. 
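 * A single skb may batch several netlink messages; the loop below
 * handles one nlmsg at a time and advances with skb_pull().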
* Malformed skbs with wrong lengths or invalid creds are not processed. */ static void iscsi_if_rx(struct sk_buff *skb) { u32 portid = NETLINK_CB(skb).portid; mutex_lock(&rx_queue_mutex); while (skb->len >= NLMSG_HDRLEN) { int err; uint32_t rlen; struct nlmsghdr *nlh; struct iscsi_uevent *ev; uint32_t group; int retries = ISCSI_SEND_MAX_ALLOWED; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || skb->len < nlh->nlmsg_len) { break; } ev = nlmsg_data(nlh); rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; err = iscsi_if_recv_msg(skb, nlh, &group); if (err) { ev->type = ISCSI_KEVENT_IF_ERROR; ev->iferror = err; } do { /* * special case for GET_STATS, GET_CHAP and GET_HOST_STATS: * on success - sending reply and stats from * inside of if_recv_msg(), * on error - fall through. */ if (ev->type == ISCSI_UEVENT_GET_STATS && !err) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err) break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); if (err == -EAGAIN && --retries < 0) { printk(KERN_WARNING "Send reply failed, error %d\n", err); break; } } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } mutex_unlock(&rx_queue_mutex); } #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) /* * iSCSI connection attrs */ #define iscsi_conn_attr_show(param) \ static ssize_t \ show_conn_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ struct iscsi_transport *t = conn->transport; \ return t->get_conn_param(conn, param, buf); \ } #define iscsi_conn_attr(field, param) \ iscsi_conn_attr_show(param) \ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param, \ NULL); iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH); iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH); iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN); iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN); iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN); iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN); iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT); iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT); iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN); iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO); iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE); iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT); iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE); iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE); iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE); iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN); iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE); iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS); iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC); iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL); iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6); iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF); iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF); 
iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR); static const char *const connection_state_names[] = { [ISCSI_CONN_UP] = "up", [ISCSI_CONN_DOWN] = "down", [ISCSI_CONN_FAILED] = "failed", [ISCSI_CONN_BOUND] = "bound" }; static ssize_t show_conn_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); const char *state = "unknown"; int conn_state = READ_ONCE(conn->state); if (conn_state >= 0 && conn_state < ARRAY_SIZE(connection_state_names)) state = connection_state_names[conn_state]; return sysfs_emit(buf, "%s\n", state); } static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state, NULL); #define iscsi_conn_ep_attr_show(param) \ static ssize_t show_conn_ep_param_##param(struct device *dev, \ struct device_attribute *attr,\ char *buf) \ { \ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ struct iscsi_transport *t = conn->transport; \ struct iscsi_endpoint *ep; \ ssize_t rc; \ \ /* \ * Need to make sure ep_disconnect does not free the LLD's \ * interconnect resources while we are trying to read them. \ */ \ mutex_lock(&conn->ep_mutex); \ ep = conn->ep; \ if (!ep && t->ep_connect) { \ mutex_unlock(&conn->ep_mutex); \ return -ENOTCONN; \ } \ \ if (ep) \ rc = t->get_ep_param(ep, param, buf); \ else \ rc = t->get_conn_param(conn, param, buf); \ mutex_unlock(&conn->ep_mutex); \ return rc; \ } #define iscsi_conn_ep_attr(field, param) \ iscsi_conn_ep_attr_show(param) \ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \ show_conn_ep_param_##param, NULL); iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); static struct attribute *iscsi_conn_attrs[] = { &dev_attr_conn_max_recv_dlength.attr, &dev_attr_conn_max_xmit_dlength.attr, &dev_attr_conn_header_digest.attr, &dev_attr_conn_data_digest.attr, &dev_attr_conn_ifmarker.attr, &dev_attr_conn_ofmarker.attr, &dev_attr_conn_address.attr, &dev_attr_conn_port.attr, &dev_attr_conn_exp_statsn.attr, &dev_attr_conn_persistent_address.attr, &dev_attr_conn_persistent_port.attr, &dev_attr_conn_ping_tmo.attr, &dev_attr_conn_recv_tmo.attr, &dev_attr_conn_local_port.attr, &dev_attr_conn_statsn.attr, &dev_attr_conn_keepalive_tmo.attr, &dev_attr_conn_max_segment_size.attr, &dev_attr_conn_tcp_timestamp_stat.attr, &dev_attr_conn_tcp_wsf_disable.attr, &dev_attr_conn_tcp_nagle_disable.attr, &dev_attr_conn_tcp_timer_scale.attr, &dev_attr_conn_tcp_timestamp_enable.attr, &dev_attr_conn_fragment_disable.attr, &dev_attr_conn_ipv4_tos.attr, &dev_attr_conn_ipv6_traffic_class.attr, &dev_attr_conn_ipv6_flow_label.attr, &dev_attr_conn_is_fw_assigned_ipv6.attr, &dev_attr_conn_tcp_xmit_wsf.attr, &dev_attr_conn_tcp_recv_wsf.attr, &dev_attr_conn_local_ipaddr.attr, &dev_attr_conn_state.attr, NULL, }; static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct iscsi_cls_conn *conn = transport_class_to_conn(cdev); struct iscsi_transport *t = conn->transport; int param; if (attr == &dev_attr_conn_max_recv_dlength.attr) param = ISCSI_PARAM_MAX_RECV_DLENGTH; else if (attr == &dev_attr_conn_max_xmit_dlength.attr) param = ISCSI_PARAM_MAX_XMIT_DLENGTH; else if (attr == &dev_attr_conn_header_digest.attr) param = ISCSI_PARAM_HDRDGST_EN; else if (attr == &dev_attr_conn_data_digest.attr) param = ISCSI_PARAM_DATADGST_EN; else if (attr == &dev_attr_conn_ifmarker.attr) param = ISCSI_PARAM_IFMARKER_EN; else if (attr == &dev_attr_conn_ofmarker.attr) 
param = ISCSI_PARAM_OFMARKER_EN; else if (attr == &dev_attr_conn_address.attr) param = ISCSI_PARAM_CONN_ADDRESS; else if (attr == &dev_attr_conn_port.attr) param = ISCSI_PARAM_CONN_PORT; else if (attr == &dev_attr_conn_exp_statsn.attr) param = ISCSI_PARAM_EXP_STATSN; else if (attr == &dev_attr_conn_persistent_address.attr) param = ISCSI_PARAM_PERSISTENT_ADDRESS; else if (attr == &dev_attr_conn_persistent_port.attr) param = ISCSI_PARAM_PERSISTENT_PORT; else if (attr == &dev_attr_conn_ping_tmo.attr) param = ISCSI_PARAM_PING_TMO; else if (attr == &dev_attr_conn_recv_tmo.attr) param = ISCSI_PARAM_RECV_TMO; else if (attr == &dev_attr_conn_local_port.attr) param = ISCSI_PARAM_LOCAL_PORT; else if (attr == &dev_attr_conn_statsn.attr) param = ISCSI_PARAM_STATSN; else if (attr == &dev_attr_conn_keepalive_tmo.attr) param = ISCSI_PARAM_KEEPALIVE_TMO; else if (attr == &dev_attr_conn_max_segment_size.attr) param = ISCSI_PARAM_MAX_SEGMENT_SIZE; else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr) param = ISCSI_PARAM_TCP_TIMESTAMP_STAT; else if (attr == &dev_attr_conn_tcp_wsf_disable.attr) param = ISCSI_PARAM_TCP_WSF_DISABLE; else if (attr == &dev_attr_conn_tcp_nagle_disable.attr) param = ISCSI_PARAM_TCP_NAGLE_DISABLE; else if (attr == &dev_attr_conn_tcp_timer_scale.attr) param = ISCSI_PARAM_TCP_TIMER_SCALE; else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr) param = ISCSI_PARAM_TCP_TIMESTAMP_EN; else if (attr == &dev_attr_conn_fragment_disable.attr) param = ISCSI_PARAM_IP_FRAGMENT_DISABLE; else if (attr == &dev_attr_conn_ipv4_tos.attr) param = ISCSI_PARAM_IPV4_TOS; else if (attr == &dev_attr_conn_ipv6_traffic_class.attr) param = ISCSI_PARAM_IPV6_TC; else if (attr == &dev_attr_conn_ipv6_flow_label.attr) param = ISCSI_PARAM_IPV6_FLOW_LABEL; else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr) param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6; else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr) param = ISCSI_PARAM_TCP_XMIT_WSF; else if (attr == &dev_attr_conn_tcp_recv_wsf.attr) param = ISCSI_PARAM_TCP_RECV_WSF; else if (attr == &dev_attr_conn_local_ipaddr.attr) param = ISCSI_PARAM_LOCAL_IPADDR; else if (attr == &dev_attr_conn_state.attr) return S_IRUGO; else { WARN_ONCE(1, "Invalid conn attr"); return 0; } return t->attr_is_visible(ISCSI_PARAM, param); } static struct attribute_group iscsi_conn_group = { .attrs = iscsi_conn_attrs, .is_visible = iscsi_conn_attr_is_visible, }; /* * iSCSI session attrs */ #define iscsi_session_attr_show(param, perm) \ static ssize_t \ show_session_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ struct iscsi_transport *t = session->transport; \ \ if (perm && !capable(CAP_SYS_ADMIN)) \ return -EACCES; \ return t->get_session_param(session, param, buf); \ } #define iscsi_session_attr(field, param, perm) \ iscsi_session_attr_show(param, perm) \ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \ NULL); iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0); iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0); iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0); iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0); iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0); iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0); iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0); iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0); iscsi_session_attr(erl, 
ISCSI_PARAM_ERL, 0); iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0); iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1); iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1); iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1); iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1); iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1); iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1); iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0); iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0); iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0); iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0); iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0); iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0); iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0); iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0); iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0); iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0); iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0); iscsi_session_attr(discovery_auth_optional, ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0); iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0); iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0); iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0); iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0); iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); static ssize_t show_priv_session_target_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%s\n", iscsi_session_target_state_name[session->target_state]); } static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO, show_priv_session_target_state, NULL); static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); } static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, NULL); static ssize_t show_priv_session_creator(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%d\n", session->creator); } static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, NULL); static ssize_t show_priv_session_target_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%d\n", session->target_id); } static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, show_priv_session_target_id, NULL); #define iscsi_priv_session_attr_show(field, format) \ static ssize_t \ show_priv_session_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); 
\ if (session->field == -1) \ return sysfs_emit(buf, "off\n"); \ return sysfs_emit(buf, format"\n", session->field); \ } #define iscsi_priv_session_attr_store(field) \ static ssize_t \ store_priv_session_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ char *cp; \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if ((session->state == ISCSI_SESSION_FREE) || \ (session->state == ISCSI_SESSION_FAILED)) \ return -EBUSY; \ if (strncmp(buf, "off", 3) == 0) { \ session->field = -1; \ session->field##_sysfs_override = true; \ } else { \ val = simple_strtoul(buf, &cp, 0); \ if (*cp != '\0' && *cp != '\n') \ return -EINVAL; \ session->field = val; \ session->field##_sysfs_override = true; \ } \ return count; \ } #define iscsi_priv_session_rw_attr(field, format) \ iscsi_priv_session_attr_show(field, format) \ iscsi_priv_session_attr_store(field) \ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ show_priv_session_##field, \ store_priv_session_##field) iscsi_priv_session_rw_attr(recovery_tmo, "%d"); static struct attribute *iscsi_session_attrs[] = { &dev_attr_sess_initial_r2t.attr, &dev_attr_sess_max_outstanding_r2t.attr, &dev_attr_sess_immediate_data.attr, &dev_attr_sess_first_burst_len.attr, &dev_attr_sess_max_burst_len.attr, &dev_attr_sess_data_pdu_in_order.attr, &dev_attr_sess_data_seq_in_order.attr, &dev_attr_sess_erl.attr, &dev_attr_sess_targetname.attr, &dev_attr_sess_tpgt.attr, &dev_attr_sess_password.attr, &dev_attr_sess_password_in.attr, &dev_attr_sess_username.attr, &dev_attr_sess_username_in.attr, &dev_attr_sess_fast_abort.attr, &dev_attr_sess_abort_tmo.attr, &dev_attr_sess_lu_reset_tmo.attr, &dev_attr_sess_tgt_reset_tmo.attr, &dev_attr_sess_ifacename.attr, &dev_attr_sess_initiatorname.attr, &dev_attr_sess_targetalias.attr, &dev_attr_sess_boot_root.attr, &dev_attr_sess_boot_nic.attr, &dev_attr_sess_boot_target.attr, &dev_attr_priv_sess_recovery_tmo.attr, &dev_attr_priv_sess_state.attr, &dev_attr_priv_sess_target_state.attr, &dev_attr_priv_sess_creator.attr, &dev_attr_sess_chap_out_idx.attr, &dev_attr_sess_chap_in_idx.attr, &dev_attr_priv_sess_target_id.attr, &dev_attr_sess_auto_snd_tgt_disable.attr, &dev_attr_sess_discovery_session.attr, &dev_attr_sess_portal_type.attr, &dev_attr_sess_chap_auth.attr, &dev_attr_sess_discovery_logout.attr, &dev_attr_sess_bidi_chap.attr, &dev_attr_sess_discovery_auth_optional.attr, &dev_attr_sess_def_time2wait.attr, &dev_attr_sess_def_time2retain.attr, &dev_attr_sess_isid.attr, &dev_attr_sess_tsid.attr, &dev_attr_sess_def_taskmgmt_tmo.attr, &dev_attr_sess_discovery_parent_idx.attr, &dev_attr_sess_discovery_parent_type.attr, NULL, }; static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct iscsi_cls_session *session = transport_class_to_session(cdev); struct iscsi_transport *t = session->transport; int param; if (attr == &dev_attr_sess_initial_r2t.attr) param = ISCSI_PARAM_INITIAL_R2T_EN; else if (attr == &dev_attr_sess_max_outstanding_r2t.attr) param = ISCSI_PARAM_MAX_R2T; else if (attr == &dev_attr_sess_immediate_data.attr) param = ISCSI_PARAM_IMM_DATA_EN; else if (attr == &dev_attr_sess_first_burst_len.attr) param = ISCSI_PARAM_FIRST_BURST; else if (attr == &dev_attr_sess_max_burst_len.attr) param = ISCSI_PARAM_MAX_BURST; else if (attr == &dev_attr_sess_data_pdu_in_order.attr) param = ISCSI_PARAM_PDU_INORDER_EN; else if (attr == 
&dev_attr_sess_data_seq_in_order.attr) param = ISCSI_PARAM_DATASEQ_INORDER_EN; else if (attr == &dev_attr_sess_erl.attr) param = ISCSI_PARAM_ERL; else if (attr == &dev_attr_sess_targetname.attr) param = ISCSI_PARAM_TARGET_NAME; else if (attr == &dev_attr_sess_tpgt.attr) param = ISCSI_PARAM_TPGT; else if (attr == &dev_attr_sess_chap_in_idx.attr) param = ISCSI_PARAM_CHAP_IN_IDX; else if (attr == &dev_attr_sess_chap_out_idx.attr) param = ISCSI_PARAM_CHAP_OUT_IDX; else if (attr == &dev_attr_sess_password.attr) param = ISCSI_PARAM_USERNAME; else if (attr == &dev_attr_sess_password_in.attr) param = ISCSI_PARAM_USERNAME_IN; else if (attr == &dev_attr_sess_username.attr) param = ISCSI_PARAM_PASSWORD; else if (attr == &dev_attr_sess_username_in.attr) param = ISCSI_PARAM_PASSWORD_IN; else if (attr == &dev_attr_sess_fast_abort.attr) param = ISCSI_PARAM_FAST_ABORT; else if (attr == &dev_attr_sess_abort_tmo.attr) param = ISCSI_PARAM_ABORT_TMO; else if (attr == &dev_attr_sess_lu_reset_tmo.attr) param = ISCSI_PARAM_LU_RESET_TMO; else if (attr == &dev_attr_sess_tgt_reset_tmo.attr) param = ISCSI_PARAM_TGT_RESET_TMO; else if (attr == &dev_attr_sess_ifacename.attr) param = ISCSI_PARAM_IFACE_NAME; else if (attr == &dev_attr_sess_initiatorname.attr) param = ISCSI_PARAM_INITIATOR_NAME; else if (attr == &dev_attr_sess_targetalias.attr) param = ISCSI_PARAM_TARGET_ALIAS; else if (attr == &dev_attr_sess_boot_root.attr) param = ISCSI_PARAM_BOOT_ROOT; else if (attr == &dev_attr_sess_boot_nic.attr) param = ISCSI_PARAM_BOOT_NIC; else if (attr == &dev_attr_sess_boot_target.attr) param = ISCSI_PARAM_BOOT_TARGET; else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr) param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE; else if (attr == &dev_attr_sess_discovery_session.attr) param = ISCSI_PARAM_DISCOVERY_SESS; else if (attr == &dev_attr_sess_portal_type.attr) param = ISCSI_PARAM_PORTAL_TYPE; else if (attr == &dev_attr_sess_chap_auth.attr) param = ISCSI_PARAM_CHAP_AUTH_EN; else if (attr == &dev_attr_sess_discovery_logout.attr) param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN; else if (attr == &dev_attr_sess_bidi_chap.attr) param = ISCSI_PARAM_BIDI_CHAP_EN; else if (attr == &dev_attr_sess_discovery_auth_optional.attr) param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL; else if (attr == &dev_attr_sess_def_time2wait.attr) param = ISCSI_PARAM_DEF_TIME2WAIT; else if (attr == &dev_attr_sess_def_time2retain.attr) param = ISCSI_PARAM_DEF_TIME2RETAIN; else if (attr == &dev_attr_sess_isid.attr) param = ISCSI_PARAM_ISID; else if (attr == &dev_attr_sess_tsid.attr) param = ISCSI_PARAM_TSID; else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr) param = ISCSI_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_sess_discovery_parent_idx.attr) param = ISCSI_PARAM_DISCOVERY_PARENT_IDX; else if (attr == &dev_attr_sess_discovery_parent_type.attr) param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE; else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) return S_IRUGO | S_IWUSR; else if (attr == &dev_attr_priv_sess_state.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_state.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_creator.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_id.attr) return S_IRUGO; else { WARN_ONCE(1, "Invalid session attr"); return 0; } return t->attr_is_visible(ISCSI_PARAM, param); } static struct attribute_group iscsi_session_group = { .attrs = iscsi_session_attrs, .is_visible = iscsi_session_attr_is_visible, }; /* * iSCSI host attrs */ #define iscsi_host_attr_show(param) \ static ssize_t \ 
show_host_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct Scsi_Host *shost = transport_class_to_shost(dev); \ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \ return priv->iscsi_transport->get_host_param(shost, param, buf); \ } #define iscsi_host_attr(field, param) \ iscsi_host_attr_show(param) \ static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \ NULL); iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME); iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS); iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS); iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME); iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE); iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED); static struct attribute *iscsi_host_attrs[] = { &dev_attr_host_netdev.attr, &dev_attr_host_hwaddress.attr, &dev_attr_host_ipaddress.attr, &dev_attr_host_initiatorname.attr, &dev_attr_host_port_state.attr, &dev_attr_host_port_speed.attr, NULL, }; static umode_t iscsi_host_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = transport_class_to_shost(cdev); struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); int param; if (attr == &dev_attr_host_netdev.attr) param = ISCSI_HOST_PARAM_NETDEV_NAME; else if (attr == &dev_attr_host_hwaddress.attr) param = ISCSI_HOST_PARAM_HWADDRESS; else if (attr == &dev_attr_host_ipaddress.attr) param = ISCSI_HOST_PARAM_IPADDRESS; else if (attr == &dev_attr_host_initiatorname.attr) param = ISCSI_HOST_PARAM_INITIATOR_NAME; else if (attr == &dev_attr_host_port_state.attr) param = ISCSI_HOST_PARAM_PORT_STATE; else if (attr == &dev_attr_host_port_speed.attr) param = ISCSI_HOST_PARAM_PORT_SPEED; else { WARN_ONCE(1, "Invalid host attr"); return 0; } return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param); } static struct attribute_group iscsi_host_group = { .attrs = iscsi_host_attrs, .is_visible = iscsi_host_attr_is_visible, }; /* convert iscsi_port_speed values to ascii string name */ static const struct { enum iscsi_port_speed value; char *name; } iscsi_port_speed_names[] = { {ISCSI_PORT_SPEED_UNKNOWN, "Unknown" }, {ISCSI_PORT_SPEED_10MBPS, "10 Mbps" }, {ISCSI_PORT_SPEED_100MBPS, "100 Mbps" }, {ISCSI_PORT_SPEED_1GBPS, "1 Gbps" }, {ISCSI_PORT_SPEED_10GBPS, "10 Gbps" }, {ISCSI_PORT_SPEED_25GBPS, "25 Gbps" }, {ISCSI_PORT_SPEED_40GBPS, "40 Gbps" }, }; char *iscsi_get_port_speed_name(struct Scsi_Host *shost) { int i; char *speed = "Unknown!"; struct iscsi_cls_host *ihost = shost->shost_data; uint32_t port_speed = ihost->port_speed; for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) { if (iscsi_port_speed_names[i].value & port_speed) { speed = iscsi_port_speed_names[i].name; break; } } return speed; } EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name); /* convert iscsi_port_state values to ascii string name */ static const struct { enum iscsi_port_state value; char *name; } iscsi_port_state_names[] = { {ISCSI_PORT_STATE_DOWN, "LINK DOWN" }, {ISCSI_PORT_STATE_UP, "LINK UP" }, }; char *iscsi_get_port_state_name(struct Scsi_Host *shost) { int i; char *state = "Unknown!"; struct iscsi_cls_host *ihost = shost->shost_data; uint32_t port_state = ihost->port_state; for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) { if (iscsi_port_state_names[i].value & port_state) { state = iscsi_port_state_names[i].name; break; } } return state; } 
EXPORT_SYMBOL_GPL(iscsi_get_port_state_name); static int iscsi_session_match(struct attribute_container *cont, struct device *dev) { struct iscsi_cls_session *session; struct Scsi_Host *shost; struct iscsi_internal *priv; if (!iscsi_is_session_dev(dev)) return 0; session = iscsi_dev_to_session(dev); shost = iscsi_session_to_shost(session); if (!shost->transportt) return 0; priv = to_iscsi_internal(shost->transportt); if (priv->session_cont.ac.class != &iscsi_session_class.class) return 0; return &priv->session_cont.ac == cont; } static int iscsi_conn_match(struct attribute_container *cont, struct device *dev) { struct iscsi_cls_session *session; struct iscsi_cls_conn *conn; struct Scsi_Host *shost; struct iscsi_internal *priv; if (!iscsi_is_conn_dev(dev)) return 0; conn = iscsi_dev_to_conn(dev); session = iscsi_dev_to_session(conn->dev.parent); shost = iscsi_session_to_shost(session); if (!shost->transportt) return 0; priv = to_iscsi_internal(shost->transportt); if (priv->conn_cont.ac.class != &iscsi_connection_class.class) return 0; return &priv->conn_cont.ac == cont; } static int iscsi_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct iscsi_internal *priv; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &iscsi_host_class.class) return 0; priv = to_iscsi_internal(shost->transportt); return &priv->t.host_attrs.ac == cont; } struct scsi_transport_template * iscsi_register_transport(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; int err; BUG_ON(!tt); WARN_ON(tt->ep_disconnect && !tt->unbind_conn); priv = iscsi_if_transport_lookup(tt); if (priv) return NULL; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return NULL; INIT_LIST_HEAD(&priv->list); priv->iscsi_transport = tt; priv->t.user_scan = iscsi_user_scan; priv->dev.class = &iscsi_transport_class; dev_set_name(&priv->dev, "%s", tt->name); err = device_register(&priv->dev); if (err) goto put_dev; err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group); if (err) goto unregister_dev; /* host parameters */ priv->t.host_attrs.ac.class = &iscsi_host_class.class; priv->t.host_attrs.ac.match = iscsi_host_match; priv->t.host_attrs.ac.grp = &iscsi_host_group; priv->t.host_size = sizeof(struct iscsi_cls_host); transport_container_register(&priv->t.host_attrs); /* connection parameters */ priv->conn_cont.ac.class = &iscsi_connection_class.class; priv->conn_cont.ac.match = iscsi_conn_match; priv->conn_cont.ac.grp = &iscsi_conn_group; transport_container_register(&priv->conn_cont); /* session parameters */ priv->session_cont.ac.class = &iscsi_session_class.class; priv->session_cont.ac.match = iscsi_session_match; priv->session_cont.ac.grp = &iscsi_session_group; transport_container_register(&priv->session_cont); spin_lock_irqsave(&iscsi_transport_lock, flags); list_add(&priv->list, &iscsi_transports); spin_unlock_irqrestore(&iscsi_transport_lock, flags); printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name); return &priv->t; unregister_dev: device_unregister(&priv->dev); return NULL; put_dev: put_device(&priv->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_register_transport); void iscsi_unregister_transport(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; BUG_ON(!tt); mutex_lock(&rx_queue_mutex); priv = iscsi_if_transport_lookup(tt); BUG_ON (!priv); spin_lock_irqsave(&iscsi_transport_lock, flags); list_del(&priv->list); 
spin_unlock_irqrestore(&iscsi_transport_lock, flags);

	transport_container_unregister(&priv->conn_cont);
	transport_container_unregister(&priv->session_cont);
	transport_container_unregister(&priv->t.host_attrs);

	sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
	device_unregister(&priv->dev);
	mutex_unlock(&rx_queue_mutex);
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);

void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
		     struct device *dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	trace(dev, &vaf);
	va_end(args);
}
EXPORT_SYMBOL_GPL(iscsi_dbg_trace);

static __init int iscsi_transport_init(void)
{
	int err;
	struct netlink_kernel_cfg cfg = {
		.groups	= 1,
		.input	= iscsi_if_rx,
	};

	printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
	       ISCSI_TRANSPORT_VERSION);

	atomic_set(&iscsi_session_nr, 0);

	err = class_register(&iscsi_transport_class);
	if (err)
		return err;

	err = class_register(&iscsi_endpoint_class);
	if (err)
		goto unregister_transport_class;

	err = class_register(&iscsi_iface_class);
	if (err)
		goto unregister_endpoint_class;

	err = transport_class_register(&iscsi_host_class);
	if (err)
		goto unregister_iface_class;

	err = transport_class_register(&iscsi_connection_class);
	if (err)
		goto unregister_host_class;

	err = transport_class_register(&iscsi_session_class);
	if (err)
		goto unregister_conn_class;

	err = bus_register(&iscsi_flashnode_bus);
	if (err)
		goto unregister_session_class;

	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
	if (!nls) {
		err = -ENOBUFS;
		goto unregister_flashnode_bus;
	}

	iscsi_conn_cleanup_workq = alloc_workqueue("%s",
			WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
			"iscsi_conn_cleanup");
	if (!iscsi_conn_cleanup_workq) {
		err = -ENOMEM;
		goto release_nls;
	}

	return 0;

release_nls:
	netlink_kernel_release(nls);
unregister_flashnode_bus:
	bus_unregister(&iscsi_flashnode_bus);
unregister_session_class:
	transport_class_unregister(&iscsi_session_class);
unregister_conn_class:
	transport_class_unregister(&iscsi_connection_class);
unregister_host_class:
	transport_class_unregister(&iscsi_host_class);
unregister_iface_class:
	class_unregister(&iscsi_iface_class);
unregister_endpoint_class:
	class_unregister(&iscsi_endpoint_class);
unregister_transport_class:
	class_unregister(&iscsi_transport_class);
	return err;
}

static void __exit iscsi_transport_exit(void)
{
	destroy_workqueue(iscsi_conn_cleanup_workq);
	netlink_kernel_release(nls);
	bus_unregister(&iscsi_flashnode_bus);
	transport_class_unregister(&iscsi_connection_class);
	transport_class_unregister(&iscsi_session_class);
	transport_class_unregister(&iscsi_host_class);
	class_unregister(&iscsi_endpoint_class);
	class_unregister(&iscsi_iface_class);
	class_unregister(&iscsi_transport_class);
}

module_init(iscsi_transport_init);
module_exit(iscsi_transport_exit);

MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI Transport Interface");
MODULE_LICENSE("GPL");
MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI);
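/*
 * Hedged usage sketch, not part of scsi_transport_iscsi.c above: roughly how
 * a low-level iSCSI driver might pair iscsi_register_transport() with
 * iscsi_unregister_transport() at module load/unload. The "example_*" names
 * are illustrative assumptions; a real transport must also supply the
 * session/connection ops that the netlink dispatch above invokes.
 */
static struct scsi_transport_template *example_scsi_transport;

static struct iscsi_transport example_iscsi_transport = {
	.owner	= THIS_MODULE,
	.name	= "example_tcp",
	/* .create_session, .destroy_session, ... supplied by the driver */
};

static int __init example_iscsi_init(void)
{
	example_scsi_transport =
		iscsi_register_transport(&example_iscsi_transport);
	if (!example_scsi_transport)
		return -ENODEV;	/* duplicate registration or allocation failure */
	return 0;
}

static void __exit example_iscsi_exit(void)
{
	iscsi_unregister_transport(&example_iscsi_transport);
}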
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <net/genetlink.h>
#define CREATE_TRACE_POINTS
#include <trace/events/devlink.h>

#include "devl_internal.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);

DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);

static struct devlink *devlinks_xa_get(unsigned long index)
{
	struct devlink *devlink;

	rcu_read_lock();
	devlink = xa_find(&devlinks, &index, index, DEVLINK_REGISTERED);
	if (!devlink || !devlink_try_get(devlink))
		devlink = NULL;
	rcu_read_unlock();
	return devlink;
}

/* devlink_rels xarray contains 1:1 relationships between
 * devlink object and related nested devlink instance.
 * The xarray index is used to get the nested object from
 * the nested-in object code.
*/ static DEFINE_XARRAY_FLAGS(devlink_rels, XA_FLAGS_ALLOC1); #define DEVLINK_REL_IN_USE XA_MARK_0 struct devlink_rel { u32 index; refcount_t refcount; u32 devlink_index; struct { u32 devlink_index; u32 obj_index; devlink_rel_notify_cb_t *notify_cb; devlink_rel_cleanup_cb_t *cleanup_cb; struct delayed_work notify_work; } nested_in; }; static void devlink_rel_free(struct devlink_rel *rel) { xa_erase(&devlink_rels, rel->index); kfree(rel); } static void __devlink_rel_get(struct devlink_rel *rel) { refcount_inc(&rel->refcount); } static void __devlink_rel_put(struct devlink_rel *rel) { if (refcount_dec_and_test(&rel->refcount)) devlink_rel_free(rel); } static void devlink_rel_nested_in_notify_work(struct work_struct *work) { struct devlink_rel *rel = container_of(work, struct devlink_rel, nested_in.notify_work.work); struct devlink *devlink; devlink = devlinks_xa_get(rel->nested_in.devlink_index); if (!devlink) goto rel_put; if (!devl_trylock(devlink)) { devlink_put(devlink); goto reschedule_work; } if (!devl_is_registered(devlink)) { devl_unlock(devlink); devlink_put(devlink); goto rel_put; } if (!xa_get_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE)) rel->nested_in.cleanup_cb(devlink, rel->nested_in.obj_index, rel->index); rel->nested_in.notify_cb(devlink, rel->nested_in.obj_index); devl_unlock(devlink); devlink_put(devlink); rel_put: __devlink_rel_put(rel); return; reschedule_work: schedule_delayed_work(&rel->nested_in.notify_work, 1); } static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel) { __devlink_rel_get(rel); schedule_delayed_work(&rel->nested_in.notify_work, 0); } static struct devlink_rel *devlink_rel_alloc(void) { struct devlink_rel *rel; static u32 next; int err; rel = kzalloc(sizeof(*rel), GFP_KERNEL); if (!rel) return ERR_PTR(-ENOMEM); err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel, xa_limit_32b, &next, GFP_KERNEL); if (err < 0) { kfree(rel); return ERR_PTR(err); } refcount_set(&rel->refcount, 1); INIT_DELAYED_WORK(&rel->nested_in.notify_work, &devlink_rel_nested_in_notify_work); return rel; } static void devlink_rel_put(struct devlink *devlink) { struct devlink_rel *rel = devlink->rel; if (!rel) return; xa_clear_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE); devlink_rel_nested_in_notify_work_schedule(rel); __devlink_rel_put(rel); devlink->rel = NULL; } void devlink_rel_nested_in_clear(u32 rel_index) { xa_clear_mark(&devlink_rels, rel_index, DEVLINK_REL_IN_USE); } int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index, u32 obj_index, devlink_rel_notify_cb_t *notify_cb, devlink_rel_cleanup_cb_t *cleanup_cb, struct devlink *devlink) { struct devlink_rel *rel = devlink_rel_alloc(); ASSERT_DEVLINK_NOT_REGISTERED(devlink); if (IS_ERR(rel)) return PTR_ERR(rel); rel->devlink_index = devlink->index; rel->nested_in.devlink_index = devlink_index; rel->nested_in.obj_index = obj_index; rel->nested_in.notify_cb = notify_cb; rel->nested_in.cleanup_cb = cleanup_cb; *rel_index = rel->index; xa_set_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE); devlink->rel = rel; return 0; } /** * devlink_rel_nested_in_notify - Notify the object this devlink * instance is nested in. * @devlink: devlink * * This is called upon network namespace change of devlink instance. * In case this devlink instance is nested in another devlink object, * a notification of a change of this object should be sent * over netlink. The parent devlink instance lock needs to be * taken during the notification preparation. 
 * However, since the devlink lock of nested instance is held here,
 * we would end up with wrong devlink instance lock ordering and
 * deadlock. Therefore the work is utilized to avoid that.
 */
void devlink_rel_nested_in_notify(struct devlink *devlink)
{
	struct devlink_rel *rel = devlink->rel;

	if (!rel)
		return;
	devlink_rel_nested_in_notify_work_schedule(rel);
}

static struct devlink_rel *devlink_rel_find(unsigned long rel_index)
{
	return xa_find(&devlink_rels, &rel_index, rel_index,
		       DEVLINK_REL_IN_USE);
}

static struct devlink *devlink_rel_devlink_get(u32 rel_index)
{
	struct devlink_rel *rel;
	u32 devlink_index;

	if (!rel_index)
		return NULL;
	xa_lock(&devlink_rels);
	rel = devlink_rel_find(rel_index);
	if (rel)
		devlink_index = rel->devlink_index;
	xa_unlock(&devlink_rels);
	if (!rel)
		return NULL;
	return devlinks_xa_get(devlink_index);
}

int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
				   u32 rel_index, int attrtype,
				   bool *msg_updated)
{
	struct net *net = devlink_net(devlink);
	struct devlink *rel_devlink;
	int err;

	rel_devlink = devlink_rel_devlink_get(rel_index);
	if (!rel_devlink)
		return 0;
	err = devlink_nl_put_nested_handle(msg, net, rel_devlink, attrtype);
	devlink_put(rel_devlink);
	if (!err && msg_updated)
		*msg_updated = true;
	return err;
}

void *devlink_priv(struct devlink *devlink)
{
	return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);

struct devlink *priv_to_devlink(void *priv)
{
	return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);

struct device *devlink_to_dev(const struct devlink *devlink)
{
	return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);

struct net *devlink_net(const struct devlink *devlink)
{
	return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);

void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);

#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
bool devl_lock_is_held(struct devlink *devlink)
{
	return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif

void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);

int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);

void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);

/**
 * devlink_try_get() - try to obtain a reference on a devlink instance
 * @devlink: instance to reference
 *
 * Obtain a reference on a devlink instance. A reference on a devlink instance
 * only implies that it's safe to take the instance lock. It does not imply
 * that the instance is registered, use devl_is_registered() after taking
 * the instance lock to check registration status.
*/ struct devlink *__must_check devlink_try_get(struct devlink *devlink) { if (refcount_inc_not_zero(&devlink->refcount)) return devlink; return NULL; } static void devlink_release(struct work_struct *work) { struct devlink *devlink; devlink = container_of(to_rcu_work(work), struct devlink, rwork); mutex_destroy(&devlink->lock); lockdep_unregister_key(&devlink->lock_key); put_device(devlink->dev); kvfree(devlink); } void devlink_put(struct devlink *devlink) { if (refcount_dec_and_test(&devlink->refcount)) queue_rcu_work(system_percpu_wq, &devlink->rwork); } struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp) { struct devlink *devlink = NULL; rcu_read_lock(); retry: devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED); if (!devlink) goto unlock; if (!devlink_try_get(devlink)) goto next; if (!net_eq(devlink_net(devlink), net)) { devlink_put(devlink); goto next; } unlock: rcu_read_unlock(); return devlink; next: (*indexp)++; goto retry; } /** * devl_register - Register devlink instance * @devlink: devlink */ int devl_register(struct devlink *devlink) { ASSERT_DEVLINK_NOT_REGISTERED(devlink); devl_assert_locked(devlink); xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED); devlink_notify_register(devlink); devlink_rel_nested_in_notify(devlink); return 0; } EXPORT_SYMBOL_GPL(devl_register); void devlink_register(struct devlink *devlink) { devl_lock(devlink); devl_register(devlink); devl_unlock(devlink); } EXPORT_SYMBOL_GPL(devlink_register); /** * devl_unregister - Unregister devlink instance * @devlink: devlink */ void devl_unregister(struct devlink *devlink) { ASSERT_DEVLINK_REGISTERED(devlink); devl_assert_locked(devlink); devlink_notify_unregister(devlink); xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED); devlink_rel_put(devlink); } EXPORT_SYMBOL_GPL(devl_unregister); void devlink_unregister(struct devlink *devlink) { devl_lock(devlink); devl_unregister(devlink); devl_unlock(devlink); } EXPORT_SYMBOL_GPL(devlink_unregister); /** * devlink_alloc_ns - Allocate new devlink instance resources * in specific namespace * * @ops: ops * @priv_size: size of user private data * @net: net namespace * @dev: parent device * * Allocate new devlink instance resources, including devlink index * and name. 
*/ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, size_t priv_size, struct net *net, struct device *dev) { struct devlink *devlink; static u32 last_id; int ret; WARN_ON(!ops || !dev); if (!devlink_reload_actions_valid(ops)) return NULL; devlink = kvzalloc(struct_size(devlink, priv, priv_size), GFP_KERNEL); if (!devlink) return NULL; ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b, &last_id, GFP_KERNEL); if (ret < 0) goto err_xa_alloc; devlink->dev = get_device(dev); devlink->ops = ops; xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC); xa_init_flags(&devlink->params, XA_FLAGS_ALLOC); xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC); xa_init_flags(&devlink->nested_rels, XA_FLAGS_ALLOC); write_pnet(&devlink->_net, net); INIT_LIST_HEAD(&devlink->rate_list); INIT_LIST_HEAD(&devlink->linecard_list); INIT_LIST_HEAD(&devlink->sb_list); INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list); INIT_LIST_HEAD(&devlink->resource_list); INIT_LIST_HEAD(&devlink->region_list); INIT_LIST_HEAD(&devlink->reporter_list); INIT_LIST_HEAD(&devlink->trap_list); INIT_LIST_HEAD(&devlink->trap_group_list); INIT_LIST_HEAD(&devlink->trap_policer_list); INIT_RCU_WORK(&devlink->rwork, devlink_release); lockdep_register_key(&devlink->lock_key); mutex_init(&devlink->lock); lockdep_set_class(&devlink->lock, &devlink->lock_key); refcount_set(&devlink->refcount, 1); return devlink; err_xa_alloc: kvfree(devlink); return NULL; } EXPORT_SYMBOL_GPL(devlink_alloc_ns); /** * devlink_free - Free devlink instance resources * * @devlink: devlink */ void devlink_free(struct devlink *devlink) { ASSERT_DEVLINK_NOT_REGISTERED(devlink); WARN_ON(!list_empty(&devlink->trap_policer_list)); WARN_ON(!list_empty(&devlink->trap_group_list)); WARN_ON(!list_empty(&devlink->trap_list)); WARN_ON(!list_empty(&devlink->reporter_list)); WARN_ON(!list_empty(&devlink->region_list)); WARN_ON(!list_empty(&devlink->resource_list)); WARN_ON(!list_empty(&devlink->dpipe_table_list)); WARN_ON(!list_empty(&devlink->sb_list)); WARN_ON(!list_empty(&devlink->rate_list)); WARN_ON(!list_empty(&devlink->linecard_list)); WARN_ON(!xa_empty(&devlink->ports)); xa_destroy(&devlink->nested_rels); xa_destroy(&devlink->snapshot_ids); xa_destroy(&devlink->params); xa_destroy(&devlink->ports); xa_erase(&devlinks, devlink->index); devlink_put(devlink); } EXPORT_SYMBOL_GPL(devlink_free); static void __net_exit devlink_pernet_pre_exit(struct net *net) { struct devlink *devlink; u32 actions_performed; unsigned long index; int err; /* In case network namespace is getting destroyed, reload * all devlink instances from this namespace into init_net. 
 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		devl_dev_lock(devlink, true);
		err = 0;
		if (devl_is_registered(devlink))
			err = devlink_reload(devlink, &init_net,
					     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
					     DEVLINK_RELOAD_LIMIT_UNSPEC,
					     &actions_performed, NULL);
		devl_dev_unlock(devlink, true);
		devlink_put(devlink);
		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
	}
}

static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};

static struct notifier_block devlink_port_netdevice_nb = {
	.notifier_call = devlink_port_netdevice_event,
};

static int __init devlink_init(void)
{
	int err;

	err = register_pernet_subsys(&devlink_pernet_ops);
	if (err)
		goto out;
	err = genl_register_family(&devlink_nl_family);
	if (err)
		goto out_unreg_pernet_subsys;
	err = register_netdevice_notifier(&devlink_port_netdevice_nb);
	if (!err)
		return 0;

	genl_unregister_family(&devlink_nl_family);

out_unreg_pernet_subsys:
	unregister_pernet_subsys(&devlink_pernet_ops);
out:
	WARN_ON(err);
	return err;
}

subsys_initcall(devlink_init);
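/*
 * Hedged usage sketch, not part of core.c above: the driver-side lifecycle
 * built on the helpers in this file. devlink_alloc() is the init_net wrapper
 * around devlink_alloc_ns(); the "example_*" names and the empty ops are
 * illustrative assumptions, not a real driver.
 */
struct example_priv {
	int dummy;
};

static const struct devlink_ops example_devlink_ops = {
	/* reload_* callbacks would go here if reload were supported */
};

static int example_probe(struct device *dev)
{
	struct example_priv *priv;
	struct devlink *devlink;

	devlink = devlink_alloc(&example_devlink_ops, sizeof(*priv), dev);
	if (!devlink)
		return -ENOMEM;
	priv = devlink_priv(devlink);
	priv->dummy = 1;

	devlink_register(devlink);	/* sets DEVLINK_REGISTERED, sends notifications */
	return 0;
}

static void example_remove(struct devlink *devlink)
{
	devlink_unregister(devlink);
	devlink_free(devlink);		/* drops the initial reference */
}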
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#ifndef __ASSEMBLER__

#include <linux/cache.h>
#include <asm/percpu.h>

struct task_struct;

DECLARE_PER_CPU_CACHE_HOT(struct task_struct *, current_task);
/* const-qualified alias provided by the linker. */
DECLARE_PER_CPU_CACHE_HOT(struct task_struct * const __percpu_seg_override,
			  const_current_task);

static __always_inline struct task_struct *get_current(void)
{
	if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
		return this_cpu_read_const(const_current_task);

	return this_cpu_read_stable(current_task);
}

#define current get_current()

#endif /* __ASSEMBLER__ */

#endif /* _ASM_X86_CURRENT_H */
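/*
 * Hedged usage sketch, not part of the header above: "current" behaves as an
 * ordinary expression wherever task context is valid. This illustrative
 * helper is an assumption for demonstration; it would additionally need
 * <linux/sched.h> (for the task_struct fields) and <linux/printk.h>.
 */
static inline void example_report_current(void)
{
	/* get_current() compiles down to a single per-CPU load */
	pr_info("running as %s (pid %d)\n", current->comm, current->pid);
}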
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * ocfs2_fs.h
 *
 * On-disk structures for OCFS2.
* * Copyright (C) 2002, 2004 Oracle. All rights reserved. */ #ifndef _OCFS2_FS_H #define _OCFS2_FS_H #include <linux/magic.h> /* Version */ #define OCFS2_MAJOR_REV_LEVEL 0 #define OCFS2_MINOR_REV_LEVEL 90 /* * An OCFS2 volume starts this way: * Sector 0: Valid ocfs1_vol_disk_hdr that cleanly fails to mount OCFS. * Sector 1: Valid ocfs1_vol_label that cleanly fails to mount OCFS. * Block OCFS2_SUPER_BLOCK_BLKNO: OCFS2 superblock. * * All other structures are found from the superblock information. * * OCFS2_SUPER_BLOCK_BLKNO is in blocks, not sectors. eg, for a * blocksize of 2K, it is 4096 bytes into disk. */ #define OCFS2_SUPER_BLOCK_BLKNO 2 /* * Cluster size limits. The maximum is kept arbitrarily at 1 MB, and could * grow if needed. */ #define OCFS2_MIN_CLUSTERSIZE 4096 #define OCFS2_MAX_CLUSTERSIZE 1048576 /* * Blocks cannot be bigger than clusters, so the maximum blocksize is the * minimum cluster size. */ #define OCFS2_MIN_BLOCKSIZE 512 #define OCFS2_MAX_BLOCKSIZE OCFS2_MIN_CLUSTERSIZE /* Object signatures */ #define OCFS2_SUPER_BLOCK_SIGNATURE "OCFSV2" #define OCFS2_INODE_SIGNATURE "INODE01" #define OCFS2_EXTENT_BLOCK_SIGNATURE "EXBLK01" #define OCFS2_GROUP_DESC_SIGNATURE "GROUP01" #define OCFS2_XATTR_BLOCK_SIGNATURE "XATTR01" #define OCFS2_DIR_TRAILER_SIGNATURE "DIRTRL1" #define OCFS2_DX_ROOT_SIGNATURE "DXDIR01" #define OCFS2_DX_LEAF_SIGNATURE "DXLEAF1" #define OCFS2_REFCOUNT_BLOCK_SIGNATURE "REFCNT1" /* Compatibility flags */ #define OCFS2_HAS_COMPAT_FEATURE(sb,mask) \ ( OCFS2_SB(sb)->s_feature_compat & (mask) ) #define OCFS2_HAS_RO_COMPAT_FEATURE(sb,mask) \ ( OCFS2_SB(sb)->s_feature_ro_compat & (mask) ) #define OCFS2_HAS_INCOMPAT_FEATURE(sb,mask) \ ( OCFS2_SB(sb)->s_feature_incompat & (mask) ) #define OCFS2_SET_COMPAT_FEATURE(sb,mask) \ OCFS2_SB(sb)->s_feature_compat |= (mask) #define OCFS2_SET_RO_COMPAT_FEATURE(sb,mask) \ OCFS2_SB(sb)->s_feature_ro_compat |= (mask) #define OCFS2_SET_INCOMPAT_FEATURE(sb,mask) \ OCFS2_SB(sb)->s_feature_incompat |= (mask) #define OCFS2_CLEAR_COMPAT_FEATURE(sb,mask) \ OCFS2_SB(sb)->s_feature_compat &= ~(mask) #define OCFS2_CLEAR_RO_COMPAT_FEATURE(sb,mask) \ OCFS2_SB(sb)->s_feature_ro_compat &= ~(mask) #define OCFS2_CLEAR_INCOMPAT_FEATURE(sb,mask) \ OCFS2_SB(sb)->s_feature_incompat &= ~(mask) #define OCFS2_FEATURE_COMPAT_SUPP (OCFS2_FEATURE_COMPAT_BACKUP_SB \ | OCFS2_FEATURE_COMPAT_JBD2_SB) #define OCFS2_FEATURE_INCOMPAT_SUPP (OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \ | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \ | OCFS2_FEATURE_INCOMPAT_INLINE_DATA \ | OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP \ | OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK \ | OCFS2_FEATURE_INCOMPAT_XATTR \ | OCFS2_FEATURE_INCOMPAT_META_ECC \ | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \ | OCFS2_FEATURE_INCOMPAT_APPEND_DIO) #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) /* * Heartbeat-only devices are missing journals and other files. The * filesystem driver can't load them, but the library can. Never put * this in OCFS2_FEATURE_INCOMPAT_SUPP, *ever*. */ #define OCFS2_FEATURE_INCOMPAT_HEARTBEAT_DEV 0x0002 /* * tunefs sets this incompat flag before starting the resize and clears it * at the end. This flag protects users from inadvertently mounting the fs * after an aborted run without fsck-ing. 
 */
#define OCFS2_FEATURE_INCOMPAT_RESIZE_INPROG	0x0004

/* Used to denote a non-clustered volume */
#define OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT	0x0008

/* Support for sparse allocation in b-trees */
#define OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC	0x0010

/*
 * Tunefs sets this incompat flag before starting an operation which
 * would require cleanup on abort. This is done to protect users from
 * inadvertently mounting the fs after an aborted run without
 * fsck-ing.
 *
 * s_tunefs_flags on the super block describes precisely which
 * operations were in progress.
 */
#define OCFS2_FEATURE_INCOMPAT_TUNEFS_INPROG	0x0020

/* Support for data packed into inode blocks */
#define OCFS2_FEATURE_INCOMPAT_INLINE_DATA	0x0040

/*
 * Support for alternate, userspace cluster stacks. If set, the superblock
 * field s_cluster_info contains a tag for the alternate stack in use as
 * well as the name of the cluster being joined.
 * mount.ocfs2 must pass in a matching stack name.
 *
 * If not set, the classic stack will be used. This is compatible with
 * all older versions.
 */
#define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK	0x0080

/* Support for the extended slot map */
#define OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP 0x100

/* Support for extended attributes */
#define OCFS2_FEATURE_INCOMPAT_XATTR		0x0200

/* Support for indexed directories */
#define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS	0x0400

/* Metadata checksum and error correction */
#define OCFS2_FEATURE_INCOMPAT_META_ECC		0x0800

/* Refcount tree support */
#define OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE	0x1000

/* Discontiguous block groups */
#define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG	0x2000

/*
 * Incompat bit to indicate usable clusterinfo with stackflags for all
 * cluster stacks (userspace and o2cb). If this bit is set,
 * INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set.
 */
#define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO	0x4000

/*
 * Append Direct IO support
 */
#define OCFS2_FEATURE_INCOMPAT_APPEND_DIO	0x8000

/*
 * backup superblock flag is used to indicate that this volume
 * has backup superblocks.
 */
#define OCFS2_FEATURE_COMPAT_BACKUP_SB		0x0001

/*
 * The filesystem will correctly handle journal feature bits.
 */
#define OCFS2_FEATURE_COMPAT_JBD2_SB		0x0002

/*
 * Unwritten extents support.
 */
#define OCFS2_FEATURE_RO_COMPAT_UNWRITTEN	0x0001

/*
 * Maintain quota information for this filesystem
 */
#define OCFS2_FEATURE_RO_COMPAT_USRQUOTA	0x0002
#define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA	0x0004

/* The byte offset of the first backup block will be 1G.
 * The following will be 4G, 16G, 64G, 256G and 1T.
 */
#define OCFS2_BACKUP_SB_START			(1 << 30)

/* the max backup superblock nums */
#define OCFS2_MAX_BACKUP_SUPERBLOCKS		6
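/*
 * Hedged illustration, not part of the on-disk format: the i-th backup
 * superblock sits at OCFS2_BACKUP_SB_START << (2 * i) bytes, which reproduces
 * the 1G, 4G, 16G, 64G, 256G, 1T sequence described above. "example_*" is an
 * illustrative name, and a u64 type from <linux/types.h> is assumed; callers
 * must keep i < OCFS2_MAX_BACKUP_SUPERBLOCKS.
 */
static inline u64 example_backup_sb_byte_offset(unsigned int i)
{
	/* widen before shifting so offsets past 4G don't overflow */
	return (u64)OCFS2_BACKUP_SB_START << (2 * i);
}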
*/ #define OCFS2_BACKUP_SB_START 1 << 30 /* the max backup superblock nums */ #define OCFS2_MAX_BACKUP_SUPERBLOCKS 6 /* * Flags on ocfs2_super_block.s_tunefs_flags */ #define OCFS2_TUNEFS_INPROG_REMOVE_SLOT 0x0001 /* Removing slots */ /* * Flags on ocfs2_dinode.i_flags */ #define OCFS2_VALID_FL (0x00000001) /* Inode is valid */ #define OCFS2_UNUSED2_FL (0x00000002) #define OCFS2_ORPHANED_FL (0x00000004) /* On the orphan list */ #define OCFS2_UNUSED3_FL (0x00000008) /* System inode flags */ #define OCFS2_SYSTEM_FL (0x00000010) /* System inode */ #define OCFS2_SUPER_BLOCK_FL (0x00000020) /* Super block */ #define OCFS2_LOCAL_ALLOC_FL (0x00000040) /* Slot local alloc bitmap */ #define OCFS2_BITMAP_FL (0x00000080) /* Allocation bitmap */ #define OCFS2_JOURNAL_FL (0x00000100) /* Slot local journal */ #define OCFS2_HEARTBEAT_FL (0x00000200) /* Heartbeat area */ #define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */ #define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */ #define OCFS2_QUOTA_FL (0x00001000) /* Quota file */ #define OCFS2_DIO_ORPHANED_FL (0X00002000) /* On the orphan list especially * for dio */ /* * Flags on ocfs2_dinode.i_dyn_features * * These can change much more often than i_flags. When adding flags, * keep in mind that i_dyn_features is only 16 bits wide. */ #define OCFS2_INLINE_DATA_FL (0x0001) /* Data stored in inode block */ #define OCFS2_HAS_XATTR_FL (0x0002) #define OCFS2_INLINE_XATTR_FL (0x0004) #define OCFS2_INDEXED_DIR_FL (0x0008) #define OCFS2_HAS_REFCOUNT_FL (0x0010) /* Inode attributes, keep in sync with EXT2 */ #define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */ #define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */ #define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */ #define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */ #define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */ #define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */ #define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */ #define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */ /* Reserved for compression usage... 
*/ #define OCFS2_DIRTY_FL FS_DIRTY_FL #define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */ #define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */ #define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */ /* End compression flags --- maybe not all used */ #define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */ #define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */ #define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */ #define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */ #define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */ #define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */ #define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/ #define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */ #define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ #define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ /* * Extent record flags (e_node.leaf.flags) */ #define OCFS2_EXT_UNWRITTEN (0x01) /* Extent is allocated but * unwritten */ #define OCFS2_EXT_REFCOUNTED (0x02) /* Extent is reference * counted in an associated * refcount tree */ /* * Journal Flags (ocfs2_dinode.id1.journal1.i_flags) */ #define OCFS2_JOURNAL_DIRTY_FL (0x00000001) /* Journal needs recovery */ /* * superblock s_state flags */ #define OCFS2_ERROR_FS (0x00000001) /* FS saw errors */ /* Limit of space in ocfs2_dir_entry */ #define OCFS2_MAX_FILENAME_LEN 255 /* Maximum slots on an ocfs2 file system */ #define OCFS2_MAX_SLOTS 255 /* Slot map indicator for an empty slot */ #define OCFS2_INVALID_SLOT ((u16)-1) #define OCFS2_VOL_UUID_LEN 16 #define OCFS2_MAX_VOL_LABEL_LEN 64 /* The cluster stack fields */ #define OCFS2_STACK_LABEL_LEN 4 #define OCFS2_CLUSTER_NAME_LEN 16 /* Classic (historically speaking) cluster stack */ #define OCFS2_CLASSIC_CLUSTER_STACK "o2cb" /* Journal limits (in bytes) */ #define OCFS2_MIN_JOURNAL_SIZE (4 * 1024 * 1024) /* * Inline extended attribute size (in bytes) * The value chosen should be aligned to 16 byte boundaries. 
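 *
 * Illustrative note (added): the minimum of 256 defined just below is
 * 16-byte aligned, and ocfs2_max_inline_data_with_xattr() further down
 * subtracts exactly this per-inode reservation (i_xattr_inline_size)
 * when sizing inline data.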
*/ #define OCFS2_MIN_XATTR_INLINE_SIZE 256 /* * Cluster info flags (ocfs2_cluster_info.ci_stackflags) */ #define OCFS2_CLUSTER_O2CB_GLOBAL_HEARTBEAT (0x01) struct ocfs2_system_inode_info { char *si_name; int si_iflags; int si_mode; }; /* System file index */ enum { BAD_BLOCK_SYSTEM_INODE = 0, GLOBAL_INODE_ALLOC_SYSTEM_INODE, #define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE, HEARTBEAT_SYSTEM_INODE, GLOBAL_BITMAP_SYSTEM_INODE, USER_QUOTA_SYSTEM_INODE, GROUP_QUOTA_SYSTEM_INODE, #define OCFS2_LAST_GLOBAL_SYSTEM_INODE GROUP_QUOTA_SYSTEM_INODE #define OCFS2_FIRST_LOCAL_SYSTEM_INODE ORPHAN_DIR_SYSTEM_INODE ORPHAN_DIR_SYSTEM_INODE, EXTENT_ALLOC_SYSTEM_INODE, INODE_ALLOC_SYSTEM_INODE, JOURNAL_SYSTEM_INODE, LOCAL_ALLOC_SYSTEM_INODE, TRUNCATE_LOG_SYSTEM_INODE, LOCAL_USER_QUOTA_SYSTEM_INODE, LOCAL_GROUP_QUOTA_SYSTEM_INODE, #define OCFS2_LAST_LOCAL_SYSTEM_INODE LOCAL_GROUP_QUOTA_SYSTEM_INODE NUM_SYSTEM_INODES }; #define NUM_GLOBAL_SYSTEM_INODES OCFS2_FIRST_LOCAL_SYSTEM_INODE #define NUM_LOCAL_SYSTEM_INODES \ (NUM_SYSTEM_INODES - OCFS2_FIRST_LOCAL_SYSTEM_INODE) static struct ocfs2_system_inode_info ocfs2_system_inodes[NUM_SYSTEM_INODES] = { /* Global system inodes (single copy) */ /* The first two are only used from userspace mkfs/tunefs */ [BAD_BLOCK_SYSTEM_INODE] = { "bad_blocks", 0, S_IFREG | 0644 }, [GLOBAL_INODE_ALLOC_SYSTEM_INODE] = { "global_inode_alloc", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 }, /* These are used by the running filesystem */ [SLOT_MAP_SYSTEM_INODE] = { "slot_map", 0, S_IFREG | 0644 }, [HEARTBEAT_SYSTEM_INODE] = { "heartbeat", OCFS2_HEARTBEAT_FL, S_IFREG | 0644 }, [GLOBAL_BITMAP_SYSTEM_INODE] = { "global_bitmap", 0, S_IFREG | 0644 }, [USER_QUOTA_SYSTEM_INODE] = { "aquota.user", OCFS2_QUOTA_FL, S_IFREG | 0644 }, [GROUP_QUOTA_SYSTEM_INODE] = { "aquota.group", OCFS2_QUOTA_FL, S_IFREG | 0644 }, /* Slot-specific system inodes (one copy per slot) */ [ORPHAN_DIR_SYSTEM_INODE] = { "orphan_dir:%04d", 0, S_IFDIR | 0755 }, [EXTENT_ALLOC_SYSTEM_INODE] = { "extent_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 }, [INODE_ALLOC_SYSTEM_INODE] = { "inode_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 }, [JOURNAL_SYSTEM_INODE] = { "journal:%04d", OCFS2_JOURNAL_FL, S_IFREG | 0644 }, [LOCAL_ALLOC_SYSTEM_INODE] = { "local_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_LOCAL_ALLOC_FL, S_IFREG | 0644 }, [TRUNCATE_LOG_SYSTEM_INODE] = { "truncate_log:%04d", OCFS2_DEALLOC_FL, S_IFREG | 0644 }, [LOCAL_USER_QUOTA_SYSTEM_INODE] = { "aquota.user:%04d", OCFS2_QUOTA_FL, S_IFREG | 0644 }, [LOCAL_GROUP_QUOTA_SYSTEM_INODE] = { "aquota.group:%04d", OCFS2_QUOTA_FL, S_IFREG | 0644 }, }; /* Parameter passed from mount.ocfs2 to module */ #define OCFS2_HB_NONE "heartbeat=none" #define OCFS2_HB_LOCAL "heartbeat=local" #define OCFS2_HB_GLOBAL "heartbeat=global" /* * OCFS2_DIR_PAD defines the directory entries boundaries * * NOTE: It must be a multiple of 4 */ #define OCFS2_DIR_PAD 4 #define OCFS2_DIR_ROUND (OCFS2_DIR_PAD - 1) #define OCFS2_DIR_MEMBER_LEN offsetof(struct ocfs2_dir_entry, name) #define OCFS2_DIR_REC_LEN(name_len) (((name_len) + OCFS2_DIR_MEMBER_LEN + \ OCFS2_DIR_ROUND) & \ ~OCFS2_DIR_ROUND) #define OCFS2_DIR_MIN_REC_LEN OCFS2_DIR_REC_LEN(1) #define OCFS2_LINK_MAX 32000 #define OCFS2_DX_LINK_MAX ((1U << 31) - 1U) #define OCFS2_LINKS_HI_SHIFT 16 #define OCFS2_DX_ENTRIES_MAX (0xffffffffU) /* * Convenience casts */ #define OCFS2_RAW_SB(dinode) (&((dinode)->id2.i_super)) /* * Block checking structure.
This is used in metadata to validate the * contents. If OCFS2_FEATURE_INCOMPAT_META_ECC is not set, it is all * zeros. */ struct ocfs2_block_check { /*00*/ __le32 bc_crc32e; /* 802.3 Ethernet II CRC32 */ __le16 bc_ecc; /* Single-error-correction parity vector. This is a simple Hamming code dependent on the blocksize. OCFS2's maximum blocksize, 4K, requires 16 parity bits, so we fit in __le16. */ __le16 bc_reserved1; /*08*/ }; /* * On disk extent record for OCFS2 * It describes a range of clusters on disk. * * Length fields are divided into interior and leaf node versions. * This leaves room for a flags field (OCFS2_EXT_*) in the leaf nodes. */ struct ocfs2_extent_rec { /*00*/ __le32 e_cpos; /* Offset into the file, in clusters */ union { __le32 e_int_clusters; /* Clusters covered by all children */ struct { __le16 e_leaf_clusters; /* Clusters covered by this extent */ __u8 e_reserved1; __u8 e_flags; /* Extent flags */ }; }; __le64 e_blkno; /* Physical disk offset, in blocks */ /*10*/ }; struct ocfs2_chain_rec { __le32 c_free; /* Number of free bits in this chain. */ __le32 c_total; /* Number of total bits in this chain */ __le64 c_blkno; /* Physical disk offset (blocks) of 1st group */ }; struct ocfs2_truncate_rec { __le32 t_start; /* 1st cluster in this log */ __le32 t_clusters; /* Number of total clusters covered */ }; /* * On disk extent list for OCFS2 (node in the tree). Note that this * is contained inside ocfs2_dinode or ocfs2_extent_block, so the * offsets are relative to ocfs2_dinode.id2.i_list or * ocfs2_extent_block.h_list, respectively. */ struct ocfs2_extent_list { /*00*/ __le16 l_tree_depth; /* Extent tree depth from this point. 0 means data extents hang directly off this header (a leaf) NOTE: The high 8 bits cannot be used - tree_depth is never that big. */ __le16 l_count; /* Number of extent records */ __le16 l_next_free_rec; /* Next unused extent slot */ __le16 l_reserved1; __le64 l_reserved2; /* Pad to sizeof(ocfs2_extent_rec) */ /* Extent records */ /*10*/ struct ocfs2_extent_rec l_recs[] __counted_by_le(l_count); }; /* * On disk allocation chain list for OCFS2. Note that this is * contained inside ocfs2_dinode, so the offsets are relative to * ocfs2_dinode.id2.i_chain. */ struct ocfs2_chain_list { /*00*/ __le16 cl_cpg; /* Clusters per Block Group */ __le16 cl_bpc; /* Bits per cluster */ __le16 cl_count; /* Total chains in this list */ __le16 cl_next_free_rec; /* Next unused chain slot */ __le64 cl_reserved1; /* Chain records */ /*10*/ struct ocfs2_chain_rec cl_recs[] __counted_by_le(cl_count); }; /* * On disk deallocation log for OCFS2. Note that this is * contained inside ocfs2_dinode, so the offsets are relative to * ocfs2_dinode.id2.i_dealloc. */ struct ocfs2_truncate_log { /*00*/ __le16 tl_count; /* Total records in this log */ __le16 tl_used; /* Number of records in use */ __le32 tl_reserved1; /* Truncate records */ /*08*/ struct ocfs2_truncate_rec tl_recs[] __counted_by_le(tl_count); }; /* * On disk extent block (indirect block) for OCFS2 */ struct ocfs2_extent_block { /*00*/ __u8 h_signature[8]; /* Signature for verification */ struct ocfs2_block_check h_check; /* Error checking */ /*10*/ __le16 h_suballoc_slot; /* Slot suballocator this extent_header belongs to */ __le16 h_suballoc_bit; /* Bit offset in suballocator block group */ __le32 h_fs_generation; /* Must match super block */ __le64 h_blkno; /* Offset on disk, in blocks */ /*20*/ __le64 h_suballoc_loc; /* Suballocator block group this eb belongs to. 
Only valid if allocated from a discontiguous block group */ __le64 h_next_leaf_blk; /* Offset on disk, in blocks, of next leaf header pointing to data */ /*30*/ struct ocfs2_extent_list h_list; /* Extent record list */ /* Actual on-disk size is one block */ }; /* * On disk slot map for OCFS2. This defines the contents of the "slot_map" * system file. A slot is valid if it contains a node number >= 0. The * value -1 (0xFFFF) is OCFS2_INVALID_SLOT. This marks a slot empty. */ struct ocfs2_slot_map { /*00*/ DECLARE_FLEX_ARRAY(__le16, sm_slots); /* * Actual on-disk size is one block. OCFS2_MAX_SLOTS is 255, * 255 * sizeof(__le16) == 512B, within the 512B block minimum blocksize. */ }; struct ocfs2_extended_slot { /*00*/ __u8 es_valid; __u8 es_reserved1[3]; __le32 es_node_num; /*08*/ }; /* * The extended slot map, used when OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP * is set. It separates out the valid marker from the node number, and * has room to grow. Unlike the old slot map, this format is defined by * i_size. */ struct ocfs2_slot_map_extended { /*00*/ DECLARE_FLEX_ARRAY(struct ocfs2_extended_slot, se_slots); /* * Actual size is i_size of the slot_map system file. It should * match s_max_slots * sizeof(struct ocfs2_extended_slot) */ }; /* * ci_stackflags is only valid if the incompat bit * OCFS2_FEATURE_INCOMPAT_CLUSTERINFO is set. */ struct ocfs2_cluster_info { /*00*/ __u8 ci_stack[OCFS2_STACK_LABEL_LEN]; union { __le32 ci_reserved; struct { __u8 ci_stackflags; __u8 ci_reserved1; __u8 ci_reserved2; __u8 ci_reserved3; }; }; /*08*/ __u8 ci_cluster[OCFS2_CLUSTER_NAME_LEN]; /*18*/ }; /* * On disk superblock for OCFS2 * Note that it is contained inside an ocfs2_dinode, so all offsets * are relative to the start of ocfs2_dinode.id2. */ struct ocfs2_super_block { /*00*/ __le16 s_major_rev_level; __le16 s_minor_rev_level; __le16 s_mnt_count; __le16 s_max_mnt_count; __le16 s_state; /* File system state */ __le16 s_errors; /* Behaviour when detecting errors */ __le32 s_checkinterval; /* Max time between checks */ /*10*/ __le64 s_lastcheck; /* Time of last check */ __le32 s_creator_os; /* OS */ __le32 s_feature_compat; /* Compatible feature set */ /*20*/ __le32 s_feature_incompat; /* Incompatible feature set */ __le32 s_feature_ro_compat; /* Readonly-compatible feature set */ __le64 s_root_blkno; /* Offset, in blocks, of root directory dinode */ /*30*/ __le64 s_system_dir_blkno; /* Offset, in blocks, of system directory dinode */ __le32 s_blocksize_bits; /* Blocksize for this fs */ __le32 s_clustersize_bits; /* Clustersize for this fs */ /*40*/ __le16 s_max_slots; /* Max number of simultaneous mounts before tunefs required */ __le16 s_tunefs_flag; __le32 s_uuid_hash; /* hash value of uuid */ __le64 s_first_cluster_group; /* Block offset of 1st cluster * group header */ /*50*/ __u8 s_label[OCFS2_MAX_VOL_LABEL_LEN]; /* Label for mounting, etc. */ /*90*/ __u8 s_uuid[OCFS2_VOL_UUID_LEN]; /* 128-bit uuid */ /*A0*/ struct ocfs2_cluster_info s_cluster_info; /* Only valid if either userspace or clusterinfo INCOMPAT flag set. */ /*B8*/ __le16 s_xattr_inline_size; /* extended attribute inline size for this fs*/ __le16 s_reserved0; __le32 s_dx_seed[3]; /* seed[0-2] for dx dir hash. * s_uuid_hash serves as seed[3]. */ /*C8*/ __le64 s_reserved2[15]; /* Fill out superblock */ /*140*/ /* * NOTE: As stated above, all offsets are relative to * ocfs2_dinode.id2, which is at 0xC0 in the inode. * 0xC0 + 0x140 = 0x200 or 512 bytes. A superblock must fit within * our smallest blocksize, which is 512 bytes. 
To ensure this, * we reserve the space in s_reserved2. Anything past s_reserved2 * will not be available on the smallest blocksize. */ }; /* * Local allocation bitmap for OCFS2 slots * Note that it exists inside an ocfs2_dinode, so all offsets are * relative to the start of ocfs2_dinode.id2. */ struct ocfs2_local_alloc { /*00*/ __le32 la_bm_off; /* Starting bit offset in main bitmap */ __le16 la_size; /* Size of included bitmap, in bytes */ __le16 la_reserved1; __le64 la_reserved2; /*10*/ __u8 la_bitmap[]; }; /* * Data-in-inode header. This is only used if i_dyn_features has * OCFS2_INLINE_DATA_FL set. */ struct ocfs2_inline_data { /*00*/ __le16 id_count; /* Number of bytes that can be used * for data, starting at id_data */ __le16 id_reserved0; __le32 id_reserved1; __u8 id_data[]; /* Start of user data */ }; /* * On disk inode for OCFS2 */ struct ocfs2_dinode { /*00*/ __u8 i_signature[8]; /* Signature for validation */ __le32 i_generation; /* Generation number */ __le16 i_suballoc_slot; /* Slot suballocator this inode belongs to */ __le16 i_suballoc_bit; /* Bit offset in suballocator block group */ /*10*/ __le16 i_links_count_hi; /* High 16 bits of links count */ __le16 i_xattr_inline_size; __le32 i_clusters; /* Cluster count */ __le32 i_uid; /* Owner UID */ __le32 i_gid; /* Owning GID */ /*20*/ __le64 i_size; /* Size in bytes */ __le16 i_mode; /* File mode */ __le16 i_links_count; /* Links count */ __le32 i_flags; /* File flags */ /*30*/ __le64 i_atime; /* Access time */ __le64 i_ctime; /* Creation time */ /*40*/ __le64 i_mtime; /* Modification time */ __le64 i_dtime; /* Deletion time */ /*50*/ __le64 i_blkno; /* Offset on disk, in blocks */ __le64 i_last_eb_blk; /* Pointer to last extent block */ /*60*/ __le32 i_fs_generation; /* Generation per fs-instance */ __le32 i_atime_nsec; __le32 i_ctime_nsec; __le32 i_mtime_nsec; /*70*/ __le32 i_attr; __le16 i_orphaned_slot; /* Only valid when OCFS2_ORPHANED_FL was set in i_flags */ __le16 i_dyn_features; __le64 i_xattr_loc; /*80*/ struct ocfs2_block_check i_check; /* Error checking */ /*88*/ __le64 i_dx_root; /* Pointer to dir index root block */ /*90*/ __le64 i_refcount_loc; __le64 i_suballoc_loc; /* Suballocator block group this inode belongs to. Only valid if allocated from a discontiguous block group */ /*A0*/ __le16 i_dio_orphaned_slot; /* only used for append dio write */ __le16 i_reserved1[3]; __le64 i_reserved2[2]; /*B8*/ union { __le64 i_pad1; /* Generic way to refer to this 64bit union */ struct { __le64 i_rdev; /* Device number */ } dev1; struct { /* Info for bitmap system inodes */ __le32 i_used; /* Bits (ie, clusters) used */ __le32 i_total; /* Total bits (clusters) available */ } bitmap1; struct { /* Info for journal system inodes */ __le32 ij_flags; /* Mounted, version, etc. 
*/ __le32 ij_recovery_generation; /* Incremented when the journal is recovered after an unclean shutdown */ } journal1; } id1; /* Inode type dependent 1 */ /*C0*/ union { struct ocfs2_super_block i_super; struct ocfs2_local_alloc i_lab; struct ocfs2_chain_list i_chain; struct ocfs2_extent_list i_list; struct ocfs2_truncate_log i_dealloc; struct ocfs2_inline_data i_data; DECLARE_FLEX_ARRAY(__u8, i_symlink); } id2; /* Actual on-disk size is one block */ }; /* * On-disk directory entry structure for OCFS2 * * Packed as this structure could be accessed unaligned on 64-bit platforms */ struct ocfs2_dir_entry { /*00*/ __le64 inode; /* Inode number */ __le16 rec_len; /* Directory entry length */ __u8 name_len; /* Name length */ __u8 file_type; /*0C*/ char name[OCFS2_MAX_FILENAME_LEN]; /* File name */ /* Actual on-disk length specified by rec_len */ } __attribute__ ((packed)); /* * Per-block record for the unindexed directory btree. This is carefully * crafted so that the rec_len and name_len records of an ocfs2_dir_entry are * mirrored. That way, the directory manipulation code needs a minimal amount * of update. * * NOTE: Keep this structure aligned to a multiple of 4 bytes. */ struct ocfs2_dir_block_trailer { /*00*/ __le64 db_compat_inode; /* Always zero. Was inode */ __le16 db_compat_rec_len; /* Backwards compatible with * ocfs2_dir_entry. */ __u8 db_compat_name_len; /* Always zero. Was name_len */ __u8 db_reserved0; __le16 db_reserved1; __le16 db_free_rec_len; /* Size of largest empty hole * in this block. (unused) */ /*10*/ __u8 db_signature[8]; /* Signature for verification */ __le64 db_reserved2; /*20*/ __le64 db_free_next; /* Next block in list (unused) */ __le64 db_blkno; /* Offset on disk, in blocks */ /*30*/ __le64 db_parent_dinode; /* dinode which owns me, in blocks */ struct ocfs2_block_check db_check; /* Error checking */ /*40*/ }; /* * A directory entry in the indexed tree. We don't store the full name here, * but instead provide a pointer to the full dirent in the unindexed tree. * * We also store name_len here so as to reduce the number of leaf blocks we * need to search in case of collisions. */ struct ocfs2_dx_entry { __le32 dx_major_hash; /* Used to find logical * cluster in index */ __le32 dx_minor_hash; /* Lower bits used to find * block in cluster */ __le64 dx_dirent_blk; /* Physical block in unindexed * tree holding this dirent. */ }; struct ocfs2_dx_entry_list { __le32 de_reserved; __le16 de_count; /* Maximum number of entries * possible in de_entries */ __le16 de_num_used; /* Current number of * de_entries entries */ /* Indexed dir entries in a packed * array of length de_num_used. */ struct ocfs2_dx_entry de_entries[] __counted_by_le(de_count); }; #define OCFS2_DX_FLAG_INLINE 0x01 /* * A directory indexing block. Each indexed directory has one of these, * pointed to by ocfs2_dinode. * * This block stores an indexed btree root, and a set of free space * start-of-list pointers. */ struct ocfs2_dx_root_block { __u8 dr_signature[8]; /* Signature for verification */ struct ocfs2_block_check dr_check; /* Error checking */ __le16 dr_suballoc_slot; /* Slot suballocator this * block belongs to. */ __le16 dr_suballoc_bit; /* Bit offset in suballocator * block group */ __le32 dr_fs_generation; /* Must match super block */ __le64 dr_blkno; /* Offset on disk, in blocks */ __le64 dr_last_eb_blk; /* Pointer to last * extent block */ __le32 dr_clusters; /* Clusters allocated * to the indexed tree. 
*/ __u8 dr_flags; /* OCFS2_DX_FLAG_* flags */ __u8 dr_reserved0; __le16 dr_reserved1; __le64 dr_dir_blkno; /* Pointer to parent inode */ __le32 dr_num_entries; /* Total number of * names stored in * this directory.*/ __le32 dr_reserved2; __le64 dr_free_blk; /* Pointer to head of free * unindexed block list. */ __le64 dr_suballoc_loc; /* Suballocator block group this root belongs to. Only valid if allocated from a discontiguous block group */ __le64 dr_reserved3[14]; union { struct ocfs2_extent_list dr_list; /* Keep this aligned to 128 * bits for maximum space * efficiency. */ struct ocfs2_dx_entry_list dr_entries; /* In-root-block list of * entries. We grow out * to extents if this * gets too big. */ }; }; /* * The header of a leaf block in the indexed tree. */ struct ocfs2_dx_leaf { __u8 dl_signature[8];/* Signature for verification */ struct ocfs2_block_check dl_check; /* Error checking */ __le64 dl_blkno; /* Offset on disk, in blocks */ __le32 dl_fs_generation;/* Must match super block */ __le32 dl_reserved0; __le64 dl_reserved1; struct ocfs2_dx_entry_list dl_list; }; /* * Largest bitmap for a block (suballocator) group in bytes. This limit * does not affect cluster groups (global allocator). Cluster group * bitmaps run to the end of the block. */ #define OCFS2_MAX_BG_BITMAP_SIZE 256 /* * On disk allocator group structure for OCFS2 */ struct ocfs2_group_desc { /*00*/ __u8 bg_signature[8]; /* Signature for validation */ __le16 bg_size; /* Size of included bitmap in bytes. */ __le16 bg_bits; /* Bits represented by this group. */ __le16 bg_free_bits_count; /* Free bits count */ __le16 bg_chain; /* What chain I am in. */ /*10*/ __le32 bg_generation; __le16 bg_contig_free_bits; /* max contig free bits length */ __le16 bg_reserved1; __le64 bg_next_group; /* Next group in my list, in blocks */ /*20*/ __le64 bg_parent_dinode; /* dinode which owns me, in blocks */ __le64 bg_blkno; /* Offset on disk, in blocks */ /*30*/ struct ocfs2_block_check bg_check; /* Error checking */ __le64 bg_reserved2; /*40*/ union { DECLARE_FLEX_ARRAY(__u8, bg_bitmap); struct { /* * Block groups may be discontiguous when * OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG is set. * The extents of a discontiguous block group are * stored in bg_list. It is a flat list. * l_tree_depth must always be zero. A * discontiguous group is signified by a non-zero * bg_list->l_next_free_rec. Only block groups * can be discontiguous; Cluster groups cannot. * We've never made a block group with more than * 2048 blocks (256 bytes of bg_bitmap). This * codifies that limit so that we can fit bg_list. * bg_size of a discontiguous block group will * be 256 to match bg_bitmap_filler. 
*/ __u8 bg_bitmap_filler[OCFS2_MAX_BG_BITMAP_SIZE]; /*140*/ struct ocfs2_extent_list bg_list; }; }; /* Actual on-disk size is one block */ }; struct ocfs2_refcount_rec { /*00*/ __le64 r_cpos; /* Physical offset, in clusters */ __le32 r_clusters; /* Clusters covered by this extent */ __le32 r_refcount; /* Reference count of this extent */ /*10*/ }; #define OCFS2_32BIT_POS_MASK (0xffffffffULL) #define OCFS2_REFCOUNT_LEAF_FL (0x00000001) #define OCFS2_REFCOUNT_TREE_FL (0x00000002) struct ocfs2_refcount_list { /*00*/ __le16 rl_count; /* Maximum number of entries possible in rl_records */ __le16 rl_used; /* Current number of used records */ __le32 rl_reserved2; __le64 rl_reserved1; /* Pad to sizeof(ocfs2_refcount_record) */ /* Refcount records */ /*10*/ struct ocfs2_refcount_rec rl_recs[] __counted_by_le(rl_count); }; struct ocfs2_refcount_block { /*00*/ __u8 rf_signature[8]; /* Signature for verification */ __le16 rf_suballoc_slot; /* Slot suballocator this block belongs to */ __le16 rf_suballoc_bit; /* Bit offset in suballocator block group */ __le32 rf_fs_generation; /* Must match superblock */ /*10*/ __le64 rf_blkno; /* Offset on disk, in blocks */ __le64 rf_parent; /* Parent block, only valid if OCFS2_REFCOUNT_LEAF_FL is set in rf_flags */ /*20*/ struct ocfs2_block_check rf_check; /* Error checking */ __le64 rf_last_eb_blk; /* Pointer to last extent block */ /*30*/ __le32 rf_count; /* Number of inodes sharing this refcount tree */ __le32 rf_flags; /* See the flags above */ __le32 rf_clusters; /* clusters covered by refcount tree. */ __le32 rf_cpos; /* cluster offset in refcount tree.*/ /*40*/ __le32 rf_generation; /* generation number. all be the same * for the same refcount tree. */ __le32 rf_reserved0; __le64 rf_suballoc_loc; /* Suballocator block group this refcount block belongs to. Only valid if allocated from a discontiguous block group */ /*50*/ __le64 rf_reserved1[6]; /*80*/ union { struct ocfs2_refcount_list rf_records; /* List of refcount records */ struct ocfs2_extent_list rf_list; /* Extent record list, only valid if OCFS2_REFCOUNT_TREE_FL is set in rf_flags */ }; /* Actual on-disk size is one block */ }; /* * On disk extended attribute structure for OCFS2. */ /* * ocfs2_xattr_entry indicates one extend attribute. * * Note that it can be stored in inode, one block or one xattr bucket. */ struct ocfs2_xattr_entry { __le32 xe_name_hash; /* hash value of xattr prefix+suffix. */ __le16 xe_name_offset; /* byte offset from the 1st entry in the local xattr storage(inode, xattr block or xattr bucket). */ __u8 xe_name_len; /* xattr name len, doesn't include prefix. */ __u8 xe_type; /* the low 7 bits indicate the name prefix * type and the highest bit indicates whether * the EA is stored in the local storage. */ __le64 xe_value_size; /* real xattr value length. */ }; /* * On disk structure for xattr header. * * One ocfs2_xattr_header describes how many ocfs2_xattr_entry records in * the local xattr storage. */ struct ocfs2_xattr_header { __le16 xh_count; /* contains the count of how many records are in the local xattr storage. */ __le16 xh_free_start; /* current offset for storing xattr. */ __le16 xh_name_value_len; /* total length of name/value length in this bucket. */ __le16 xh_num_buckets; /* Number of xattr buckets in this extent record, only valid in the first bucket. */ struct ocfs2_block_check xh_check; /* Error checking (Note, this is only used for xattr buckets. A block uses xb_check and sets this field to zero.) */ /* xattr entry list. 
*/ struct ocfs2_xattr_entry xh_entries[] __counted_by_le(xh_count); }; /* * On disk structure for xattr value root. * * When an xattr's value is large enough, it is stored in an external * b-tree like file data. The xattr value root points to this structure. */ struct ocfs2_xattr_value_root { /*00*/ __le32 xr_clusters; /* clusters covered by xattr value. */ __le32 xr_reserved0; __le64 xr_last_eb_blk; /* Pointer to last extent block */ /*10*/ struct ocfs2_extent_list xr_list; /* Extent record list */ }; /* * On disk structure for xattr tree root. * * It is used when there are too many extended attributes for one file. These * attributes will be organized and stored in an indexed-btree. */ struct ocfs2_xattr_tree_root { /*00*/ __le32 xt_clusters; /* clusters covered by xattr. */ __le32 xt_reserved0; __le64 xt_last_eb_blk; /* Pointer to last extent block */ /*10*/ struct ocfs2_extent_list xt_list; /* Extent record list */ }; #define OCFS2_XATTR_INDEXED 0x1 #define OCFS2_HASH_SHIFT 5 #define OCFS2_XATTR_ROUND 3 #define OCFS2_XATTR_SIZE(size) (((size) + OCFS2_XATTR_ROUND) & \ ~(OCFS2_XATTR_ROUND)) #define OCFS2_XATTR_BUCKET_SIZE 4096 #define OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET (OCFS2_XATTR_BUCKET_SIZE \ / OCFS2_MIN_BLOCKSIZE) /* * On disk structure for xattr block. */ struct ocfs2_xattr_block { /*00*/ __u8 xb_signature[8]; /* Signature for verification */ __le16 xb_suballoc_slot; /* Slot suballocator this block belongs to. */ __le16 xb_suballoc_bit; /* Bit offset in suballocator block group */ __le32 xb_fs_generation; /* Must match super block */ /*10*/ __le64 xb_blkno; /* Offset on disk, in blocks */ struct ocfs2_block_check xb_check; /* Error checking */ /*20*/ __le16 xb_flags; /* Indicates whether this block contains real xattr or a xattr tree. */ __le16 xb_reserved0; __le32 xb_reserved1; __le64 xb_suballoc_loc; /* Suballocator block group this xattr block belongs to. Only valid if allocated from a discontiguous block group */ /*30*/ union { struct ocfs2_xattr_header xb_header; /* xattr header if this block contains xattr */ struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this block contains xattr tree. */ } xb_attrs; }; #define OCFS2_XATTR_ENTRY_LOCAL 0x80 #define OCFS2_XATTR_TYPE_MASK 0x7F static inline void ocfs2_xattr_set_local(struct ocfs2_xattr_entry *xe, int local) { if (local) xe->xe_type |= OCFS2_XATTR_ENTRY_LOCAL; else xe->xe_type &= ~OCFS2_XATTR_ENTRY_LOCAL; } static inline int ocfs2_xattr_is_local(struct ocfs2_xattr_entry *xe) { return xe->xe_type & OCFS2_XATTR_ENTRY_LOCAL; } static inline void ocfs2_xattr_set_type(struct ocfs2_xattr_entry *xe, int type) { xe->xe_type |= type & OCFS2_XATTR_TYPE_MASK; } static inline int ocfs2_xattr_get_type(struct ocfs2_xattr_entry *xe) { return xe->xe_type & OCFS2_XATTR_TYPE_MASK; } /* * On disk structures for global quota file */ /* Magic numbers and known versions for global quota files */ #define OCFS2_GLOBAL_QMAGICS {\ 0x0cf52470, /* USRQUOTA */ \ 0x0cf52471 /* GRPQUOTA */ \ } #define OCFS2_GLOBAL_QVERSIONS {\ 0, \ 0, \ } /* Each block of each quota file has a certain fixed number of bytes reserved * for OCFS2 internal use at its end. OCFS2 can use it for things like * checksums, etc. 
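 *
 * Worked example (added): with OCFS2_QBLK_RESERVED_SPACE == 8 (defined
 * just below) and a 4K block, quota payload may use bytes 0..4087 while
 * the struct ocfs2_disk_dqtrailer checksum occupies bytes 4088..4095;
 * ocfs2_block_dqtrailer() further down returns a pointer to byte 4088.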
*/ #define OCFS2_QBLK_RESERVED_SPACE 8 /* Generic header of all quota files */ struct ocfs2_disk_dqheader { __le32 dqh_magic; /* Magic number identifying file */ __le32 dqh_version; /* Quota format version */ }; #define OCFS2_GLOBAL_INFO_OFF (sizeof(struct ocfs2_disk_dqheader)) /* Information header of global quota file (immediately follows the generic * header) */ struct ocfs2_global_disk_dqinfo { /*00*/ __le32 dqi_bgrace; /* Grace time for space softlimit excess */ __le32 dqi_igrace; /* Grace time for inode softlimit excess */ __le32 dqi_syncms; /* Time after which we sync local changes to * global quota file */ __le32 dqi_blocks; /* Number of blocks in quota file */ /*10*/ __le32 dqi_free_blk; /* First free block in quota file */ __le32 dqi_free_entry; /* First block with free dquot entry in quota * file */ }; /* Structure with global user / group information. We reserve some space * for future use. */ struct ocfs2_global_disk_dqblk { /*00*/ __le32 dqb_id; /* ID the structure belongs to */ __le32 dqb_use_count; /* Number of nodes having reference to this structure */ __le64 dqb_ihardlimit; /* absolute limit on allocated inodes */ /*10*/ __le64 dqb_isoftlimit; /* preferred inode limit */ __le64 dqb_curinodes; /* current # allocated inodes */ /*20*/ __le64 dqb_bhardlimit; /* absolute limit on disk space */ __le64 dqb_bsoftlimit; /* preferred limit on disk space */ /*30*/ __le64 dqb_curspace; /* current space occupied */ __le64 dqb_btime; /* time limit for excessive disk use */ /*40*/ __le64 dqb_itime; /* time limit for excessive inode use */ __le64 dqb_pad1; /*50*/ __le64 dqb_pad2; }; /* * On-disk structures for local quota file */ /* Magic numbers and known versions for local quota files */ #define OCFS2_LOCAL_QMAGICS {\ 0x0cf524c0, /* USRQUOTA */ \ 0x0cf524c1 /* GRPQUOTA */ \ } #define OCFS2_LOCAL_QVERSIONS {\ 0, \ 0, \ } /* Quota flags in dqinfo header */ #define OLQF_CLEAN 0x0001 /* Quota file is empty (this should be after\ * quota has been cleanly turned off) */ #define OCFS2_LOCAL_INFO_OFF (sizeof(struct ocfs2_disk_dqheader)) /* Information header of local quota file (immediately follows the generic * header) */ struct ocfs2_local_disk_dqinfo { __le32 dqi_flags; /* Flags for quota file */ __le32 dqi_chunks; /* Number of chunks of quota structures * with a bitmap */ __le32 dqi_blocks; /* Number of blocks allocated for quota file */ }; /* Header of one chunk of a quota file */ struct ocfs2_local_disk_chunk { __le32 dqc_free; /* Number of free entries in the bitmap */ __u8 dqc_bitmap[]; /* Bitmap of entries in the corresponding * chunk of quota file */ }; /* One entry in local quota file */ struct ocfs2_local_disk_dqblk { /*00*/ __le64 dqb_id; /* id this quota applies to */ __le64 dqb_spacemod; /* Change in the amount of used space */ /*10*/ __le64 dqb_inodemod; /* Change in the amount of used inodes */ }; /* * The quota trailer lives at the end of each quota block. 
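 *
 * Worked example (added): sizes follow from the structures above. One
 * struct ocfs2_local_disk_dqblk is 24 bytes, so a 4K quota block holds
 * (4096 - OCFS2_QBLK_RESERVED_SPACE) / 24 = 170 entries, each tracked by
 * one bit in the owning chunk's dqc_bitmap.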
*/ struct ocfs2_disk_dqtrailer { /*00*/ struct ocfs2_block_check dq_check; /* Error checking */ /*08*/ /* Cannot be larger than OCFS2_QBLK_RESERVED_SPACE */ }; static inline struct ocfs2_disk_dqtrailer *ocfs2_block_dqtrailer(int blocksize, void *buf) { char *ptr = buf; ptr += blocksize - OCFS2_QBLK_RESERVED_SPACE; return (struct ocfs2_disk_dqtrailer *)ptr; } #ifdef __KERNEL__ static inline int ocfs2_fast_symlink_chars(struct super_block *sb) { return sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); } static inline int ocfs2_max_inline_data_with_xattr(struct super_block *sb, struct ocfs2_dinode *di) { unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) return sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data) - xattrsize; else return sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); } static inline int ocfs2_extent_recs_per_inode(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline int ocfs2_extent_recs_per_inode_with_xattr( struct super_block *sb, struct ocfs2_dinode *di) { int size; unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) size = sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_list.l_recs) - xattrsize; else size = sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline int ocfs2_extent_recs_per_dx_root(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_dx_root_block, dr_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline int ocfs2_chain_recs_per_inode(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_chain.cl_recs); return size / sizeof(struct ocfs2_chain_rec); } static inline u16 ocfs2_extent_recs_per_eb(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_extent_block, h_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline u16 ocfs2_extent_recs_per_gd(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_group_desc, bg_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline int ocfs2_dx_entries_per_leaf(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_dx_leaf, dl_list.de_entries); return size / sizeof(struct ocfs2_dx_entry); } static inline int ocfs2_dx_entries_per_root(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_dx_root_block, dr_entries.de_entries); return size / sizeof(struct ocfs2_dx_entry); } static inline u16 ocfs2_local_alloc_size(struct super_block *sb) { u16 size; size = sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap); return size; } static inline int ocfs2_group_bitmap_size(struct super_block *sb, int suballocator, u32 feature_incompat) { int size = sb->s_blocksize - offsetof(struct ocfs2_group_desc, bg_bitmap); /* * The cluster allocator uses the entire block. Suballocators have * never used more than OCFS2_MAX_BG_BITMAP_SIZE. Unfortunately, older * code expects bg_size set to the maximum. Thus we must keep * bg_size as-is unless discontig_bg is enabled. 
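 *
 * Worked example (added): bg_bitmap starts at byte 0x40 of the group
 * descriptor, so with a 4K blocksize this returns 4096 - 64 = 4032
 * bitmap bytes for the cluster allocator, but only
 * OCFS2_MAX_BG_BITMAP_SIZE (256) for a suballocator once discontig-bg
 * is enabled.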
*/ if (suballocator && (feature_incompat & OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG)) size = OCFS2_MAX_BG_BITMAP_SIZE; return size; } static inline int ocfs2_truncate_recs_per_inode(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_dinode, id2.i_dealloc.tl_recs); return size / sizeof(struct ocfs2_truncate_rec); } static inline u64 ocfs2_backup_super_blkno(struct super_block *sb, int index) { u64 offset = OCFS2_BACKUP_SB_START; if (index >= 0 && index < OCFS2_MAX_BACKUP_SUPERBLOCKS) { offset <<= (2 * index); offset >>= sb->s_blocksize_bits; return offset; } return 0; } static inline u16 ocfs2_xattr_recs_per_xb(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_xattr_block, xb_attrs.xb_root.xt_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline u16 ocfs2_extent_recs_per_rb(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_refcount_block, rf_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline u16 ocfs2_refcount_recs_per_rb(struct super_block *sb) { int size; size = sb->s_blocksize - offsetof(struct ocfs2_refcount_block, rf_records.rl_recs); return size / sizeof(struct ocfs2_refcount_rec); } static inline u32 ocfs2_get_ref_rec_low_cpos(const struct ocfs2_refcount_rec *rec) { return le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK; } #else static inline int ocfs2_fast_symlink_chars(int blocksize) { return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); } static inline int ocfs2_max_inline_data_with_xattr(int blocksize, struct ocfs2_dinode *di) { if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL)) return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data) - di->i_xattr_inline_size; else return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); } static inline int ocfs2_extent_recs_per_inode(int blocksize) { int size; size = blocksize - offsetof(struct ocfs2_dinode, id2.i_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline int ocfs2_chain_recs_per_inode(int blocksize) { int size; size = blocksize - offsetof(struct ocfs2_dinode, id2.i_chain.cl_recs); return size / sizeof(struct ocfs2_chain_rec); } static inline int ocfs2_extent_recs_per_eb(int blocksize) { int size; size = blocksize - offsetof(struct ocfs2_extent_block, h_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline int ocfs2_extent_recs_per_gd(int blocksize) { int size; size = blocksize - offsetof(struct ocfs2_group_desc, bg_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } static inline int ocfs2_local_alloc_size(int blocksize) { int size; size = blocksize - offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap); return size; } static inline int ocfs2_group_bitmap_size(int blocksize, int suballocator, uint32_t feature_incompat) { int size = blocksize - offsetof(struct ocfs2_group_desc, bg_bitmap); /* * The cluster allocator uses the entire block. Suballocators have * never used more than OCFS2_MAX_BG_BITMAP_SIZE. Unfortunately, older * code expects bg_size set to the maximum. Thus we must keep * bg_size as-is unless discontig_bg is enabled.
*/ if (suballocator && (feature_incompat & OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG)) size = OCFS2_MAX_BG_BITMAP_SIZE; return size; } static inline int ocfs2_truncate_recs_per_inode(int blocksize) { int size; size = blocksize - offsetof(struct ocfs2_dinode, id2.i_dealloc.tl_recs); return size / sizeof(struct ocfs2_truncate_rec); } static inline uint64_t ocfs2_backup_super_blkno(int blocksize, int index) { uint64_t offset = OCFS2_BACKUP_SB_START; if (index >= 0 && index < OCFS2_MAX_BACKUP_SUPERBLOCKS) { offset <<= (2 * index); offset /= blocksize; return offset; } return 0; } static inline int ocfs2_xattr_recs_per_xb(int blocksize) { int size; size = blocksize - offsetof(struct ocfs2_xattr_block, xb_attrs.xb_root.xt_list.l_recs); return size / sizeof(struct ocfs2_extent_rec); } #endif /* __KERNEL__ */ static inline int ocfs2_system_inode_is_global(int type) { return ((type >= 0) && (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE)); } static inline int ocfs2_sprintf_system_inode_name(char *buf, int len, int type, int slot) { int chars; /* * Global system inodes can only have one copy. Everything * after OCFS2_LAST_GLOBAL_SYSTEM_INODE in the system inode * list has a copy per slot. */ if (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE) chars = snprintf(buf, len, "%s", ocfs2_system_inodes[type].si_name); else chars = snprintf(buf, len, ocfs2_system_inodes[type].si_name, slot); return chars; } static inline void ocfs2_set_de_type(struct ocfs2_dir_entry *de, umode_t mode) { de->file_type = fs_umode_to_ftype(mode); } static inline int ocfs2_gd_is_discontig(struct ocfs2_group_desc *gd) { if ((offsetof(struct ocfs2_group_desc, bg_bitmap) + le16_to_cpu(gd->bg_size)) != offsetof(struct ocfs2_group_desc, bg_list)) return 0; /* * Only valid to check l_next_free_rec if * bg_bitmap + bg_size == bg_list. */ if (!gd->bg_list.l_next_free_rec) return 0; return 1; } #endif /* _OCFS2_FS_H */
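/*
 * Usage sketch (added for illustration; not part of the kernel sources).
 * It assumes the header above is saved as "ocfs2_fs.h" and that userspace
 * shims for the kernel types (__le16, S_IFREG, ...) are available, as in
 * ocfs2-tools. It shows how per-slot system inode names are resolved from
 * the ocfs2_system_inodes[] table.
 */
#include <stdio.h>
#include "ocfs2_fs.h"

int main(void)
{
	char name[OCFS2_MAX_FILENAME_LEN + 1];

	/* "journal:%04d" with slot 3 expands to "journal:0003". */
	ocfs2_sprintf_system_inode_name(name, sizeof(name),
					JOURNAL_SYSTEM_INODE, 3);
	printf("%s\n", name);
	return 0;
}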
// SPDX-License-Identifier: GPL-2.0 /* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. * * File es58x_devlink.c: report the product information using devlink. * * Copyright (c) 2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr> */ #include <linux/ctype.h> #include <linux/device.h> #include <linux/usb.h> #include <net/devlink.h> #include "es58x_core.h" /* USB descriptor index containing the product information string. */ #define ES58X_PROD_INFO_IDX 6 /** * es58x_parse_sw_version() - Extract boot loader or firmware version. * @es58x_dev: ES58X device. * @prod_info: USB custom string returned by the device. * @prefix: Select which information should be parsed. Set it to "FW" * to parse the firmware version or to "BL" to parse the * bootloader version. * * The @prod_info string contains the firmware and the bootloader * version number all prefixed by a magic string and concatenated with * other numbers. Depending on the device, the firmware (bootloader) * format is either "FW_Vxx.xx.xx" ("BL_Vxx.xx.xx") or "FW:xx.xx.xx" * ("BL:xx.xx.xx") where 'x' represents a digit. @prod_info must * contain the common part of those prefixes: "FW" or "BL". * * Parse @prod_info and store the version number in * &es58x_dev.firmware_version or &es58x_dev.bootloader_version * according to @prefix value. * * Return: zero on success, -EINVAL if @prefix contains an invalid * value and -EBADMSG if @prod_info could not be parsed. */ static int es58x_parse_sw_version(struct es58x_device *es58x_dev, const char *prod_info, const char *prefix) { struct es58x_sw_version *version; unsigned int major, minor, revision; if (!strcmp(prefix, "FW")) version = &es58x_dev->firmware_version; else if (!strcmp(prefix, "BL")) version = &es58x_dev->bootloader_version; else return -EINVAL; /* Go to prefix */ prod_info = strstr(prod_info, prefix); if (!prod_info) return -EBADMSG; /* Go to beginning of the version number */ while (!isdigit(*prod_info)) { prod_info++; if (!*prod_info) return -EBADMSG; } if (sscanf(prod_info, "%2u.%2u.%2u", &major, &minor, &revision) != 3) return -EBADMSG; version->major = major; version->minor = minor; version->revision = revision; return 0; } /** * es58x_parse_hw_rev() - Extract hardware revision number. * @es58x_dev: ES58X device. * @prod_info: USB custom string returned by the device. * * @prod_info contains the hardware revision prefixed by a magic * string and concatenated together with other numbers. Depending on * the device, the hardware revision format is either * "HW_VER:axxx/xxx" or "HR:axxx/xxx" where 'a' represents a letter * and 'x' a digit.
* * Parse @prod_info and store the hardware revision number in * &es58x_dev.hardware_revision. * * Return: zero on success, -EBADMSG if @prod_info could not be * parsed. */ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev, const char *prod_info) { char letter; unsigned int major, minor; /* The only occurrence of 'H' is in the hardware revision prefix. */ prod_info = strchr(prod_info, 'H'); if (!prod_info) return -EBADMSG; /* Go to beginning of the hardware revision */ prod_info = strchr(prod_info, ':'); if (!prod_info) return -EBADMSG; prod_info++; if (sscanf(prod_info, "%c%3u/%3u", &letter, &major, &minor) != 3) return -EBADMSG; es58x_dev->hardware_revision.letter = letter; es58x_dev->hardware_revision.major = major; es58x_dev->hardware_revision.minor = minor; return 0; } /** * es58x_parse_product_info() - Parse the ES58x product information * string. * @es58x_dev: ES58X device. * * Retrieve the product information string and parse it to extract the * firmware version, the bootloader version and the hardware * revision. * * If the function fails, set the version or revision to an invalid * value and emit an informational message. Continue probing because the * product information is not critical for the driver to operate. */ void es58x_parse_product_info(struct es58x_device *es58x_dev) { static const struct es58x_sw_version sw_version_not_set = { .major = -1, .minor = -1, .revision = -1, }; static const struct es58x_hw_revision hw_revision_not_set = { .letter = '\0', .major = -1, .minor = -1, }; char *prod_info; es58x_dev->firmware_version = sw_version_not_set; es58x_dev->bootloader_version = sw_version_not_set; es58x_dev->hardware_revision = hw_revision_not_set; prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX); if (!prod_info) { dev_warn(es58x_dev->dev, "could not retrieve the product info string\n"); return; } if (es58x_parse_sw_version(es58x_dev, prod_info, "FW") || es58x_parse_sw_version(es58x_dev, prod_info, "BL") || es58x_parse_hw_rev(es58x_dev, prod_info)) dev_info(es58x_dev->dev, "could not parse product info: '%s'\n", prod_info); kfree(prod_info); } /** * es58x_sw_version_is_valid() - Check if the version is a valid number. * @sw_ver: Version number of either the firmware or the bootloader. * * If any of the software version sub-numbers do not fit on two * digits, the version is invalid, most probably because the product * string could not be parsed. * * Return: @true if the software version is valid, @false otherwise. */ static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver) { return sw_ver->major < 100 && sw_ver->minor < 100 && sw_ver->revision < 100; } /** * es58x_hw_revision_is_valid() - Check if the revision is a valid number. * @hw_rev: Revision number of the hardware. * * If &es58x_hw_revision.letter is not an alphanumeric character or if * any of the hardware revision sub-numbers do not fit on three * digits, the revision is invalid, most probably because the product * string could not be parsed. * * Return: @true if the hardware revision is valid, @false otherwise. */ static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev) { return isalnum(hw_rev->letter) && hw_rev->major < 1000 && hw_rev->minor < 1000; } /** * es58x_devlink_info_get() - Report the product information. * @devlink: Devlink. * @req: skb wrapper where to put requested information. * @extack: Unused. * * Report the firmware version, the bootloader version, the hardware * revision and the serial number through netlink.
* * Return: zero on success, errno when any error occurs. */ static int es58x_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { struct es58x_device *es58x_dev = devlink_priv(devlink); struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version; struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version; struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision; char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))]; int ret = 0; if (es58x_sw_version_is_valid(fw_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", fw_ver->major, fw_ver->minor, fw_ver->revision); ret = devlink_info_version_running_put(req, DEVLINK_INFO_VERSION_GENERIC_FW, buf); if (ret) return ret; } if (es58x_sw_version_is_valid(bl_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", bl_ver->major, bl_ver->minor, bl_ver->revision); ret = devlink_info_version_running_put(req, DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER, buf); if (ret) return ret; } if (es58x_hw_revision_is_valid(hw_rev)) { snprintf(buf, sizeof(buf), "%c%03u/%03u", hw_rev->letter, hw_rev->major, hw_rev->minor); ret = devlink_info_version_fixed_put(req, DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, buf); if (ret) return ret; } if (es58x_dev->udev->serial) ret = devlink_info_serial_number_put(req, es58x_dev->udev->serial); return ret; } const struct devlink_ops es58x_dl_ops = { .info_get = es58x_devlink_info_get, };
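/*
 * Standalone sketch (added for illustration; not part of the driver). It
 * replays the parsing scheme of es58x_parse_sw_version() in plain C: skip
 * to the first digit after the "FW" prefix, then read at most two digits
 * per sub-number. The product string below is a made-up example in the
 * documented "FW_Vxx.xx.xx" format.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *prod_info = "FW_V03.00.02 BL_V01.03.02 HW_VER:A040/001";
	const char *p = strstr(prod_info, "FW");
	unsigned int major, minor, revision;

	/* Advance to the first digit of the version number. */
	while (p && *p && !isdigit((unsigned char)*p))
		p++;
	if (p && sscanf(p, "%2u.%2u.%2u", &major, &minor, &revision) == 3)
		printf("firmware %02u.%02u.%02u\n", major, minor, revision);
	return 0;
}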
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef BLK_MQ_H #define BLK_MQ_H #include <linux/blkdev.h> #include <linux/sbitmap.h> #include <linux/lockdep.h> #include <linux/scatterlist.h> #include <linux/prefetch.h> #include <linux/srcu.h> #include <linux/rw_hint.h> #include <linux/rwsem.h> struct blk_mq_tags; struct blk_flush_queue; #define BLKDEV_MIN_RQ 4 #define BLKDEV_DEFAULT_RQ 128 enum rq_end_io_ret { RQ_END_IO_NONE, RQ_END_IO_FREE, }; typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t); /* * request flags */ typedef __u32 __bitwise req_flags_t; /* Keep rqf_name[] in sync with the definitions below */ enum rqf_flags { /* drive already may have started this one */ __RQF_STARTED, /* request for flush sequence */ __RQF_FLUSH_SEQ, /* merge of different types, fail separately */ __RQF_MIXED_MERGE, /* don't call prep for this one */ __RQF_DONTPREP, /* use hctx->sched_tags */ __RQF_SCHED_TAGS, /* use an I/O scheduler for this request */ __RQF_USE_SCHED, /* vaguely specified driver internal error. Ignored by block layer */ __RQF_FAILED, /* don't warn about errors */ __RQF_QUIET, /* account into disk and partition IO statistics */ __RQF_IO_STAT, /* runtime pm request */ __RQF_PM, /* on IO scheduler merge hash */ __RQF_HASHED, /* track IO completion time */ __RQF_STATS, /* Look at ->special_vec for the actual data payload instead of the bio chain. */ __RQF_SPECIAL_PAYLOAD, /* request completion needs to be signaled to zone write plugging.
*/ __RQF_ZONE_WRITE_PLUGGING, /* ->timeout has been called, don't expire again */ __RQF_TIMED_OUT, __RQF_RESV, __RQF_BITS }; #define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED)) #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ)) #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE)) #define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP)) #define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS)) #define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED)) #define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED)) #define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET)) #define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT)) #define RQF_PM ((__force req_flags_t)(1 << __RQF_PM)) #define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED)) #define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS)) #define RQF_SPECIAL_PAYLOAD \ ((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD)) #define RQF_ZONE_WRITE_PLUGGING \ ((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING)) #define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT)) #define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV)) /* flags that prevent us from merging requests: */ #define RQF_NOMERGE_FLAGS \ (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) enum mq_rq_state { MQ_RQ_IDLE = 0, MQ_RQ_IN_FLIGHT = 1, MQ_RQ_COMPLETE = 2, }; /* * Try to put the fields that are referenced together in the same cacheline. * * If you modify this structure, make sure to update blk_rq_init() and * especially blk_mq_rq_ctx_init() to take care of the added fields. */ struct request { struct request_queue *q; struct blk_mq_ctx *mq_ctx; struct blk_mq_hw_ctx *mq_hctx; blk_opf_t cmd_flags; /* op and common flags */ req_flags_t rq_flags; int tag; int internal_tag; unsigned int timeout; /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ sector_t __sector; /* sector cursor */ struct bio *bio; struct bio *biotail; union { struct list_head queuelist; struct request *rq_next; }; struct block_device *part; #ifdef CONFIG_BLK_RQ_ALLOC_TIME /* Time that the first bio started allocating this request. */ u64 alloc_time_ns; #endif /* Time that this request was allocated for this IO. */ u64 start_time_ns; /* Time that I/O was submitted to the device. */ u64 io_start_time_ns; #ifdef CONFIG_BLK_WBT unsigned short wbt_flags; #endif /* * rq sectors used for blk stats. It has the same value * as blk_rq_sectors(rq), except that it is never zeroed * by completion. */ unsigned short stats_sectors; /* * Number of scatter-gather DMA addr+len pairs after * physical address coalescing is performed. */ unsigned short nr_phys_segments; unsigned short nr_integrity_segments; /* * The lowest set bit for address gaps between physical segments. This * provides information necessary for dma optimization opportunities, * like for testing if the segments can be coalesced against the * device's iommu granule. */ unsigned char phys_gap_bit; #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct bio_crypt_ctx *crypt_ctx; struct blk_crypto_keyslot *crypt_keyslot; #endif enum mq_rq_state state; atomic_t ref; unsigned long deadline; /* * The hash is used inside the scheduler, and killed once the * request reaches the dispatch list. The ipi_list is only used * to queue the request for softirq completion, which is long * after the request has been unhashed (and even removed from * the dispatch list).
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
	 * inserted into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to dynamically allocate it.
	 */
	struct {
		struct io_cq *icq;
		void *priv[2];
	} elv;

	struct {
		unsigned int seq;
		rq_end_io_fn *saved_end_io;
	} flush;

	u64 fifo_time;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

/*
 * Returns a mask with all bits starting at req->phys_gap_bit set to 1.
 */
static inline unsigned long req_phys_gap_mask(const struct request *req)
{
	return ~(((1 << req->phys_gap_bit) >> 1) - 1);
}

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	if (req->bio)
		return req->bio->bi_ioprio;
	return 0;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

static inline int rq_list_empty(const struct rq_list *rl)
{
	return rl->head == NULL;
}

static inline void rq_list_init(struct rq_list *rl)
{
	rl->head = NULL;
	rl->tail = NULL;
}

static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
{
	rq->rq_next = NULL;
	if (rl->tail)
		rl->tail->rq_next = rq;
	else
		rl->head = rq;
	rl->tail = rq;
}

static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
{
	rq->rq_next = rl->head;
	rl->head = rq;
	if (!rl->tail)
		rl->tail = rq;
}

static inline struct request *rq_list_pop(struct rq_list *rl)
{
	struct request *rq = rl->head;

	if (rq) {
		rl->head = rl->head->rq_next;
		if (!rl->head)
			rl->tail = NULL;
		rq->rq_next = NULL;
	}

	return rq;
}

static inline struct request *rq_list_peek(struct rq_list *rl)
{
	return rl->head;
}

#define rq_list_for_each(rl, pos)					\
	for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)

#define rq_list_for_each_safe(rl, pos, nxt)				\
	for (pos = rq_list_peek((rl)), nxt = pos->rq_next;		\
	     pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
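/*
 * Usage sketch (illustrative only; my_requeue_all() is a hypothetical
 * driver helper): drain a driver-private rq_list that was built with
 * rq_list_add_tail() and hand each request back to the block layer.
 *
 *	static void my_requeue_all(struct rq_list *rl)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = rq_list_pop(rl)))
 *			blk_mq_requeue_request(rq, true);
 *	}
 */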
/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack
		 * of resources) could not be sent to the hardware. As soon as
		 * the driver can send new requests, requests at this list
		 * will be sent first for a fairer dispatch.
		 */
		struct list_head dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later
	 * time.
	 */
	struct delayed_work run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many queue runs are left in the
	 * batch before changing to the next CPU.
	 */
	int next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a
	 * request queue. It's up to the IO scheduler how to use this pointer.
	 */
	void *sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware
	 * context.
	 */
	struct request_queue *queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue *fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that
	 * created this hctx.
	 */
	void *driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If a bit is on, there is
	 * a pending request in that software queue.
	 */
	struct sbitmap ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx *dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using the Exponential Weighted
	 * Moving Average algorithm.
	 */
	unsigned int dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short type;
	/** @nr_ctx: Number of software queues. */
	unsigned short nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx **ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags *tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags *sched_tags;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t nr_active;

	/** @cpuhp_online: List to store requests when a CPU is going offline. */
	struct hlist_node cpuhp_online;
	/** @cpuhp_dead: List to store requests when a CPU dies. */
	struct hlist_node cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry *debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry *sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head hctx_list;
};
/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ: Just for READ I/O.
 * @HCTX_TYPE_POLL: Polled I/O of any kind.
 * @HCTX_MAX_TYPES: Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops: Pointers to functions that implement block driver behavior.
 * @map: One or more ctx -> hctx mappings. One map exists for each
 *	hardware queue type (enum hctx_type) that the driver wishes
 *	to support. There are no restrictions on maps being of the
 *	same size, and it's perfectly legal to share maps between
 *	types.
 * @nr_maps: Number of elements in the @map array. A number in the range
 *	[1, HCTX_MAX_TYPES].
 * @nr_hw_queues: Number of hardware queues supported by the block driver that
 *	owns this data structure.
 * @queue_depth: Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *	allocations.
 * @cmd_size: Number of additional bytes to allocate per request. The block
 *	driver owns these additional bytes.
 * @numa_node: NUMA node the storage adapter has been connected to.
 * @timeout: Request processing timeout in jiffies.
 * @flags: Zero or more BLK_MQ_F_* flags.
 * @driver_data: Pointer to data owned by the block driver that created this
 *	tag set.
 * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *	elements.
 * @shared_tags:
 *	Shared set of tags. Has @nr_hw_queues elements. If set,
 *	shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list: List of the request queues that use this tag set. See also
 *	request_queue.tag_set_list.
 * @srcu: Use as lock when type of the request queue is blocking
 *	(BLK_MQ_F_BLOCKING).
 * @tags_srcu: SRCU used to defer freeing of tags page_list to prevent
 *	use-after-free when iterating tags.
 * @update_nr_hwq_lock:
 *	Synchronize updating nr_hw_queues with add/del disk &
 *	switching elevator.
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;
	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;
	struct srcu_struct	tags_srcu;

	struct rw_semaphore	update_nr_hwq_lock;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq: Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);
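/*
 * Setup sketch (illustrative; my_mq_ops, struct my_cmd and the chosen
 * numbers are hypothetical): a simple single-queue driver typically fills
 * in a tag set like this before handing it to blk_mq_alloc_tag_set() and
 * then blk_mq_alloc_disk(), as shown further below.
 *
 *	static struct blk_mq_tag_set my_tag_set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.numa_node	= NUMA_NO_NODE,
 *		.cmd_size	= sizeof(struct my_cmd),
 *	};
 */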
/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of
	 * errors that make us stop issuing further requests, this hook
	 * serves the purpose of kicking the hardware (which the last request
	 * otherwise would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct rq_list *rqlist);

	/**
	 * @get_budget: Reserve budget before queueing a request. Once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. Also we have to handle the failure case
	 * of .get_budget to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue
	 * has been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block
	 * layer to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is for setting up
	 * flush requests.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't
	 * completed yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show
	 * driver-specific information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
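/*
 * Minimal sketch of a ->queue_rq() implementation (illustrative; the my_*
 * names are hypothetical). A real driver would normally hand the request to
 * hardware here and complete it later from its IRQ or poll path rather than
 * returning synchronously.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (my_submit_to_hw(rq))		// hypothetical helper
 *			return BLK_STS_RESOURCE;	// retried later
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq = my_queue_rq,
 *	};
 */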
/* Keep hctx_flag_name[] in sync with the definitions below */
enum {
	BLK_MQ_F_TAG_QUEUE_SHARED	= 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING		= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED	= 1 << 3,
	BLK_MQ_F_BLOCKING		= 1 << 4,
	/*
	 * Alloc tags on a round-robin base instead of the first available
	 * one.
	 */
	BLK_MQ_F_TAG_RR			= 1 << 5,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 6,

	BLK_MQ_F_MAX			= 1 << 7,
};

#define BLK_MQ_MAX_DEPTH	(10240)
#define BLK_MQ_NO_HCTX_IDX	(-1U)

enum {
	/* Keep hctx_state_name[] in sync with the definitions below */
	BLK_MQ_S_STOPPED,
	BLK_MQ_S_TAG_ACTIVE,
	BLK_MQ_S_SCHED_RESTART,
	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE,
	BLK_MQ_S_MAX
};

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, lim, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, lim, queuedata, &__key);		\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx);

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request reference in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
	struct rcu_head rcu_head;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
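/*
 * Allocation flow sketch (illustrative; error handling abbreviated, and
 * my_tag_set/my_disk are hypothetical): a driver typically allocates the
 * tag set first, then a gendisk backed by it. Passing NULL queue limits
 * requests the defaults.
 *
 *	int ret = blk_mq_alloc_tag_set(&my_tag_set);
 *	if (ret)
 *		return ret;
 *	my_disk = blk_mq_alloc_disk(&my_tag_set, NULL, NULL);
 *	if (IS_ERR(my_disk)) {
 *		blk_mq_free_tag_set(&my_tag_set);
 *		return PTR_ERR(my_disk);
 *	}
 */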
/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of an
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or are using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/**
 * blk_mq_add_to_batch() - add a request to the completion batch
 * @req: The request to add to batch
 * @iob: The batch to add the request to
 * @is_error: Specify true if the request failed with an error
 * @complete: The completion handler for the request
 *
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 *
 * Return: true when the request was added to the batch, otherwise false
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, bool is_error,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * Check various conditions that exclude batch processing:
	 * 1) No batch container
	 * 2) Has scheduler data attached
	 * 3) Not a passthrough request and end_io set
	 * 4) Not a passthrough request and failed with an error
	 */
	if (!iob)
		return false;
	if (req->rq_flags & RQF_SCHED_TAGS)
		return false;
	if (!blk_rq_is_passthrough(req)) {
		if (req->end_io)
			return false;
		if (is_error)
			return false;
	}

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add_tail(&iob->req_list, req);
	return true;
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
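/*
 * Batching sketch (illustrative; my_complete_batch() and my_finish_one()
 * are hypothetical): a driver's poll loop can gather completions into a
 * struct io_comp_batch and fall back to blk_mq_complete_request() when a
 * request is not batchable.
 *
 *	static void my_complete_batch(struct io_comp_batch *iob)
 *	{
 *		struct request *req;
 *
 *		rq_list_for_each(&iob->req_list, req)
 *			my_finish_one(req);	// per-request teardown
 *		blk_mq_end_request_batch(iob);
 *	}
 *
 *	// in the poll path, for each completed request:
 *	if (!blk_mq_add_to_batch(req, iob, is_error, my_complete_batch))
 *		blk_mq_complete_request(req);
 */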
void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
static inline unsigned int __must_check
blk_mq_freeze_queue(struct request_queue *q)
{
	unsigned int memflags = memalloc_noio_save();

	blk_mq_freeze_queue_nomemsave(q);
	return memflags;
}
static inline void
blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
{
	blk_mq_unfreeze_queue_nomemrestore(q);
	memalloc_noio_restore(memflags);
}
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
		unsigned long timeout);
void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
void blk_freeze_queue_start_non_owner(struct request_queue *q);

unsigned int blk_mq_num_possible_queues(unsigned int max_queues);
unsigned int blk_mq_num_online_queues(unsigned int max_queues);
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
			  struct device *dev, unsigned int offset);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract the
 * request size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add the request
 * to get the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
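/*
 * PDU layout sketch (illustrative; struct my_cmd is hypothetical): with
 * tag_set->cmd_size = sizeof(struct my_cmd), each request is followed in
 * memory by a driver-owned my_cmd, and the two conversions are inverses:
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	// rq2 == rq
 */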
#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
		gfp_t gfp);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)					\
	if ((rq->bio))							\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)				\
	__rq_for_each_bio(_iter.bio, _rq)				\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)				\
	__rq_for_each_bio(_iter.bio, _rq)				\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)					\
		(_iter.bio->bi_next == NULL &&				\
		 bio_iter_last(bvec, _iter.iter))

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}
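/*
 * Iteration sketch (illustrative; my_copy_segment() is hypothetical): walk
 * every bio segment of a request while tracking the device sector. Note
 * that blk_rq_bytes(rq) == blk_rq_sectors(rq) << SECTOR_SHIFT, i.e. these
 * helpers count 512-byte sectors regardless of the device's logical block
 * size.
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	sector_t sector = blk_rq_pos(rq);
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		my_copy_segment(sector, &bvec);
 *		sector += bvec.bv_len >> SECTOR_SHIFT;
 *	}
 */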
/*
 * Return the first full biovec in the request. The caller needs to check
 * that there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter. But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload. In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
		struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(rq, sglist, &last_sg);
}
void blk_dump_rq_flags(struct request *, char *);

#endif /* BLK_MQ_H */
/* SPDX-License-Identifier: 0BSD */

/*
 * LZMA2 definitions
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <https://7-zip.org/>
 */

#ifndef XZ_LZMA2_H
#define XZ_LZMA2_H

/* Range coder constants */
#define RC_SHIFT_BITS 8
#define RC_TOP_BITS 24
#define RC_TOP_VALUE (1 << RC_TOP_BITS)
#define RC_BIT_MODEL_TOTAL_BITS 11
#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
#define RC_MOVE_BITS 5

/*
 * Maximum number of position states. A position state is the lowest pb
 * number of bits of the current uncompressed offset. In some places there
 * are different sets of probabilities for different position states.
 */
#define POS_STATES_MAX (1 << 4)

/*
 * This enum is used to track which LZMA symbols have occurred most recently
 * and in which order. This information is used to predict the next symbol.
 *
 * Symbols:
 *  - Literal: One 8-bit byte
 *  - Match: Repeat a chunk of data at some distance
 *  - Long repeat: Multi-byte match at a recently seen distance
 *  - Short repeat: One-byte repeat at a recently seen distance
 *
 * The symbol names are of the form STATE_oldest_older_previous. REP means
 * either a short or long repeated match, and NONLIT means any non-literal.
 */
enum lzma_state {
	STATE_LIT_LIT,
	STATE_MATCH_LIT_LIT,
	STATE_REP_LIT_LIT,
	STATE_SHORTREP_LIT_LIT,
	STATE_MATCH_LIT,
	STATE_REP_LIT,
	STATE_SHORTREP_LIT,
	STATE_LIT_MATCH,
	STATE_LIT_LONGREP,
	STATE_LIT_SHORTREP,
	STATE_NONLIT_MATCH,
	STATE_NONLIT_REP
};

/* Total number of states */
#define STATES 12

/* The lowest 7 states indicate that the previous symbol was a literal. */
#define LIT_STATES 7

/* Indicate that the latest symbol was a literal. */
static inline void lzma_state_literal(enum lzma_state *state)
{
	if (*state <= STATE_SHORTREP_LIT_LIT)
		*state = STATE_LIT_LIT;
	else if (*state <= STATE_LIT_SHORTREP)
		*state -= 3;
	else
		*state -= 6;
}

/* Indicate that the latest symbol was a match. */
static inline void lzma_state_match(enum lzma_state *state)
{
	*state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
}

/* Indicate that the latest symbol was a long repeated match. */
static inline void lzma_state_long_rep(enum lzma_state *state)
{
	*state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
}

/* Indicate that the latest symbol was a short repeated match. */
static inline void lzma_state_short_rep(enum lzma_state *state)
{
	*state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
}
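/*
 * Worked example (illustrative, not part of the original header): starting
 * from STATE_LIT_LIT, decoding a match followed by two literals evolves the
 * state as
 *
 *	enum lzma_state s = STATE_LIT_LIT;
 *	lzma_state_match(&s);	// s == STATE_LIT_MATCH
 *	lzma_state_literal(&s);	// s == STATE_MATCH_LIT
 *	lzma_state_literal(&s);	// s == STATE_MATCH_LIT_LIT
 *
 * so the enum always records the kinds of the last few symbols, which is
 * what the probability model keys on.
 */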
/* Test if the previous symbol was a literal. */
static inline bool lzma_state_is_literal(enum lzma_state state)
{
	return state < LIT_STATES;
}

/*
 * Each literal coder is divided into three sections:
 *   - 0x001-0x0FF: Without match byte
 *   - 0x101-0x1FF: With match byte; match bit is 0
 *   - 0x201-0x2FF: With match byte; match bit is 1
 *
 * The match byte is used when the previous LZMA symbol was something else
 * than a literal (that is, it was some kind of match).
 */
#define LITERAL_CODER_SIZE 0x300

/* Maximum number of literal coders */
#define LITERAL_CODERS_MAX (1 << 4)

/* Minimum length of a match is two bytes. */
#define MATCH_LEN_MIN 2

/*
 * Match length is encoded with 4, 5, or 10 bits.
 *
 * Length   Bits
 *  2-9      4 = Choice=0 + 3 bits
 * 10-17     5 = Choice=1 + Choice2=0 + 3 bits
 * 18-273   10 = Choice=1 + Choice2=1 + 8 bits
 */
#define LEN_LOW_BITS 3
#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
#define LEN_MID_BITS 3
#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
#define LEN_HIGH_BITS 8
#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)

/*
 * Maximum length of a match is 273, which is a result of the encoding
 * described above.
 */
#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)

/*
 * Different sets of probabilities are used for match distances that have
 * very short match length: lengths of 2, 3, and 4 bytes have a separate
 * set of probabilities for each length. Matches with a longer length
 * use a shared set of probabilities.
 */
#define DIST_STATES 4

/*
 * Get the index of the appropriate probability array for decoding
 * the distance slot.
 */
static inline uint32_t lzma_get_dist_state(uint32_t len)
{
	return len < DIST_STATES + MATCH_LEN_MIN
			? len - MATCH_LEN_MIN : DIST_STATES - 1;
}

/*
 * The highest two bits of a 32-bit match distance are encoded using six bits.
 * This six-bit value is called a distance slot. This way encoding a 32-bit
 * value takes 6-36 bits, larger values taking more bits.
 */
#define DIST_SLOT_BITS 6
#define DIST_SLOTS (1 << DIST_SLOT_BITS)

/*
 * Match distances up to 127 are fully encoded using probabilities. Since
 * the highest two bits (distance slot) are always encoded using six bits,
 * the distances 0-3 don't need any additional bits to encode, since the
 * distance slot itself is the same as the actual distance. DIST_MODEL_START
 * indicates the first distance slot where at least one additional bit is
 * needed.
 */
#define DIST_MODEL_START 4

/*
 * Match distances greater than 127 are encoded in three pieces:
 *   - distance slot: the highest two bits
 *   - direct bits: 2-26 bits below the highest two bits
 *   - alignment bits: the four lowest bits
 *
 * Direct bits don't use any probabilities.
 *
 * The distance slot value of 14 is for distances 128-191.
 */
#define DIST_MODEL_END 14

/* Distance slots that indicate a distance <= 127. */
#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)

/*
 * For match distances greater than 127, only the highest two bits and the
 * lowest four bits (alignment) are encoded using probabilities.
 */
#define ALIGN_BITS 4
#define ALIGN_SIZE (1 << ALIGN_BITS)
#define ALIGN_MASK (ALIGN_SIZE - 1)

/* Total number of all probability variables */
#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)

/*
 * LZMA remembers the four most recent match distances. Reusing these
 * distances tends to take less space than re-encoding the actual
 * distance value.
 */
#define REPS 4

#endif
5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 
5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 
6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 
// SPDX-License-Identifier: GPL-2.0-only /* * Generic hugetlb support. * (C) Nadia Yvette Chambers, April 2004 */ #include <linux/list.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/highmem.h> #include <linux/mmu_notifier.h> #include <linux/nodemask.h> #include <linux/pagemap.h> #include <linux/mempolicy.h> #include <linux/compiler.h> #include <linux/cpumask.h> #include <linux/cpuset.h> #include <linux/mutex.h> #include <linux/memblock.h> #include <linux/minmax.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/mmdebug.h> #include <linux/sched/signal.h> #include <linux/rmap.h> #include <linux/string_choices.h> #include <linux/string_helpers.h> #include <linux/swap.h> #include <linux/leafops.h> #include <linux/jhash.h> #include <linux/numa.h> #include <linux/llist.h> #include <linux/cma.h> #include <linux/migrate.h> #include <linux/nospec.h> #include <linux/delayacct.h> #include <linux/memory.h> #include <linux/mm_inline.h> #include <linux/padata.h> #include <linux/pgalloc.h> #include <asm/page.h> #include <asm/tlb.h> #include <asm/setup.h> #include <linux/io.h> #include <linux/node.h> #include <linux/page_owner.h> #include "internal.h" #include "hugetlb_vmemmap.h" #include "hugetlb_cma.h" #include "hugetlb_internal.h" #include <linux/page-isolation.h> int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; __initdata nodemask_t hugetlb_bootmem_nodes; __initdata struct list_head huge_boot_pages[MAX_NUMNODES]; static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata; /* * Due to ordering constraints across the init code for various * architectures, hugetlb hstate cmdline parameters can't simply * be early_param. early_param might call the setup function * before valid hugetlb page sizes are determined, leading to * incorrect rejection of valid hugepagesz= options. * * So, record the parameters early and consume them whenever the * init code is ready for them, by calling hugetlb_parse_params().
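*
* For example (hypothetical values, for illustration only): a boot
* command line containing "hugepagesz=1G hugepages=16" is recorded via
* hugetlb_add_param() at early_param time and only takes effect once
* hugetlb_parse_params() runs and 1G is known to be a valid size.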
*/ /* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */ #define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1) struct hugetlb_cmdline { char *val; int (*setup)(char *val); }; /* for command line parsing */ static struct hstate * __initdata parsed_hstate; static unsigned long __initdata default_hstate_max_huge_pages; static bool __initdata parsed_valid_hugepagesz = true; static bool __initdata parsed_default_hugepagesz; static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata; static unsigned long hugepage_allocation_threads __initdata; static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata; static int hstate_cmdline_index __initdata; static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata; static int hugetlb_param_index __initdata; static __init int hugetlb_add_param(char *s, int (*setup)(char *val)); static __init void hugetlb_parse_params(void); #define hugetlb_early_param(str, func) \ static __init int func##args(char *s) \ { \ return hugetlb_add_param(s, func); \ } \ early_param(str, func##args) /* * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, * free_huge_pages, and surplus_huge_pages. */ __cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock); /* * Serializes faults on the same logical page. This is used to * prevent spurious OOMs when the hugepage pool is fully utilized. */ static int num_fault_mutexes __ro_after_init; struct mutex *hugetlb_fault_mutex_table __ro_after_init; /* Forward declaration */ static int hugetlb_acct_memory(struct hstate *h, long delta); static void hugetlb_vma_lock_free(struct vm_area_struct *vma); static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma); static void hugetlb_unshare_pmds(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool take_locks); static struct resv_map *vma_resv_map(struct vm_area_struct *vma); static void hugetlb_free_folio(struct folio *folio) { if (folio_test_hugetlb_cma(folio)) { hugetlb_cma_free_folio(folio); return; } folio_put(folio); } static inline bool subpool_is_free(struct hugepage_subpool *spool) { if (spool->count) return false; if (spool->max_hpages != -1) return spool->used_hpages == 0; if (spool->min_hpages != -1) return spool->rsv_hpages == spool->min_hpages; return true; } static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, unsigned long irq_flags) { spin_unlock_irqrestore(&spool->lock, irq_flags); /* If no pages are used, and no other handles to the subpool * remain, give up any reservations based on minimum size and * free the subpool */ if (subpool_is_free(spool)) { if (spool->min_hpages != -1) hugetlb_acct_memory(spool->hstate, -spool->min_hpages); kfree(spool); } } struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, long min_hpages) { struct hugepage_subpool *spool; spool = kzalloc(sizeof(*spool), GFP_KERNEL); if (!spool) return NULL; spin_lock_init(&spool->lock); spool->count = 1; spool->max_hpages = max_hpages; spool->hstate = h; spool->min_hpages = min_hpages; if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { kfree(spool); return NULL; } spool->rsv_hpages = min_hpages; return spool; } void hugepage_put_subpool(struct hugepage_subpool *spool) { unsigned long flags; spin_lock_irqsave(&spool->lock, flags); BUG_ON(!spool->count); spool->count--; unlock_or_release_subpool(spool, flags); } /* * Subpool accounting for allocating and reserving pages. 
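* As a hypothetical worked example of the minimum-size case described
* below: with min_hpages = 10 and rsv_hpages = 7 still reserved, a
* request for delta = 12 pages consumes the 7 reserved pages and
* returns 12 - 7 = 5, the amount by which the global pool must grow.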
* Return -ENOMEM if there are not enough resources to satisfy the * request. Otherwise, return the number of pages by which the * global pools must be adjusted (upward). The returned value may * only be different than the passed value (delta) in the case where * a subpool minimum size must be maintained. */ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, long delta) { long ret = delta; if (!spool) return ret; spin_lock_irq(&spool->lock); if (spool->max_hpages != -1) { /* maximum size accounting */ if ((spool->used_hpages + delta) <= spool->max_hpages) spool->used_hpages += delta; else { ret = -ENOMEM; goto unlock_ret; } } /* minimum size accounting */ if (spool->min_hpages != -1 && spool->rsv_hpages) { if (delta > spool->rsv_hpages) { /* * Asking for more reserves than those already taken on * behalf of subpool. Return difference. */ ret = delta - spool->rsv_hpages; spool->rsv_hpages = 0; } else { ret = 0; /* reserves already accounted for */ spool->rsv_hpages -= delta; } } unlock_ret: spin_unlock_irq(&spool->lock); return ret; } /* * Subpool accounting for freeing and unreserving pages. * Return the number of global page reservations that must be dropped. * The return value may only be different than the passed value (delta) * in the case where a subpool minimum size must be maintained. */ static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, long delta) { long ret = delta; unsigned long flags; if (!spool) return delta; spin_lock_irqsave(&spool->lock, flags); if (spool->max_hpages != -1) /* maximum size accounting */ spool->used_hpages -= delta; /* minimum size accounting */ if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { if (spool->rsv_hpages + delta <= spool->min_hpages) ret = 0; else ret = spool->rsv_hpages + delta - spool->min_hpages; spool->rsv_hpages += delta; if (spool->rsv_hpages > spool->min_hpages) spool->rsv_hpages = spool->min_hpages; } /* * If hugetlbfs_put_super couldn't free spool due to an outstanding * quota reference, free it now. 
*/ unlock_or_release_subpool(spool, flags); return ret; } static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) { return subpool_inode(file_inode(vma->vm_file)); } /* * hugetlb vma_lock helper routines */ void hugetlb_vma_lock_read(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; down_read(&vma_lock->rw_sema); } else if (__vma_private_lock(vma)) { struct resv_map *resv_map = vma_resv_map(vma); down_read(&resv_map->rw_sema); } } void hugetlb_vma_unlock_read(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; up_read(&vma_lock->rw_sema); } else if (__vma_private_lock(vma)) { struct resv_map *resv_map = vma_resv_map(vma); up_read(&resv_map->rw_sema); } } void hugetlb_vma_lock_write(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; down_write(&vma_lock->rw_sema); } else if (__vma_private_lock(vma)) { struct resv_map *resv_map = vma_resv_map(vma); down_write(&resv_map->rw_sema); } } void hugetlb_vma_unlock_write(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; up_write(&vma_lock->rw_sema); } else if (__vma_private_lock(vma)) { struct resv_map *resv_map = vma_resv_map(vma); up_write(&resv_map->rw_sema); } } int hugetlb_vma_trylock_write(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; return down_write_trylock(&vma_lock->rw_sema); } else if (__vma_private_lock(vma)) { struct resv_map *resv_map = vma_resv_map(vma); return down_write_trylock(&resv_map->rw_sema); } return 1; } void hugetlb_vma_assert_locked(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; lockdep_assert_held(&vma_lock->rw_sema); } else if (__vma_private_lock(vma)) { struct resv_map *resv_map = vma_resv_map(vma); lockdep_assert_held(&resv_map->rw_sema); } } void hugetlb_vma_lock_release(struct kref *kref) { struct hugetlb_vma_lock *vma_lock = container_of(kref, struct hugetlb_vma_lock, refs); kfree(vma_lock); } static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) { struct vm_area_struct *vma = vma_lock->vma; /* * vma_lock structure may or may not be released as a result of put; * it certainly will no longer be attached to vma, so clear the pointer. * The semaphore synchronizes access to the vma_lock->vma field. */ vma_lock->vma = NULL; vma->vm_private_data = NULL; up_write(&vma_lock->rw_sema); kref_put(&vma_lock->refs, hugetlb_vma_lock_release); } static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; __hugetlb_vma_unlock_write_put(vma_lock); } else if (__vma_private_lock(vma)) { struct resv_map *resv_map = vma_resv_map(vma); /* no free for anon vmas, but still need to unlock */ up_write(&resv_map->rw_sema); } } static void hugetlb_vma_lock_free(struct vm_area_struct *vma) { /* * Only present in shareable vmas.
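* (MAP_PRIVATE vmas have no hugetlb_vma_lock to free; as the helpers
* above show, they use the resv_map's rw_sema instead.)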
*/ if (!vma || !__vma_shareable_lock(vma)) return; if (vma->vm_private_data) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; down_write(&vma_lock->rw_sema); __hugetlb_vma_unlock_write_put(vma_lock); } } /* * vma specific semaphore used for pmd sharing and fault/truncation * synchronization */ int hugetlb_vma_lock_alloc(struct vm_area_struct *vma) { struct hugetlb_vma_lock *vma_lock; /* Only establish in (flags) shareable vmas */ if (!vma || !(vma->vm_flags & VM_MAYSHARE)) return 0; /* Should never get here with non-NULL vm_private_data */ if (vma->vm_private_data) return -EINVAL; vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); if (!vma_lock) { /* * If we cannot allocate the structure, then the vma cannot * participate in pmd sharing. This is only a possible * performance enhancement and memory saving issue. * However, the lock is also used to synchronize page * faults with truncation. If the lock is not present, * unlikely races could leave pages in a file past i_size * until the file is removed. Warn in the unlikely case of * allocation failure. */ pr_warn_once("HugeTLB: unable to allocate vma specific lock\n"); return -EINVAL; } kref_init(&vma_lock->refs); init_rwsem(&vma_lock->rw_sema); vma_lock->vma = vma; vma->vm_private_data = vma_lock; return 0; } /* Helper that removes a struct file_region from the resv_map cache and returns * it for use. */ static struct file_region * get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) { struct file_region *nrg; VM_BUG_ON(resv->region_cache_count <= 0); resv->region_cache_count--; nrg = list_first_entry(&resv->region_cache, struct file_region, link); list_del(&nrg->link); nrg->from = from; nrg->to = to; return nrg; } static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg, struct file_region *rg) { #ifdef CONFIG_CGROUP_HUGETLB nrg->reservation_counter = rg->reservation_counter; nrg->css = rg->css; if (rg->css) css_get(rg->css); #endif } /* Helper that records hugetlb_cgroup uncharge info. */ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h, struct resv_map *resv, struct file_region *nrg) { #ifdef CONFIG_CGROUP_HUGETLB if (h_cg) { nrg->reservation_counter = &h_cg->rsvd_hugepage[hstate_index(h)]; nrg->css = &h_cg->css; /* * The caller will hold exactly one h_cg->css reference for the * whole contiguous reservation region. But this area might be * scattered when there are already some file_regions residing * in it. As a result, many file_regions may share only one css * reference. In order to ensure that one file_region must hold * exactly one h_cg->css reference, we should do css_get for * each file_region and leave the reference held by caller * untouched. */ css_get(&h_cg->css); if (!resv->pages_per_hpage) resv->pages_per_hpage = pages_per_huge_page(h); /* pages_per_hpage should be the same for all entries in * a resv_map.
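* (A resv_map serves a single mapping, which is backed by exactly
* one hstate, so the huge page size cannot differ between entries.)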
*/ VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); } else { nrg->reservation_counter = NULL; nrg->css = NULL; } #endif } static void put_uncharge_info(struct file_region *rg) { #ifdef CONFIG_CGROUP_HUGETLB if (rg->css) css_put(rg->css); #endif } static bool has_same_uncharge_info(struct file_region *rg, struct file_region *org) { #ifdef CONFIG_CGROUP_HUGETLB return rg->reservation_counter == org->reservation_counter && rg->css == org->css; #else return true; #endif } static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) { struct file_region *nrg, *prg; prg = list_prev_entry(rg, link); if (&prg->link != &resv->regions && prg->to == rg->from && has_same_uncharge_info(prg, rg)) { prg->to = rg->to; list_del(&rg->link); put_uncharge_info(rg); kfree(rg); rg = prg; } nrg = list_next_entry(rg, link); if (&nrg->link != &resv->regions && nrg->from == rg->to && has_same_uncharge_info(nrg, rg)) { nrg->from = rg->from; list_del(&rg->link); put_uncharge_info(rg); kfree(rg); } } static inline long hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from, long to, struct hstate *h, struct hugetlb_cgroup *cg, long *regions_needed) { struct file_region *nrg; if (!regions_needed) { nrg = get_file_region_entry_from_cache(map, from, to); record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); list_add(&nrg->link, rg); coalesce_file_region(map, nrg); } else *regions_needed += 1; return to - from; } /* * Must be called with resv->lock held. * * Calling this with regions_needed != NULL will count the number of pages * to be added but will not modify the linked list. In that case, * regions_needed will indicate the number of file_regions needed in the * cache to carry out the addition of regions for this range. */ static long add_reservation_in_range(struct resv_map *resv, long f, long t, struct hugetlb_cgroup *h_cg, struct hstate *h, long *regions_needed) { long add = 0; struct list_head *head = &resv->regions; long last_accounted_offset = f; struct file_region *iter, *trg = NULL; struct list_head *rg = NULL; if (regions_needed) *regions_needed = 0; /* In this loop, we essentially handle an entry for the range * [last_accounted_offset, iter->from), at every iteration, with some * bounds checking. */ list_for_each_entry_safe(iter, trg, head, link) { /* Skip irrelevant regions that start before our range. */ if (iter->from < f) { /* If this region ends after the last accounted offset, * then we need to update last_accounted_offset. */ if (iter->to > last_accounted_offset) last_accounted_offset = iter->to; continue; } /* When we find a region that starts beyond our range, we've * finished. */ if (iter->from >= t) { rg = iter->link.prev; break; } /* Add an entry for last_accounted_offset -> iter->from, and * update last_accounted_offset. */ if (iter->from > last_accounted_offset) add += hugetlb_resv_map_add(resv, iter->link.prev, last_accounted_offset, iter->from, h, h_cg, regions_needed); last_accounted_offset = iter->to; } /* Handle the case where our range extends beyond * last_accounted_offset. */ if (!rg) rg = head->prev; if (last_accounted_offset < t) add += hugetlb_resv_map_add(resv, rg, last_accounted_offset, t, h, h_cg, regions_needed); return add; } /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
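* Returns 0 on success and -ENOMEM if the needed entries could not be
* allocated; any spare entries allocated before the failure are freed.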
*/ static int allocate_file_region_entries(struct resv_map *resv, int regions_needed) __must_hold(&resv->lock) { LIST_HEAD(allocated_regions); int to_allocate = 0, i = 0; struct file_region *trg = NULL, *rg = NULL; VM_BUG_ON(regions_needed < 0); /* * Check for sufficient descriptors in the cache to accommodate * the number of in-progress add operations plus regions_needed. * * This is a while loop because when we drop the lock, some other call * to region_add or region_del may have consumed some region_entries, * so we keep looping here until we finally have enough entries for * (adds_in_progress + regions_needed). */ while (resv->region_cache_count < (resv->adds_in_progress + regions_needed)) { to_allocate = resv->adds_in_progress + regions_needed - resv->region_cache_count; /* At this point, we should have enough entries in the cache * for all the existing adds_in_progress. We should only need * to allocate for regions_needed. */ VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); spin_unlock(&resv->lock); for (i = 0; i < to_allocate; i++) { trg = kmalloc(sizeof(*trg), GFP_KERNEL); if (!trg) goto out_of_memory; list_add(&trg->link, &allocated_regions); } spin_lock(&resv->lock); list_splice(&allocated_regions, &resv->region_cache); resv->region_cache_count += to_allocate; } return 0; out_of_memory: list_for_each_entry_safe(rg, trg, &allocated_regions, link) { list_del(&rg->link); kfree(rg); } return -ENOMEM; } /* * Add the huge page range represented by [f, t) to the reserve * map. Regions will be taken from the cache to fill in this range. * Sufficient regions should exist in the cache due to the previous * call to region_chg with the same range, but in some cases the cache will not * have sufficient entries due to races with other code doing region_add or * region_del. The extra needed entries will be allocated. * * regions_needed is the out value provided by a previous call to region_chg. * * Return the number of new huge pages added to the map. This number is greater * than or equal to zero. If file_region entries needed to be allocated for * this operation and we were not able to allocate, it returns -ENOMEM. * region_add of regions of length 1 never allocates file_regions and cannot * fail; region_chg will always allocate at least 1 entry and a region_add for * 1 page will only require at most 1 entry. */ static long region_add(struct resv_map *resv, long f, long t, long in_regions_needed, struct hstate *h, struct hugetlb_cgroup *h_cg) { long add = 0, actual_regions_needed = 0; spin_lock(&resv->lock); retry: /* Count how many regions are actually needed to execute this add. */ add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed); /* * Check for sufficient descriptors in the cache to accommodate * this add operation. Note that actual_regions_needed may be greater * than in_regions_needed, as the resv_map may have been modified since * the region_chg call. In this case, we need to make sure that we * allocate extra entries, such that we have enough for all the * existing adds_in_progress, plus the excess needed for this * operation. */ if (actual_regions_needed > in_regions_needed && resv->region_cache_count < resv->adds_in_progress + (actual_regions_needed - in_regions_needed)) { /* region_add operation of range 1 should never need to * allocate file_region entries.
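* (region_chg pre-allocates at least one cache entry, and a
* single-page range can require at most one file_region, so ranges
* of length 1 never reach this allocation path.)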
*/ VM_BUG_ON(t - f <= 1); if (allocate_file_region_entries( resv, actual_regions_needed - in_regions_needed)) { return -ENOMEM; } goto retry; } add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); resv->adds_in_progress -= in_regions_needed; spin_unlock(&resv->lock); return add; } /* * Examine the existing reserve map and determine how many * huge pages in the specified range [f, t) are NOT currently * represented. This routine is called before a subsequent * call to region_add that will actually modify the reserve * map to add the specified range [f, t). region_chg does * not change the number of huge pages represented by the * map. A number of new file_region structures is added to the cache as * placeholders, for the subsequent region_add call to use. At least 1 * file_region structure is added. * * out_regions_needed is the number of regions added to the * resv->adds_in_progress. This value needs to be provided to a follow up call * to region_add or region_abort for proper accounting. * * Returns the number of huge pages that need to be added to the existing * reservation map for the range [f, t). This number is greater than or * equal to zero. -ENOMEM is returned if a new file_region structure or * cache entry is needed and cannot be allocated. */ static long region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed) { long chg = 0; spin_lock(&resv->lock); /* Count how many hugepages in this range are NOT represented. */ chg = add_reservation_in_range(resv, f, t, NULL, NULL, out_regions_needed); if (*out_regions_needed == 0) *out_regions_needed = 1; if (allocate_file_region_entries(resv, *out_regions_needed)) return -ENOMEM; resv->adds_in_progress += *out_regions_needed; spin_unlock(&resv->lock); return chg; } /* * Abort the in progress add operation. The adds_in_progress field * of the resv_map keeps track of the operations in progress between * calls to region_chg and region_add. Operations are sometimes * aborted after the call to region_chg. In such cases, region_abort * is called to decrement the adds_in_progress counter. regions_needed * is the value returned by the region_chg call, and it is used to * decrement the adds_in_progress counter. * * NOTE: The range arguments [f, t) are not needed or used in this * routine. They are kept to make reading the calling code easier, as * the arguments will match the associated region_chg call. */ static void region_abort(struct resv_map *resv, long f, long t, long regions_needed) { spin_lock(&resv->lock); VM_BUG_ON(!resv->region_cache_count); resv->adds_in_progress -= regions_needed; spin_unlock(&resv->lock); } /* * Delete the specified range [f, t) from the reserve map. If the * t parameter is LONG_MAX, this indicates that ALL regions after f * should be deleted. Locate the regions which intersect [f, t) * and either trim, delete or split the existing regions. * * Returns the number of huge pages deleted from the reserve map. * In the normal case, the return value is zero or more. In the * case where a region must be split, a new region descriptor must * be allocated. If the allocation fails, -ENOMEM will be returned. * NOTE: If the parameter t == LONG_MAX, then we will never split * a region and thus will never return -ENOMEM. Callers specifying * t == LONG_MAX do not need to check for -ENOMEM error.
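* A hypothetical example: deleting [5, 6) from a map holding [0, 10)
* trims the existing region to [0, 5) and inserts a new region
* [6, 10), which needs one new file_region; deleting [0, LONG_MAX)
* only removes or trims regions, so no allocation can be required.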
*/ static long region_del(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg, *trg; struct file_region *nrg = NULL; long del = 0; retry: spin_lock(&resv->lock); list_for_each_entry_safe(rg, trg, head, link) { /* * Skip regions before the range to be deleted. file_region * ranges are normally of the form [from, to). However, there * may be a "placeholder" entry in the map which is of the form * (from, to) with from == to. Check for placeholder entries * at the beginning of the range to be deleted. */ if (rg->to <= f && (rg->to != rg->from || rg->to != f)) continue; if (rg->from >= t) break; if (f > rg->from && t < rg->to) { /* Must split region */ /* * Check for an entry in the cache before dropping * lock and attempting allocation. */ if (!nrg && resv->region_cache_count > resv->adds_in_progress) { nrg = list_first_entry(&resv->region_cache, struct file_region, link); list_del(&nrg->link); resv->region_cache_count--; } if (!nrg) { spin_unlock(&resv->lock); nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); if (!nrg) return -ENOMEM; goto retry; } del += t - f; hugetlb_cgroup_uncharge_file_region( resv, rg, t - f, false); /* New entry for end of split region */ nrg->from = t; nrg->to = rg->to; copy_hugetlb_cgroup_uncharge_info(nrg, rg); INIT_LIST_HEAD(&nrg->link); /* Original entry is trimmed */ rg->to = f; list_add(&nrg->link, &rg->link); nrg = NULL; break; } if (f <= rg->from && t >= rg->to) { /* Remove entire region */ del += rg->to - rg->from; hugetlb_cgroup_uncharge_file_region(resv, rg, rg->to - rg->from, true); list_del(&rg->link); kfree(rg); continue; } if (f <= rg->from) { /* Trim beginning of region */ hugetlb_cgroup_uncharge_file_region(resv, rg, t - rg->from, false); del += t - rg->from; rg->from = t; } else { /* Trim end of region */ hugetlb_cgroup_uncharge_file_region(resv, rg, rg->to - f, false); del += rg->to - f; rg->to = f; } } spin_unlock(&resv->lock); kfree(nrg); return del; } /* * A rare out of memory error was encountered which prevented removal of * the reserve map region for a page. The huge page itself was freed * and removed from the page cache. This routine will adjust the subpool * usage count, and the global reserve count if needed. By incrementing * these counts, the reserve map entry which could not be deleted will * appear as a "reserved" entry instead of simply dangling with incorrect * counts. */ void hugetlb_fix_reserve_counts(struct inode *inode) { struct hugepage_subpool *spool = subpool_inode(inode); long rsv_adjust; bool reserved = false; rsv_adjust = hugepage_subpool_get_pages(spool, 1); if (rsv_adjust > 0) { struct hstate *h = hstate_inode(inode); if (!hugetlb_acct_memory(h, 1)) reserved = true; } else if (!rsv_adjust) { reserved = true; } if (!reserved) pr_warn("hugetlb: Huge Page Reserved count may go negative.\n"); } /* * Count and return the number of huge pages in the reserve map * that intersect with the range [f, t). */ static long region_count(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg; long chg = 0; spin_lock(&resv->lock); /* Locate each segment we overlap with, and count that overlap. */ list_for_each_entry(rg, head, link) { long seg_from; long seg_to; if (rg->to <= f) continue; if (rg->from >= t) break; seg_from = max(rg->from, f); seg_to = min(rg->to, t); chg += seg_to - seg_from; } spin_unlock(&resv->lock); return chg; } /* * Convert the address within this vma to the page offset within * the mapping, in huge page units.
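* A hypothetical example, assuming a 2 MB hstate (shift 21, order 9)
* with 4 KB base pages: for vm_start = 0x40000000 and vm_pgoff = 0,
* address 0x40400000 yields (0x400000 >> 21) + 0 = offset 2.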
*/ static pgoff_t vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { return ((address - vma->vm_start) >> huge_page_shift(h)) + (vma->vm_pgoff >> huge_page_order(h)); } /** * vma_kernel_pagesize - Page size granularity for this VMA. * @vma: The user mapping. * * Folios in this VMA will be aligned to, and at least as large as, * the number of bytes returned by this function. * * Return: The default size of the folios allocated when backing a VMA. */ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) { if (vma->vm_ops && vma->vm_ops->pagesize) return vma->vm_ops->pagesize(vma); return PAGE_SIZE; } EXPORT_SYMBOL_GPL(vma_kernel_pagesize); /* * Return the page size being used by the MMU to back a VMA. In the majority * of cases, the page size used by the kernel matches the MMU size. On * architectures where it differs, an architecture-specific 'strong' * version of this symbol is required. */ __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { return vma_kernel_pagesize(vma); } /* * Flags for MAP_PRIVATE reservations. These are stored in the bottom * bits of the reservation map pointer, which are always clear due to * alignment. */ #define HPAGE_RESV_OWNER (1UL << 0) #define HPAGE_RESV_UNMAPPED (1UL << 1) #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) /* * These helpers are used to track how many pages are reserved for * faults in a MAP_PRIVATE mapping. Only the process that called mmap() * is guaranteed to have its future faults succeed. * * With the exception of hugetlb_dup_vma_private() which is called at fork(), * the reserve counters are updated with the hugetlb_lock held. It is safe * to reset the VMA at fork() time as it is not in use yet and there is no * chance of the global counters getting corrupted as a result of the values. * * The private mapping reservation is represented in a subtly different * manner to a shared mapping. A shared mapping has a region map associated * with the underlying file; this region map represents the backing file * pages which have ever had a reservation assigned, and it persists even * after the page is instantiated. A private mapping has a region map * associated with the original mmap which is attached to all VMAs which * reference it; this region map represents those offsets which have consumed * reservations, i.e. where pages have been instantiated. */ static unsigned long get_vma_private_data(struct vm_area_struct *vma) { return (unsigned long)vma->vm_private_data; } static void set_vma_private_data(struct vm_area_struct *vma, unsigned long value) { vma->vm_private_data = (void *)value; } static void resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map, struct hugetlb_cgroup *h_cg, struct hstate *h) { #ifdef CONFIG_CGROUP_HUGETLB if (!h_cg || !h) { resv_map->reservation_counter = NULL; resv_map->pages_per_hpage = 0; resv_map->css = NULL; } else { resv_map->reservation_counter = &h_cg->rsvd_hugepage[hstate_index(h)]; resv_map->pages_per_hpage = pages_per_huge_page(h); resv_map->css = &h_cg->css; } #endif } struct resv_map *resv_map_alloc(void) { struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); if (!resv_map || !rg) { kfree(resv_map); kfree(rg); return NULL; } kref_init(&resv_map->refs); spin_lock_init(&resv_map->lock); INIT_LIST_HEAD(&resv_map->regions); init_rwsem(&resv_map->rw_sema); resv_map->adds_in_progress = 0; /* * Initialize these to 0.
On shared mappings, 0's here indicate these * fields don't do cgroup accounting. On private mappings, these will be * re-initialized to the proper values, to indicate that hugetlb cgroup * reservations are to be uncharged from here. */ resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL); INIT_LIST_HEAD(&resv_map->region_cache); list_add(&rg->link, &resv_map->region_cache); resv_map->region_cache_count = 1; return resv_map; } void resv_map_release(struct kref *ref) { struct resv_map *resv_map = container_of(ref, struct resv_map, refs); struct list_head *head = &resv_map->region_cache; struct file_region *rg, *trg; /* Clear out any active regions before we release the map. */ region_del(resv_map, 0, LONG_MAX); /* ... and any entries left in the cache */ list_for_each_entry_safe(rg, trg, head, link) { list_del(&rg->link); kfree(rg); } VM_BUG_ON(resv_map->adds_in_progress); kfree(resv_map); } static inline struct resv_map *inode_resv_map(struct inode *inode) { /* * At inode evict time, i_mapping may not point to the original * address space within the inode. This original address space * contains the pointer to the resv_map. So, always use the * address space embedded within the inode. * The VERY common case is inode->mapping == &inode->i_data, but * this may not be true for device special inodes. */ return (struct resv_map *)(&inode->i_data)->i_private_data; } static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (vma->vm_flags & VM_MAYSHARE) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; return inode_resv_map(inode); } else { return (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK); } } static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { VM_WARN_ON_ONCE_VMA(!is_vm_hugetlb_page(vma), vma); VM_WARN_ON_ONCE_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } static void set_vma_desc_resv_map(struct vm_area_desc *desc, struct resv_map *map) { VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags)); VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE); desc->private_data = map; } static void set_vma_desc_resv_flags(struct vm_area_desc *desc, unsigned long flags) { VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags)); VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE); desc->private_data = (void *)((unsigned long)desc->private_data | flags); } static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); return (get_vma_private_data(vma) & flag) != 0; } static bool is_vma_desc_resv_set(struct vm_area_desc *desc, unsigned long flag) { VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags)); return ((unsigned long)desc->private_data) & flag; } bool __vma_private_lock(struct vm_area_struct *vma) { return !(vma->vm_flags & VM_MAYSHARE) && get_vma_private_data(vma) & ~HPAGE_RESV_MASK && is_vma_resv_set(vma, HPAGE_RESV_OWNER); } void hugetlb_dup_vma_private(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); /* * Clear vm_private_data * - For shared mappings this is a per-vma semaphore that may be * allocated in a subsequent call to hugetlb_vm_op_open. * Before clearing, make sure the pointer is not associated with vma * as this will leak the structure. This is the case when called * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already * been called to allocate a new structure.
* - For MAP_PRIVATE mappings, this is the reserve map which does * not apply to children. Faults generated by the children are * not guaranteed to succeed, even if read-only. */ if (vma->vm_flags & VM_MAYSHARE) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; if (vma_lock && vma_lock->vma != vma) vma->vm_private_data = NULL; } else vma->vm_private_data = NULL; } /* * Reset and decrement one ref on hugepage private reservation. * Called with mm->mmap_lock writer semaphore held. * This function should only be used by mremap and operate on * a same-sized vma. It should never come here with the last ref on * the reservation. */ void clear_vma_resv_huge_pages(struct vm_area_struct *vma) { /* * Clear the old hugetlb private page reservation. * It has already been transferred to new_vma. * * During a mremap() operation of a hugetlb vma we call move_vma() * which copies vma into new_vma and unmaps vma. After the copy * operation both new_vma and vma share a reference to the resv_map * struct, and at that point vma is about to be unmapped. We don't * want to return the reservation to the pool at unmap of vma because * the reservation still lives on in new_vma, so simply decrement the * ref here and remove the resv_map reference from this vma. */ struct resv_map *reservations = vma_resv_map(vma); if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { resv_map_put_hugetlb_cgroup_uncharge_info(reservations); kref_put(&reservations->refs, resv_map_release); } hugetlb_dup_vma_private(vma); } static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) { int nid = folio_nid(folio); lockdep_assert_held(&hugetlb_lock); VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); list_move(&folio->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; folio_set_hugetlb_freed(folio); } static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, int nid) { struct folio *folio; bool pin = !!(current->flags & PF_MEMALLOC_PIN); lockdep_assert_held(&hugetlb_lock); list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) { if (pin && !folio_is_longterm_pinnable(folio)) continue; if (folio_test_hwpoison(folio)) continue; if (is_migrate_isolate_page(&folio->page)) continue; list_move(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); folio_clear_hugetlb_freed(folio); h->free_huge_pages--; h->free_huge_pages_node[nid]--; return folio; } return NULL; } static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { unsigned int cpuset_mems_cookie; struct zonelist *zonelist; struct zone *zone; struct zoneref *z; int node = NUMA_NO_NODE; /* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */ if (nid == NUMA_NO_NODE) nid = numa_node_id(); zonelist = node_zonelist(nid, gfp_mask); retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) { struct folio *folio; if (!cpuset_zone_allowed(zone, gfp_mask)) continue; /* * no need to ask again on the same node.
Pool is node rather than * zone aware */ if (zone_to_nid(zone) == node) continue; node = zone_to_nid(zone); folio = dequeue_hugetlb_folio_node_exact(h, node); if (folio) return folio; } if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; return NULL; } static unsigned long available_huge_pages(struct hstate *h) { return h->free_huge_pages - h->resv_huge_pages; } static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, long gbl_chg) { struct folio *folio = NULL; struct mempolicy *mpol; gfp_t gfp_mask; nodemask_t *nodemask; int nid; /* * gbl_chg==1 means the allocation requires a new page that was not * reserved before. Make sure there's at least one free page. */ if (gbl_chg && !available_huge_pages(h)) goto err; gfp_mask = htlb_alloc_mask(h); nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); if (mpol_is_preferred_many(mpol)) { folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); /* Fall back to all nodes if folio == NULL */ nodemask = NULL; } if (!folio) folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); mpol_cond_put(mpol); return folio; err: return NULL; } #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE #ifdef CONFIG_CONTIG_ALLOC static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { struct folio *folio; bool retried = false; retry: folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask); if (!folio) { if (hugetlb_cma_exclusive_alloc()) return NULL; folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); if (!folio) return NULL; } if (folio_ref_freeze(folio, 1)) return folio; pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio)); hugetlb_free_folio(folio); if (!retried) { retried = true; goto retry; } return NULL; } #else /* !CONFIG_CONTIG_ALLOC */ static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { return NULL; } #endif /* CONFIG_CONTIG_ALLOC */ #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */ static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { return NULL; } #endif /* * Remove hugetlb folio from lists. * If vmemmap exists for the folio, clear the hugetlb flag so that the * folio appears as just a compound page. Otherwise, wait until after * allocating vmemmap to clear the flag. * * Must be called with hugetlb lock held. */ void remove_hugetlb_folio(struct hstate *h, struct folio *folio, bool adjust_surplus) { int nid = folio_nid(folio); VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio); VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio); lockdep_assert_held(&hugetlb_lock); if (hstate_is_gigantic_no_runtime(h)) return; list_del(&folio->lru); if (folio_test_hugetlb_freed(folio)) { folio_clear_hugetlb_freed(folio); h->free_huge_pages--; h->free_huge_pages_node[nid]--; } if (adjust_surplus) { h->surplus_huge_pages--; h->surplus_huge_pages_node[nid]--; } /* * We can only clear the hugetlb flag after allocating vmemmap * pages. Otherwise, someone (memory error handling) may try to write * to tail struct pages.
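* (With the hugetlb vmemmap optimization, the struct pages of the
* tails are backed by a shared, read-only mapped vmemmap page, so
* such a write would fault.)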
*/ if (!folio_test_hugetlb_vmemmap_optimized(folio)) __folio_clear_hugetlb(folio); h->nr_huge_pages--; h->nr_huge_pages_node[nid]--; } void add_hugetlb_folio(struct hstate *h, struct folio *folio, bool adjust_surplus) { int nid = folio_nid(folio); VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio); lockdep_assert_held(&hugetlb_lock); INIT_LIST_HEAD(&folio->lru); h->nr_huge_pages++; h->nr_huge_pages_node[nid]++; if (adjust_surplus) { h->surplus_huge_pages++; h->surplus_huge_pages_node[nid]++; } __folio_set_hugetlb(folio); folio_change_private(folio, NULL); /* * We have to set hugetlb_vmemmap_optimized again because the * folio_change_private(folio, NULL) call above cleared it. */ folio_set_hugetlb_vmemmap_optimized(folio); arch_clear_hugetlb_flags(folio); enqueue_hugetlb_folio(h, folio); } static void __update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio) { bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio); if (hstate_is_gigantic_no_runtime(h)) return; /* * If we don't know which subpages are hwpoisoned, we can't free * the hugepage, so it's leaked intentionally. */ if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return; /* * If the folio is not vmemmap optimized (!clear_flag), then the folio * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio * can only be passed hugetlb pages and will BUG otherwise. */ if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) { spin_lock_irq(&hugetlb_lock); /* * If we cannot allocate vmemmap pages, just refuse to free the * page, put the page back on the hugetlb free list and treat * it as a surplus page. */ add_hugetlb_folio(h, folio, true); spin_unlock_irq(&hugetlb_lock); return; } /* * If vmemmap pages were allocated above, then we need to clear the * hugetlb flag under the hugetlb lock. */ if (folio_test_hugetlb(folio)) { spin_lock_irq(&hugetlb_lock); __folio_clear_hugetlb(folio); spin_unlock_irq(&hugetlb_lock); } /* * Move the PageHWPoison flag from the head page to the raw error * pages, which makes any healthy subpages reusable. */ if (unlikely(folio_test_hwpoison(folio))) folio_clear_hugetlb_hwpoison(folio); folio_ref_unfreeze(folio, 1); hugetlb_free_folio(folio); } /* * Since update_and_free_hugetlb_folio() can be called under any context, * we cannot use GFP_KERNEL to allocate vmemmap pages. However, we can * defer the actual freeing to a workqueue to avoid using GFP_ATOMIC to * allocate the vmemmap pages. * * free_hpage_workfn() locklessly retrieves the linked list of pages to be * freed and frees them one-by-one. As the page->mapping pointer is going * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node * structure of a lockless linked list of huge pages to be freed. */ static LLIST_HEAD(hpage_freelist); static void free_hpage_workfn(struct work_struct *work) { struct llist_node *node; node = llist_del_all(&hpage_freelist); while (node) { struct folio *folio; struct hstate *h; folio = container_of((struct address_space **)node, struct folio, mapping); node = node->next; folio->mapping = NULL; /* * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in * folio_hstate() is going to trigger because a previous call to * remove_hugetlb_folio() will clear the hugetlb bit, so do * not use folio_hstate() directly.
*/ h = size_to_hstate(folio_size(folio)); __update_and_free_hugetlb_folio(h, folio); cond_resched(); } } static DECLARE_WORK(free_hpage_work, free_hpage_workfn); static inline void flush_free_hpage_work(struct hstate *h) { if (hugetlb_vmemmap_optimizable(h)) flush_work(&free_hpage_work); } static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio, bool atomic) { if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) { __update_and_free_hugetlb_folio(h, folio); return; } /* * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages. * * Only call schedule_work() if hpage_freelist is previously * empty. Otherwise, schedule_work() had been called but the workfn * hasn't retrieved the list yet. */ if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist)) schedule_work(&free_hpage_work); } static void bulk_vmemmap_restore_error(struct hstate *h, struct list_head *folio_list, struct list_head *non_hvo_folios) { struct folio *folio, *t_folio; if (!list_empty(non_hvo_folios)) { /* * Free any restored hugetlb pages so that restore of the * entire list can be retried. * The idea is that in the common case of ENOMEM errors freeing * hugetlb pages with vmemmap we will free up memory so that we * can allocate vmemmap for more hugetlb pages. */ list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) { list_del(&folio->lru); spin_lock_irq(&hugetlb_lock); __folio_clear_hugetlb(folio); spin_unlock_irq(&hugetlb_lock); update_and_free_hugetlb_folio(h, folio, false); cond_resched(); } } else { /* * In the case where there are no folios which can be * immediately freed, we loop through the list trying to restore * vmemmap individually in the hope that someone elsewhere may * have done something to cause success (such as freeing some * memory). If unable to restore a hugetlb page, the hugetlb * page is made a surplus page and removed from the list. * If we are able to restore vmemmap and free one hugetlb page, we * quit processing the list to retry the bulk operation. */ list_for_each_entry_safe(folio, t_folio, folio_list, lru) if (hugetlb_vmemmap_restore_folio(h, folio)) { list_del(&folio->lru); spin_lock_irq(&hugetlb_lock); add_hugetlb_folio(h, folio, true); spin_unlock_irq(&hugetlb_lock); } else { list_del(&folio->lru); spin_lock_irq(&hugetlb_lock); __folio_clear_hugetlb(folio); spin_unlock_irq(&hugetlb_lock); update_and_free_hugetlb_folio(h, folio, false); cond_resched(); break; } } } static void update_and_free_pages_bulk(struct hstate *h, struct list_head *folio_list) { long ret; struct folio *folio, *t_folio; LIST_HEAD(non_hvo_folios); /* * First allocate required vmemmap (if necessary) for all folios. * Carefully handle errors and free up any available hugetlb pages * in an effort to make forward progress. */ retry: ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios); if (ret < 0) { bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios); goto retry; } /* * At this point, the list should be empty, ret should be >= 0 and there * should only be pages on the non_hvo_folios list. * Do note that the non_hvo_folios list could be empty. * Without HVO enabled, ret will be 0 and there is no need to call * __folio_clear_hugetlb as this was done previously.
*/ VM_WARN_ON(!list_empty(folio_list)); VM_WARN_ON(ret < 0); if (!list_empty(&non_hvo_folios) && ret) { spin_lock_irq(&hugetlb_lock); list_for_each_entry(folio, &non_hvo_folios, lru) __folio_clear_hugetlb(folio); spin_unlock_irq(&hugetlb_lock); } list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) { update_and_free_hugetlb_folio(h, folio, false); cond_resched(); } } struct hstate *size_to_hstate(unsigned long size) { struct hstate *h; for_each_hstate(h) { if (huge_page_size(h) == size) return h; } return NULL; } void free_huge_folio(struct folio *folio) { /* * Can't pass hstate in here because it is called from the * generic mm code. */ struct hstate *h = folio_hstate(folio); int nid = folio_nid(folio); struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); bool restore_reserve; unsigned long flags; VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); VM_BUG_ON_FOLIO(folio_mapcount(folio), folio); hugetlb_set_folio_subpool(folio, NULL); if (folio_test_anon(folio)) __ClearPageAnonExclusive(&folio->page); folio->mapping = NULL; restore_reserve = folio_test_hugetlb_restore_reserve(folio); folio_clear_hugetlb_restore_reserve(folio); /* * If HPageRestoreReserve was set on the page, page allocation consumed a * reservation. If the page was associated with a subpool, there * would have been a page reserved in the subpool before allocation * via hugepage_subpool_get_pages(). Since we are 'restoring' the * reservation, do not call hugepage_subpool_put_pages() as this will * remove the reserved page from the subpool. */ if (!restore_reserve) { /* * A return code of zero implies that the subpool will be * under its minimum size if the reservation is not restored * after the page is freed. Therefore, force restore_reserve * operation. */ if (hugepage_subpool_put_pages(spool, 1) == 0) restore_reserve = true; } spin_lock_irqsave(&hugetlb_lock, flags); folio_clear_hugetlb_migratable(folio); hugetlb_cgroup_uncharge_folio(hstate_index(h), pages_per_huge_page(h), folio); hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), pages_per_huge_page(h), folio); lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h)); mem_cgroup_uncharge(folio); if (restore_reserve) h->resv_huge_pages++; if (folio_test_hugetlb_temporary(folio)) { remove_hugetlb_folio(h, folio, false); spin_unlock_irqrestore(&hugetlb_lock, flags); update_and_free_hugetlb_folio(h, folio, true); } else if (h->surplus_huge_pages_node[nid]) { /* remove the page from active list */ remove_hugetlb_folio(h, folio, true); spin_unlock_irqrestore(&hugetlb_lock, flags); update_and_free_hugetlb_folio(h, folio, true); } else { arch_clear_hugetlb_flags(folio); enqueue_hugetlb_folio(h, folio); spin_unlock_irqrestore(&hugetlb_lock, flags); } } /* * Must be called with the hugetlb lock held */ static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio) { lockdep_assert_held(&hugetlb_lock); h->nr_huge_pages++; h->nr_huge_pages_node[folio_nid(folio)]++; } void init_new_hugetlb_folio(struct folio *folio) { __folio_set_hugetlb(folio); INIT_LIST_HEAD(&folio->lru); hugetlb_set_folio_subpool(folio, NULL); set_hugetlb_cgroup(folio, NULL); set_hugetlb_cgroup_rsvd(folio, NULL); } /* * Find and lock address space (mapping) in write mode. * * Upon entry, the folio is locked which means that folio_mapping() is * stable. Due to locking order, we can only trylock_write. If we can * not get the lock, simply return NULL to caller.
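* A caller is therefore expected to handle a NULL return, e.g. (an * illustrative sketch, not a verbatim caller): * * mapping = hugetlb_folio_mapping_lock_write(folio); * if (mapping) { * ... operate on the mapping ... * i_mmap_unlock_write(mapping); * }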
*/ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio) { struct address_space *mapping = folio_mapping(folio); if (!mapping) return mapping; if (i_mmap_trylock_write(mapping)) return mapping; return NULL; } static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) { struct folio *folio; bool alloc_try_hard = true; /* * By default we always try hard to allocate the folio with * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in * a loop (to adjust global huge page counts) and previous allocation * failed, do not continue to try hard on the same node. Use the * node_alloc_noretry bitmap to manage this state information. */ if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) alloc_try_hard = false; if (alloc_try_hard) gfp_mask |= __GFP_RETRY_MAYFAIL; folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask); /* * If we did not specify __GFP_RETRY_MAYFAIL, but still got a * folio, this indicates an overall state change. Clear bit so * that we resume normal 'try hard' allocations. */ if (node_alloc_noretry && folio && !alloc_try_hard) node_clear(nid, *node_alloc_noretry); /* * If we tried hard to get a folio but failed, set bit so that * subsequent attempts will not try as hard until there is an * overall state change. */ if (node_alloc_noretry && !folio && alloc_try_hard) node_set(nid, *node_alloc_noretry); if (!folio) { __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); return NULL; } __count_vm_event(HTLB_BUDDY_PGALLOC); return folio; } static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) { struct folio *folio; int order = huge_page_order(h); if (nid == NUMA_NO_NODE) nid = numa_mem_id(); if (order_is_gigantic(order)) folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask); else folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask, node_alloc_noretry); if (folio) init_new_hugetlb_folio(folio); return folio; } /* * Common helper to allocate a fresh hugetlb folio. All specific allocators * should use this function to get a new hugetlb folio. * * Note that the returned folio is 'frozen': ref count of head page and all tail * pages is zero, and the accounting must be done in the caller. */ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { struct folio *folio; folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); if (folio) hugetlb_vmemmap_optimize_folio(h, folio); return folio; } void prep_and_add_allocated_folios(struct hstate *h, struct list_head *folio_list) { unsigned long flags; struct folio *folio, *tmp_f; /* Send list for bulk vmemmap optimization processing */ hugetlb_vmemmap_optimize_folios(h, folio_list); /* Add all new pool pages to free lists in one lock cycle */ spin_lock_irqsave(&hugetlb_lock, flags); list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { account_new_hugetlb_folio(h, folio); enqueue_hugetlb_folio(h, folio); } spin_unlock_irqrestore(&hugetlb_lock, flags); } /* * Allocates a fresh hugetlb page in a node interleaved manner. The page * will later be added to the appropriate hugetlb pool.
*/ static struct folio *alloc_pool_huge_folio(struct hstate *h, nodemask_t *nodes_allowed, nodemask_t *node_alloc_noretry, int *next_node) { gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; int nr_nodes, node; for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) { struct folio *folio; folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node, nodes_allowed, node_alloc_noretry); if (folio) return folio; } return NULL; } /* * Remove huge page from pool from next node to free. Attempt to keep * persistent huge pages more or less balanced over allowed nodes. * This routine only 'removes' the hugetlb page. The caller must make * an additional call to free the page to low level allocators. * Called with hugetlb_lock locked. */ static struct folio *remove_pool_hugetlb_folio(struct hstate *h, nodemask_t *nodes_allowed, bool acct_surplus) { int nr_nodes, node; struct folio *folio = NULL; lockdep_assert_held(&hugetlb_lock); for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { /* * If we're returning unused surplus pages, only examine * nodes with surplus pages. */ if ((!acct_surplus || h->surplus_huge_pages_node[node]) && !list_empty(&h->hugepage_freelists[node])) { folio = list_entry(h->hugepage_freelists[node].next, struct folio, lru); remove_hugetlb_folio(h, folio, acct_surplus); break; } } return folio; } /* * Dissolve a given free hugetlb folio into free buddy pages. This function * does nothing for in-use hugetlb folios and non-hugetlb folios. * This function returns values like below: * * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages * when the system is under memory pressure and the feature of * freeing unused vmemmap pages associated with each hugetlb page * is enabled. * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use * (allocated or reserved). * 0: successfully dissolved free hugepages or the page is not a * hugepage (considered as already dissolved) */ int dissolve_free_hugetlb_folio(struct folio *folio) { int rc = -EBUSY; retry: /* Not to disrupt normal path by vainly holding hugetlb_lock */ if (!folio_test_hugetlb(folio)) return 0; spin_lock_irq(&hugetlb_lock); if (!folio_test_hugetlb(folio)) { rc = 0; goto out; } if (!folio_ref_count(folio)) { struct hstate *h = folio_hstate(folio); bool adjust_surplus = false; if (!available_huge_pages(h)) goto out; /* * We should make sure that the page is already on the free list * when it is dissolved. */ if (unlikely(!folio_test_hugetlb_freed(folio))) { spin_unlock_irq(&hugetlb_lock); cond_resched(); /* * Theoretically, we should return -EBUSY when we * encounter this race. In fact, we have a chance * to successfully dissolve the page if we retry, * because the race window is quite small. * If we seize this opportunity, it is an optimization * for increasing the success rate of dissolving the page. */ goto retry; } if (h->surplus_huge_pages_node[folio_nid(folio)]) adjust_surplus = true; remove_hugetlb_folio(h, folio, adjust_surplus); h->max_huge_pages--; spin_unlock_irq(&hugetlb_lock); /* * Normally update_and_free_hugetlb_folio will allocate required vmemmap * before freeing the page. update_and_free_hugetlb_folio will fail to * free the page if it can not allocate required vmemmap. We * need to adjust max_huge_pages if the page is not freed. * Attempt to allocate vmemmap here so that we can take * appropriate action on failure.
* * The folio_test_hugetlb check here is because * remove_hugetlb_folio will clear hugetlb folio flag for * non-vmemmap optimized hugetlb folios. */ if (folio_test_hugetlb(folio)) { rc = hugetlb_vmemmap_restore_folio(h, folio); if (rc) { spin_lock_irq(&hugetlb_lock); add_hugetlb_folio(h, folio, adjust_surplus); h->max_huge_pages++; goto out; } } else rc = 0; update_and_free_hugetlb_folio(h, folio, false); return rc; } out: spin_unlock_irq(&hugetlb_lock); return rc; } /* * Dissolve free hugepages in a given pfn range. Used by memory hotplug to * make specified memory blocks removable from the system. * Note that this will dissolve a free gigantic hugepage completely, if any * part of it lies within the given range. * Also note that if dissolve_free_hugetlb_folio() returns with an error, all * free hugetlb folios that were dissolved before that error are lost. */ int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; struct folio *folio; int rc = 0; unsigned int order; struct hstate *h; if (!hugepages_supported()) return rc; order = huge_page_order(&default_hstate); for_each_hstate(h) order = min(order, huge_page_order(h)); for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) { folio = pfn_folio(pfn); rc = dissolve_free_hugetlb_folio(folio); if (rc) break; } return rc; } /* * Allocates a fresh surplus page from the page allocator. */ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { struct folio *folio = NULL; if (hstate_is_gigantic_no_runtime(h)) return NULL; spin_lock_irq(&hugetlb_lock); if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) goto out_unlock; spin_unlock_irq(&hugetlb_lock); folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); if (!folio) return NULL; spin_lock_irq(&hugetlb_lock); /* * nr_huge_pages needs to be adjusted within the same lock cycle * as surplus_pages, otherwise it might confuse * persistent_huge_pages() momentarily. */ account_new_hugetlb_folio(h, folio); /* * We could have raced with the pool size change. * Double check that and simply deallocate the new page * if we would end up overcommitting the surpluses. Abuse the * temporary page flag to work around the nasty free_huge_folio * codeflow. */ if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { folio_set_hugetlb_temporary(folio); spin_unlock_irq(&hugetlb_lock); free_huge_folio(folio); return NULL; } h->surplus_huge_pages++; h->surplus_huge_pages_node[folio_nid(folio)]++; out_unlock: spin_unlock_irq(&hugetlb_lock); return folio; } static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { struct folio *folio; if (hstate_is_gigantic(h)) return NULL; folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); if (!folio) return NULL; spin_lock_irq(&hugetlb_lock); account_new_hugetlb_folio(h, folio); spin_unlock_irq(&hugetlb_lock); /* fresh huge pages are frozen */ folio_ref_unfreeze(folio, 1); /* * We do not account these pages as surplus because they are only * temporary and will be released properly on the last reference. */ folio_set_hugetlb_temporary(folio); return folio; } /* * Use the VMA's mpolicy to allocate a huge page from the buddy.
*/ static struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { struct folio *folio = NULL; struct mempolicy *mpol; gfp_t gfp_mask = htlb_alloc_mask(h); int nid; nodemask_t *nodemask; nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); if (mpol_is_preferred_many(mpol)) { gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); /* Fallback to all nodes if page==NULL */ nodemask = NULL; } if (!folio) folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); mpol_cond_put(mpol); return folio; } struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask) { struct folio *folio; spin_lock_irq(&hugetlb_lock); if (!h->resv_huge_pages) { spin_unlock_irq(&hugetlb_lock); return NULL; } folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, nmask); if (folio) h->resv_huge_pages--; spin_unlock_irq(&hugetlb_lock); return folio; } /* folio migration callback function */ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback) { spin_lock_irq(&hugetlb_lock); if (available_huge_pages(h)) { struct folio *folio; folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, nmask); if (folio) { spin_unlock_irq(&hugetlb_lock); return folio; } } spin_unlock_irq(&hugetlb_lock); /* We cannot fall back to other nodes, as we could break the per-node pool. */ if (!allow_alloc_fallback) gfp_mask |= __GFP_THISNODE; return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); } static nodemask_t *policy_mbind_nodemask(gfp_t gfp) { #ifdef CONFIG_NUMA struct mempolicy *mpol = get_task_policy(current); /* * Only enforce MPOL_BIND policy which overlaps with cpuset policy * (from policy_nodemask) specifically for the hugetlb case */ if (mpol->mode == MPOL_BIND && (apply_policy_zone(mpol, gfp_zone(gfp)) && cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) return &mpol->nodes; #endif return NULL; } /* * Increase the hugetlb pool such that it can accommodate a reservation * of size 'delta'. */ static int gather_surplus_pages(struct hstate *h, long delta) __must_hold(&hugetlb_lock) { LIST_HEAD(surplus_list); struct folio *folio, *tmp; int ret; long i; long needed, allocated; bool alloc_ok = true; nodemask_t *mbind_nodemask, alloc_nodemask; mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); if (mbind_nodemask) nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed); else alloc_nodemask = cpuset_current_mems_allowed; lockdep_assert_held(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - h->free_huge_pages; if (needed <= 0) { h->resv_huge_pages += delta; return 0; } allocated = 0; ret = -ENOMEM; retry: spin_unlock_irq(&hugetlb_lock); for (i = 0; i < needed; i++) { folio = NULL; /* * It is okay to use NUMA_NO_NODE because we use numa_mem_id() * down the road to pick the current node if that is the case. */ folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), NUMA_NO_NODE, &alloc_nodemask); if (!folio) { alloc_ok = false; break; } list_add(&folio->lru, &surplus_list); cond_resched(); } allocated += i; /* * After retaking hugetlb_lock, we need to recalculate 'needed' * because either resv_huge_pages or free_huge_pages may have changed.
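* Illustrative numbers: with delta == 4, resv_huge_pages == 10, * free_huge_pages == 8 and allocated == 2, needed becomes * (10 + 4) - (8 + 2) = 4 and we retry; if other CPUs meanwhile freed * pages so that free_huge_pages is 14, needed becomes * (10 + 4) - (14 + 2) = -2 and the extra surplus folios are released * below.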
*/ spin_lock_irq(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - (h->free_huge_pages + allocated); if (needed > 0) { if (alloc_ok) goto retry; /* * We were not able to allocate enough pages to * satisfy the entire reservation so we free what * we've allocated so far. */ goto free; } /* * The surplus_list now contains _at_least_ the number of extra pages * needed to accommodate the reservation. Add the appropriate number * of pages to the hugetlb pool and free the extras back to the buddy * allocator. Commit the entire reservation here to prevent another * process from stealing the pages as they are added to the pool but * before they are reserved. */ needed += allocated; h->resv_huge_pages += delta; ret = 0; /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { if ((--needed) < 0) break; /* Add the page to the hugetlb allocator */ enqueue_hugetlb_folio(h, folio); } free: spin_unlock_irq(&hugetlb_lock); /* * Free unnecessary surplus pages to the buddy allocator. * Pages have no ref count, call free_huge_folio directly. */ list_for_each_entry_safe(folio, tmp, &surplus_list, lru) free_huge_folio(folio); spin_lock_irq(&hugetlb_lock); return ret; } /* * This routine has two main purposes: * 1) Decrement the reservation count (resv_huge_pages) by the value passed * in unused_resv_pages. This corresponds to the prior adjustments made * to the associated reservation map. * 2) Free any unused surplus pages that may have been allocated to satisfy * the reservation. As many as unused_resv_pages may be freed. */ static void return_unused_surplus_pages(struct hstate *h, unsigned long unused_resv_pages) { unsigned long nr_pages; LIST_HEAD(page_list); lockdep_assert_held(&hugetlb_lock); /* Uncommit the reservation */ h->resv_huge_pages -= unused_resv_pages; if (hstate_is_gigantic_no_runtime(h)) goto out; /* * Part (or even all) of the reservation could have been backed * by pre-allocated pages. Only free surplus pages. */ nr_pages = min(unused_resv_pages, h->surplus_huge_pages); /* * We want to release as many surplus pages as possible, spread * evenly across all nodes with memory. Iterate across these nodes * until we can no longer free unreserved surplus pages. This occurs * when the nodes with surplus pages have no free pages. * remove_pool_hugetlb_folio() will balance the freed pages across the * on-line nodes with memory and will handle the hstate accounting. */ while (nr_pages--) { struct folio *folio; folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); if (!folio) goto out; list_add(&folio->lru, &page_list); } out: spin_unlock_irq(&hugetlb_lock); update_and_free_pages_bulk(h, &page_list); spin_lock_irq(&hugetlb_lock); } /* * vma_needs_reservation, vma_commit_reservation and vma_end_reservation * are used by the huge page allocation routines to manage reservations. * * vma_needs_reservation is called to determine if the huge page at addr * within the vma has an associated reservation. If a reservation is * needed, the value 1 is returned. The caller is then responsible for * managing the global reservation and subpool usage counts. After * the huge page has been allocated, vma_commit_reservation is called * to add the page to the reservation map. If the page allocation fails, * the reservation must be ended instead of committed. vma_end_reservation * is called in such cases. * * In the normal case, vma_commit_reservation returns the same value * as the preceding vma_needs_reservation call. 
The only time this * is not the case is if a reserve map was changed between calls. It * is the responsibility of the caller to notice the difference and * take appropriate action. * * vma_add_reservation is used in error paths where a reservation must * be restored when a newly allocated huge page must be freed. It is * to be called after calling vma_needs_reservation to determine if a * reservation exists. * * vma_del_reservation is used in error paths where an entry in the reserve * map was created during huge page allocation and must be removed. It is to * be called after calling vma_needs_reservation to determine if a reservation * exists. */ enum vma_resv_mode { VMA_NEEDS_RESV, VMA_COMMIT_RESV, VMA_END_RESV, VMA_ADD_RESV, VMA_DEL_RESV, }; static long __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, enum vma_resv_mode mode) { struct resv_map *resv; pgoff_t idx; long ret; long dummy_out_regions_needed; resv = vma_resv_map(vma); if (!resv) return 1; idx = vma_hugecache_offset(h, vma, addr); switch (mode) { case VMA_NEEDS_RESV: ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); /* We assume that vma_reservation_* routines always operate on * 1 page, and that adding to resv map a 1 page entry can only * ever require 1 region. */ VM_BUG_ON(dummy_out_regions_needed != 1); break; case VMA_COMMIT_RESV: ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); /* region_add calls of range 1 should never fail. */ VM_BUG_ON(ret < 0); break; case VMA_END_RESV: region_abort(resv, idx, idx + 1, 1); ret = 0; break; case VMA_ADD_RESV: if (vma->vm_flags & VM_MAYSHARE) { ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); /* region_add calls of range 1 should never fail. */ VM_BUG_ON(ret < 0); } else { region_abort(resv, idx, idx + 1, 1); ret = region_del(resv, idx, idx + 1); } break; case VMA_DEL_RESV: if (vma->vm_flags & VM_MAYSHARE) { region_abort(resv, idx, idx + 1, 1); ret = region_del(resv, idx, idx + 1); } else { ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); /* region_add calls of range 1 should never fail. */ VM_BUG_ON(ret < 0); } break; default: BUG(); } if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) return ret; /* * We know a private mapping must have HPAGE_RESV_OWNER set. * * In most cases, reserves always exist for private mappings. * However, a file associated with the mapping could have been * hole punched or truncated after reserves were consumed; * a subsequent fault on such a range will not use reserves. * Subtle - The reserve map for private mappings has the * opposite meaning from that of shared mappings. If NO * entry is in the reserve map, it means a reservation exists. * If an entry exists in the reserve map, it means the * reservation has already been consumed. As a result, the * return value of this routine is the opposite of the * value returned from reserve map manipulation routines above.
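* Concretely (illustrative): for VMA_NEEDS_RESV on a private mapping, * region_chg() returning 1 means no entry exists, i.e. the private * reservation is still unconsumed, so the code below returns 0 (a * reservation exists and can be used); region_chg() returning 0 means * an entry exists, the reservation was already consumed, and 1 is * returned (a new reservation is needed).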
*/ if (ret > 0) return 0; if (ret == 0) return 1; return ret; } static long vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); } static long vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); } static void vma_end_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); } static long vma_add_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); } static long vma_del_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); } /* * This routine is called to restore reservation information on error paths. * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), * and the hugetlb mutex should remain held when calling this routine. * * It handles two specific cases: * 1) A reservation was in place and the folio consumed the reservation. * hugetlb_restore_reserve is set in the folio. * 2) No reservation was in place for the page, so hugetlb_restore_reserve is * not set. However, alloc_hugetlb_folio always updates the reserve map. * * In case 1, free_huge_folio later in the error path will increment the * global reserve count. But, free_huge_folio does not have enough context * to adjust the reservation map. This case deals primarily with private * mappings. Adjust the reserve map here to be consistent with global * reserve count adjustments to be made by free_huge_folio. Make sure the * reserve map indicates there is a reservation present. * * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. */ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, unsigned long address, struct folio *folio) { long rc = vma_needs_reservation(h, vma, address); if (folio_test_hugetlb_restore_reserve(folio)) { if (unlikely(rc < 0)) /* * Rare out of memory condition in reserve map * manipulation. Clear hugetlb_restore_reserve so * that global reserve count will not be incremented * by free_huge_folio. This will make it appear * as though the reservation for this folio was * consumed. This may prevent the task from * faulting in the folio at a later time. This * is better than inconsistent global huge page * accounting of reserve counts. */ folio_clear_hugetlb_restore_reserve(folio); else if (rc) (void)vma_add_reservation(h, vma, address); else vma_end_reservation(h, vma, address); } else { if (!rc) { /* * This indicates there is an entry in the reserve map * not added by alloc_hugetlb_folio. We know it was added * before the alloc_hugetlb_folio call, otherwise * hugetlb_restore_reserve would be set on the folio. * Remove the entry so that a subsequent allocation * does not consume a reservation. */ rc = vma_del_reservation(h, vma, address); if (rc < 0) /* * VERY rare out of memory condition. Since * we can not delete the entry, set * hugetlb_restore_reserve so that the reserve * count will be incremented when the folio * is freed. This reserve will be consumed * on a subsequent allocation. */ folio_set_hugetlb_restore_reserve(folio); } else if (rc < 0) { /* * Rare out of memory condition from * vma_needs_reservation call. Memory allocation is * only attempted if a new entry is needed. 
Therefore, * this implies there is not an entry in the * reserve map. * * For shared mappings, no entry in the map indicates * no reservation. We are done. */ if (!(vma->vm_flags & VM_MAYSHARE)) /* * For private mappings, no entry indicates * a reservation is present. Since we can * not add an entry, set hugetlb_restore_reserve * on the folio so reserve count will be * incremented when freed. This reserve will * be consumed on a subsequent allocation. */ folio_set_hugetlb_restore_reserve(folio); } else /* * No reservation present, do nothing */ vma_end_reservation(h, vma, address); } /* * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve * the old one * @old_folio: Old folio to dissolve * @list: List to isolate the page in case we need to * Returns 0 on success, otherwise a negated error. */ static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio, struct list_head *list) { gfp_t gfp_mask; struct hstate *h; int nid = folio_nid(old_folio); struct folio *new_folio = NULL; int ret = 0; retry: /* * The old_folio might have been dissolved from under our feet, so make sure * to carefully check the state under the lock. */ spin_lock_irq(&hugetlb_lock); if (!folio_test_hugetlb(old_folio)) { /* * Freed from under us. Drop new_folio too. */ goto free_new; } else if (folio_ref_count(old_folio)) { bool isolated; /* * Someone has grabbed the folio, try to isolate it here. * Fail with -EBUSY if not possible. */ spin_unlock_irq(&hugetlb_lock); isolated = folio_isolate_hugetlb(old_folio, list); ret = isolated ? 0 : -EBUSY; spin_lock_irq(&hugetlb_lock); goto free_new; } else if (!folio_test_hugetlb_freed(old_folio)) { /* * Folio's refcount is 0 but it has not been enqueued in the * freelist yet. The race window is small, so we can succeed here if * we retry. */ spin_unlock_irq(&hugetlb_lock); cond_resched(); goto retry; } else { h = folio_hstate(old_folio); if (!new_folio) { spin_unlock_irq(&hugetlb_lock); gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, NULL); if (!new_folio) return -ENOMEM; goto retry; } /* * Ok, old_folio is still a genuine free hugepage. Remove it from * the freelist and decrease the counters. These will be * incremented again when calling account_new_hugetlb_folio() * and enqueue_hugetlb_folio() for new_folio. The counters will * remain stable since this happens under the lock. */ remove_hugetlb_folio(h, old_folio, false); /* * Ref count on new_folio is already zero as it was dropped * earlier. It can be directly added to the pool free list. */ account_new_hugetlb_folio(h, new_folio); enqueue_hugetlb_folio(h, new_folio); /* * Folio has been replaced, we can safely free the old one. */ spin_unlock_irq(&hugetlb_lock); update_and_free_hugetlb_folio(h, old_folio, false); } return ret; free_new: spin_unlock_irq(&hugetlb_lock); if (new_folio) update_and_free_hugetlb_folio(h, new_folio, false); return ret; } int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list) { int ret = -EBUSY; /* Not to disrupt normal path by vainly holding hugetlb_lock */ if (!folio_test_hugetlb(folio)) return 0; /* * Fence off gigantic pages as there is a cyclic dependency between * alloc_contig_range and them. Return -ENOMEM as this has the effect * of bailing out right away without further retrying.
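* (A replacement gigantic folio would itself have to come from * alloc_contig_range(), hence the cycle.)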
*/ if (order_is_gigantic(folio_order(folio))) return -ENOMEM; if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list)) ret = 0; else if (!folio_ref_count(folio)) ret = alloc_and_dissolve_hugetlb_folio(folio, list); return ret; } /* * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn * range with new folios. * @start_pfn: start pfn of the given pfn range * @end_pfn: end pfn of the given pfn range * Returns 0 on success, otherwise a negated error. */ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) { struct folio *folio; int ret = 0; LIST_HEAD(isolate_list); while (start_pfn < end_pfn) { folio = pfn_folio(start_pfn); /* Not to disrupt normal path by vainly holding hugetlb_lock */ if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) { ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list); if (ret) break; putback_movable_pages(&isolate_list); } start_pfn++; } return ret; } void wait_for_freed_hugetlb_folios(void) { if (llist_empty(&hpage_freelist)) return; flush_work(&free_hpage_work); } typedef enum { /* * For either 0/1: we checked the per-vma resv map, and one resv * count either can be reused (0), or an extra is needed (1). */ MAP_CHG_REUSE = 0, MAP_CHG_NEEDED = 1, /* * The per-vma resv count cannot be used, hence a new resv * count is enforced. * * NOTE: This is mostly identical to MAP_CHG_NEEDED, except * that currently vma_needs_reservation() has the unwanted side * effect of requiring either end() or commit() to complete the * transaction. Hence it needs to differentiate from NEEDED. */ MAP_CHG_ENFORCED = 2, } map_chg_state; /* * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW * faults of hugetlb private mappings on top of a non-page-cache folio (in * which case even if there's a private vma resv map it won't cover such * allocation). New call sites should (probably) never set it to true!! * When it's set, the allocation will bypass all vma level reservations. */ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, bool cow_from_owner) { struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct folio *folio; long retval, gbl_chg, gbl_reserve; map_chg_state map_chg; int ret, idx; struct hugetlb_cgroup *h_cg = NULL; gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; idx = hstate_index(h); /* Whether we need a separate per-vma reservation? */ if (cow_from_owner) { /* * Special case! Since it's a CoW on top of a reserved * page, the private resv map doesn't count. So it cannot * consume the per-vma resv map even if it's reserved. */ map_chg = MAP_CHG_ENFORCED; } else { /* * Examine the region/reserve map to determine if the process * has a reservation for the page to be allocated. A return * code of zero indicates a reservation exists (no change). */ retval = vma_needs_reservation(h, vma, addr); if (retval < 0) return ERR_PTR(-ENOMEM); map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE; } /* * Whether we need a separate global reservation? * * Processes that did not create the mapping will have no * reserves as indicated by the region/reserve map. Check * that the allocation will not exceed the subpool limit. * Or if it can get one from the pool reservation directly. */ if (map_chg) { gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) goto out_end_reservation; } else { /* * If we have the vma reservation ready, no need for extra * global reservation.
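* To summarize the branches above: MAP_CHG_REUSE keeps gbl_chg == 0, * while MAP_CHG_NEEDED and MAP_CHG_ENFORCED both charge the subpool * via hugepage_subpool_get_pages().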
*/ gbl_chg = 0; } /* * If this allocation is not consuming a per-vma reservation, * charge the hugetlb cgroup now. */ if (map_chg) { ret = hugetlb_cgroup_charge_cgroup_rsvd( idx, pages_per_huge_page(h), &h_cg); if (ret) goto out_subpool_put; } ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); if (ret) goto out_uncharge_cgroup_reservation; spin_lock_irq(&hugetlb_lock); /* * gbl_chg is passed to indicate whether or not a page must be taken * from the global free pool (global change). gbl_chg == 0 indicates * a reservation exists for the allocation. */ folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg); if (!folio) { spin_unlock_irq(&hugetlb_lock); folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); list_add(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); /* Fall through */ } /* * Either a dequeued or a buddy-allocated folio needs a special * mark when it consumes a global reservation. */ if (!gbl_chg) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); /* If allocation is not consuming a reservation, also store the * hugetlb_cgroup pointer on the page. */ if (map_chg) { hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), h_cg, folio); } spin_unlock_irq(&hugetlb_lock); hugetlb_set_folio_subpool(folio, spool); if (map_chg != MAP_CHG_ENFORCED) { /* commit() is only needed if the map_chg is not enforced */ retval = vma_commit_reservation(h, vma, addr); /* * Check for possible race conditions. When it happens, the * page was added to the reservation map between * vma_needs_reservation and vma_commit_reservation. * This indicates a race with hugetlb_reserve_pages. * Adjust for the subpool count incremented above AND * in hugetlb_reserve_pages for the same page. Also, * the reservation count added in hugetlb_reserve_pages * no longer applies. */ if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) { long rsv_adjust; rsv_adjust = hugepage_subpool_put_pages(spool, 1); hugetlb_acct_memory(h, -rsv_adjust); if (map_chg) { spin_lock_irq(&hugetlb_lock); hugetlb_cgroup_uncharge_folio_rsvd( hstate_index(h), pages_per_huge_page(h), folio); spin_unlock_irq(&hugetlb_lock); } } } ret = mem_cgroup_charge_hugetlb(folio, gfp); /* * Unconditionally increment NR_HUGETLB here. If it turns out that * mem_cgroup_charge_hugetlb failed, then immediately free the page and * decrement NR_HUGETLB. */ lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h)); if (ret == -ENOMEM) { free_huge_folio(folio); return ERR_PTR(-ENOMEM); } return folio; out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); out_uncharge_cgroup_reservation: if (map_chg) hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), h_cg); out_subpool_put: /* * put page to subpool iff the quota of subpool's rsv_hpages is used * during hugepage_subpool_get_pages.
*/ if (map_chg && !gbl_chg) { gbl_reserve = hugepage_subpool_put_pages(spool, 1); hugetlb_acct_memory(h, -gbl_reserve); } out_end_reservation: if (map_chg != MAP_CHG_ENFORCED) vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) { struct huge_bootmem_page *m; int listnode = nid; if (hugetlb_early_cma(h)) m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact); else { if (node_exact) m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h), 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); else { m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); /* * For pre-HVO to work correctly, pages need to be on * the list for the node they were actually allocated * from. That node may be different in the case of * fallback by memblock_alloc_try_nid_raw. So, * extract the actual node first. */ if (m) listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); } if (m) { m->flags = 0; m->cma = NULL; } } if (m) { /* * Use the beginning of the huge page to store the * huge_bootmem_page struct (until gather_bootmem * puts them into the mem_map). * * Put them into a private list first because mem_map * is not up yet. */ INIT_LIST_HEAD(&m->list); list_add(&m->list, &huge_boot_pages[listnode]); m->hstate = h; } return m; } int alloc_bootmem_huge_page(struct hstate *h, int nid) __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); int __alloc_bootmem_huge_page(struct hstate *h, int nid) { struct huge_bootmem_page *m = NULL; /* initialize for clang */ int nr_nodes, node = nid; /* do node specific alloc */ if (nid != NUMA_NO_NODE) { m = alloc_bootmem(h, node, true); if (!m) return 0; goto found; } /* allocate from next node when distributing huge pages */ for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &hugetlb_bootmem_nodes) { m = alloc_bootmem(h, node, false); if (!m) return 0; goto found; } found: /* * Only initialize the head struct page in memmap_init_reserved_pages, * rest of the struct pages will be initialized by the HugeTLB * subsystem itself. * The head struct page is used to get folio information by the HugeTLB * subsystem like zone id and node id. */ memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), huge_page_size(h) - PAGE_SIZE); return 1; } /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */ static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, unsigned long start_page_number, unsigned long end_page_number) { enum zone_type zone = folio_zonenum(folio); int nid = folio_nid(folio); struct page *page = folio_page(folio, start_page_number); unsigned long head_pfn = folio_pfn(folio); unsigned long pfn, end_pfn = head_pfn + end_page_number; /* * As we marked all tail pages with memblock_reserved_mark_noinit(), * we must initialize them ourselves here. */ for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) { __init_single_page(page, pfn, zone, nid); prep_compound_tail((struct page *)folio, pfn - head_pfn); set_page_count(page, 0); } } static void __init hugetlb_folio_init_vmemmap(struct folio *folio, struct hstate *h, unsigned long nr_pages) { int ret; /* * This is an open-coded prep_compound_page() whereby we avoid * walking pages twice by initializing/preparing+freezing them in the * same go. 
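* A conceptually equivalent (but slower) two-pass sketch would be to * first __init_single_page() every tail struct page and then call * prep_compound_page() on the head followed by a folio_ref_freeze(); * the code below fuses those walks into one.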
*/ __folio_clear_reserved(folio); __folio_set_head(folio); ret = folio_ref_freeze(folio, 1); VM_BUG_ON(!ret); hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages); prep_compound_head(&folio->page, huge_page_order(h)); } static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m) { return m->flags & HUGE_BOOTMEM_HVO; } static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m) { return m->flags & HUGE_BOOTMEM_CMA; } /* * memblock-allocated pageblocks might not have the migrate type set * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE) * here, or MIGRATE_CMA if this was a page allocated through an early CMA * reservation. * * In the case of vmemmap optimized folios, the tail vmemmap pages are mapped * read-only, but that's ok - for sparse vmemmap this does not write to * the page structure. */ static void __init hugetlb_bootmem_init_migratetype(struct folio *folio, struct hstate *h) { unsigned long nr_pages = pages_per_huge_page(h), i; WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio))); for (i = 0; i < nr_pages; i += pageblock_nr_pages) { if (folio_test_hugetlb_cma(folio)) init_cma_pageblock(folio_page(folio, i)); else init_pageblock_migratetype(folio_page(folio, i), MIGRATE_MOVABLE, false); } } static void __init prep_and_add_bootmem_folios(struct hstate *h, struct list_head *folio_list) { unsigned long flags; struct folio *folio, *tmp_f; /* Send list for bulk vmemmap optimization processing */ hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list); list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { if (!folio_test_hugetlb_vmemmap_optimized(folio)) { /* * If HVO fails, initialize all tail struct pages. * We do not worry about potential long lock hold * time as this is early in boot and there should * be no contention. */ hugetlb_folio_init_tail_vmemmap(folio, HUGETLB_VMEMMAP_RESERVE_PAGES, pages_per_huge_page(h)); } hugetlb_bootmem_init_migratetype(folio, h); /* Subdivide locks to achieve better parallel performance */ spin_lock_irqsave(&hugetlb_lock, flags); account_new_hugetlb_folio(h, folio); enqueue_hugetlb_folio(h, folio); spin_unlock_irqrestore(&hugetlb_lock, flags); } } bool __init hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m) { unsigned long start_pfn; bool valid; if (m->flags & HUGE_BOOTMEM_ZONES_VALID) { /* * Already validated, skip check. */ return true; } if (hugetlb_bootmem_page_earlycma(m)) { valid = cma_validate_zones(m->cma); goto out; } start_pfn = virt_to_phys(m) >> PAGE_SHIFT; valid = !pfn_range_intersects_zones(nid, start_pfn, pages_per_huge_page(m->hstate)); out: if (!valid) hstate_boot_nrinvalid[hstate_index(m->hstate)]++; return valid; } /* * Free a bootmem page that was found to be invalid (intersecting with * multiple zones). * * Since it intersects with multiple zones, we can't just do a free * operation on all pages at once, but instead have to walk all * pages, freeing them one by one. */ static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, struct hstate *h) { unsigned long npages = pages_per_huge_page(h); unsigned long pfn; while (npages--) { pfn = page_to_pfn(page); __init_page_from_nid(pfn, nid); free_reserved_page(page); page++; } } /* * Put bootmem huge pages into the standard lists after mem_map is up. * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
*/ static void __init gather_bootmem_prealloc_node(unsigned long nid) { LIST_HEAD(folio_list); struct huge_bootmem_page *m, *tm; struct hstate *h = NULL, *prev_h = NULL; list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { struct page *page = virt_to_page(m); struct folio *folio = (void *)page; h = m->hstate; if (!hugetlb_bootmem_page_zones_valid(nid, m)) { /* * Can't use this page. Initialize the * page structures if that hasn't already * been done, and give them to the page * allocator. */ hugetlb_bootmem_free_invalid_page(nid, page, h); continue; } /* * It is possible to have multiple huge page sizes (hstates) * in this list. If so, process each size separately. */ if (h != prev_h && prev_h != NULL) prep_and_add_bootmem_folios(prev_h, &folio_list); prev_h = h; VM_BUG_ON(!hstate_is_gigantic(h)); WARN_ON(folio_ref_count(folio) != 1); hugetlb_folio_init_vmemmap(folio, h, HUGETLB_VMEMMAP_RESERVE_PAGES); init_new_hugetlb_folio(folio); if (hugetlb_bootmem_page_prehvo(m)) /* * If pre-HVO was done, just set the * flag, the HVO code will then skip * this folio. */ folio_set_hugetlb_vmemmap_optimized(folio); if (hugetlb_bootmem_page_earlycma(m)) folio_set_hugetlb_cma(folio); list_add(&folio->lru, &folio_list); /* * We need to restore the 'stolen' pages to totalram_pages * in order to fix confusing memory reports from free(1) and * other side-effects, like CommitLimit going negative. * * For CMA pages, this is done in init_cma_pageblock * (via hugetlb_bootmem_init_migratetype), so skip it here. */ if (!folio_test_hugetlb_cma(folio)) adjust_managed_page_count(page, pages_per_huge_page(h)); cond_resched(); } prep_and_add_bootmem_folios(h, &folio_list); } static void __init gather_bootmem_prealloc_parallel(unsigned long start, unsigned long end, void *arg) { int nid; for (nid = start; nid < end; nid++) gather_bootmem_prealloc_node(nid); } static void __init gather_bootmem_prealloc(void) { struct padata_mt_job job = { .thread_fn = gather_bootmem_prealloc_parallel, .fn_arg = NULL, .start = 0, .size = nr_node_ids, .align = 1, .min_chunk = 1, .max_threads = num_node_state(N_MEMORY), .numa_aware = true, }; padata_do_multithreaded(&job); } static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) { unsigned long i; char buf[32]; LIST_HEAD(folio_list); for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { if (hstate_is_gigantic(h)) { if (!alloc_bootmem_huge_page(h, nid)) break; } else { struct folio *folio; gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, &node_states[N_MEMORY], NULL); if (!folio) break; list_add(&folio->lru, &folio_list); } cond_resched(); } if (!list_empty(&folio_list)) prep_and_add_allocated_folios(h, &folio_list); if (i == h->max_huge_pages_node[nid]) return; string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_warn("HugeTLB: allocating %u of page size %s failed node%d. 
Only allocated %lu hugepages.\n", h->max_huge_pages_node[nid], buf, nid, i); h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); h->max_huge_pages_node[nid] = i; } static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h) { int i; bool node_specific_alloc = false; for_each_online_node(i) { if (h->max_huge_pages_node[i] > 0) { hugetlb_hstate_alloc_pages_onenode(h, i); node_specific_alloc = true; } } return node_specific_alloc; } static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h) { if (allocated < h->max_huge_pages) { char buf[32]; string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", h->max_huge_pages, buf, allocated); h->max_huge_pages = allocated; } } static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg) { struct hstate *h = (struct hstate *)arg; int i, num = end - start; nodemask_t node_alloc_noretry; LIST_HEAD(folio_list); int next_node = first_online_node; /* Bit mask controlling how hard we retry per-node allocations. */ nodes_clear(node_alloc_noretry); for (i = 0; i < num; ++i) { struct folio *folio; if (hugetlb_vmemmap_optimizable_size(h) && (si_mem_available() == 0) && !list_empty(&folio_list)) { prep_and_add_allocated_folios(h, &folio_list); INIT_LIST_HEAD(&folio_list); } folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY], &node_alloc_noretry, &next_node); if (!folio) break; list_move(&folio->lru, &folio_list); cond_resched(); } prep_and_add_allocated_folios(h, &folio_list); } static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h) { unsigned long i; for (i = 0; i < h->max_huge_pages; ++i) { if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) break; cond_resched(); } return i; } static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) { struct padata_mt_job job = { .fn_arg = h, .align = 1, .numa_aware = true }; unsigned long jiffies_start; unsigned long jiffies_end; unsigned long remaining; job.thread_fn = hugetlb_pages_alloc_boot_node; /* * job.max_threads is 25% of the available cpu threads by default. * * On large servers with terabytes of memory, huge page allocation * can consume a considerable amount of time. * * Tests below show how long it takes to allocate 1 TiB of memory with * 2MiB huge pages. Using more threads can significantly improve allocation time. * * +-----------------------+-------+-------+-------+-------+-------+ * | threads | 8 | 16 | 32 | 64 | 128 | * +-----------------------+-------+-------+-------+-------+-------+ * | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s | * | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s | * +-----------------------+-------+-------+-------+-------+-------+ */ if (hugepage_allocation_threads == 0) { hugepage_allocation_threads = num_online_cpus() / 4; hugepage_allocation_threads = max(hugepage_allocation_threads, 1); } job.max_threads = hugepage_allocation_threads; jiffies_start = jiffies; do { remaining = h->max_huge_pages - h->nr_huge_pages; job.start = h->nr_huge_pages; job.size = remaining; job.min_chunk = remaining / hugepage_allocation_threads; padata_do_multithreaded(&job); if (h->nr_huge_pages == h->max_huge_pages) break; /* * Retry only if the vmemmap optimization might have been able to free * some memory back to the system.
*/ if (!hugetlb_vmemmap_optimizable(h)) break; /* Continue if progress was made in last iteration */ } while (remaining != (h->max_huge_pages - h->nr_huge_pages)); jiffies_end = jiffies; pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n", jiffies_to_msecs(jiffies_end - jiffies_start), hugepage_allocation_threads); return h->nr_huge_pages; } /* * NOTE: this routine is called in different contexts for gigantic and * non-gigantic pages. * - For gigantic pages, this is called early in the boot process and * pages are allocated from memblock or something similar. * Gigantic pages are actually added to pools later with the routine * gather_bootmem_prealloc. * - For non-gigantic pages, this is called later in the boot process after * all of mm is up and functional. Pages are allocated from buddy and * then added to hugetlb pools. */ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) { unsigned long allocated; /* * Skip gigantic hugepages allocation if early CMA * reservations are not available. */ if (hstate_is_gigantic(h) && hugetlb_cma_total_size() && !hugetlb_early_cma(h)) { pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); return; } if (!h->max_huge_pages) return; /* do node specific alloc */ if (hugetlb_hstate_alloc_pages_specific_nodes(h)) return; /* below will do all node balanced alloc */ if (hstate_is_gigantic(h)) allocated = hugetlb_gigantic_pages_alloc_boot(h); else allocated = hugetlb_pages_alloc_boot(h); hugetlb_hstate_alloc_pages_errcheck(allocated, h); } static void __init hugetlb_init_hstates(void) { struct hstate *h, *h2; for_each_hstate(h) { /* * Always reset to first_memory_node here, even if * next_nid_to_alloc was set before - we can't * reference hugetlb_bootmem_nodes after init, and * first_memory_node is right for all further allocations. */ h->next_nid_to_alloc = first_memory_node; h->next_nid_to_free = first_memory_node; /* oversize hugepages were init'ed in early boot */ if (!hstate_is_gigantic(h)) hugetlb_hstate_alloc_pages(h); /* * Set demote order for each hstate. Note that * h->demote_order is initially 0. * - We can not demote gigantic pages if runtime freeing * is not supported, so skip this. * - If CMA allocation is possible, we can not demote * HUGETLB_PAGE_ORDER or smaller size pages.
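* Illustrative example (x86-64 with 4 KiB base pages): with hstates * of order 18 (1 GiB) and order 9 (2 MiB), the loop below sets the * 1 GiB hstate's demote_order to 9, the largest hstate order smaller * than its own.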
*/ if (hstate_is_gigantic_no_runtime(h)) continue; if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER) continue; for_each_hstate(h2) { if (h2 == h) continue; if (h2->order < h->order && h2->order > h->demote_order) h->demote_order = h2->order; } } } static void __init report_hugepages(void) { struct hstate *h; unsigned long nrinvalid; for_each_hstate(h) { char buf[32]; nrinvalid = hstate_boot_nrinvalid[hstate_index(h)]; h->max_huge_pages -= nrinvalid; string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", buf, h->nr_huge_pages); if (nrinvalid) pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n", buf, nrinvalid, str_plural(nrinvalid)); pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); } } #ifdef CONFIG_HIGHMEM static void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { int i; LIST_HEAD(page_list); lockdep_assert_held(&hugetlb_lock); if (hstate_is_gigantic(h)) return; /* * Collect pages to be freed on a list, and free after dropping lock */ for_each_node_mask(i, *nodes_allowed) { struct folio *folio, *next; struct list_head *freel = &h->hugepage_freelists[i]; list_for_each_entry_safe(folio, next, freel, lru) { if (count >= h->nr_huge_pages) goto out; if (folio_test_highmem(folio)) continue; remove_hugetlb_folio(h, folio, false); list_add(&folio->lru, &page_list); } } out: spin_unlock_irq(&hugetlb_lock); update_and_free_pages_bulk(h, &page_list); spin_lock_irq(&hugetlb_lock); } #else static inline void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { } #endif /* * Increment or decrement surplus_huge_pages. Keep node-specific counters * balanced by operating on them in a round-robin fashion. * Returns 1 if an adjustment was made. */ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, int delta) { int nr_nodes, node; lockdep_assert_held(&hugetlb_lock); VM_BUG_ON(delta != -1 && delta != 1); if (delta < 0) { for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) { if (h->surplus_huge_pages_node[node]) goto found; } } else { for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { if (h->surplus_huge_pages_node[node] < h->nr_huge_pages_node[node]) goto found; } } return 0; found: h->surplus_huge_pages += delta; h->surplus_huge_pages_node[node] += delta; return 1; } #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, nodemask_t *nodes_allowed) { unsigned long persistent_free_count; unsigned long min_count; unsigned long allocated; struct folio *folio; LIST_HEAD(page_list); NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); /* * Bit mask controlling how hard we retry per-node allocations. * If we can not allocate the bit mask, do not attempt to allocate * the requested huge pages. */ if (node_alloc_noretry) nodes_clear(*node_alloc_noretry); else return -ENOMEM; /* * resize_lock mutex prevents concurrent adjustments to number of * pages in hstate via the proc/sysfs interfaces. */ mutex_lock(&h->resize_lock); flush_free_hpage_work(h); spin_lock_irq(&hugetlb_lock); /* * Check for a node specific request. * Changing node specific huge page count may require a corresponding * change to the global count. In any case, the passed node mask * (nodes_allowed) will restrict alloc/free to the specified node. 
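* Illustrative numbers: if the pool holds 100 persistent pages * globally, 20 of them on node nid, and the user writes 30 for that * node, the adjusted global target below becomes 30 + (100 - 20) = 110.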
*/ if (nid != NUMA_NO_NODE) { unsigned long old_count = count; count += persistent_huge_pages(h) - (h->nr_huge_pages_node[nid] - h->surplus_huge_pages_node[nid]); /* * User may have specified a large count value which caused the * above calculation to overflow. In this case, they wanted * to allocate as many huge pages as possible. Set count to * largest possible value to align with their intention. */ if (count < old_count) count = ULONG_MAX; } /* * Runtime allocation of gigantic pages depends on the capability for large * page range allocation. * If the system does not provide this feature, return an error when * the user tries to allocate gigantic pages but let the user free the * boot-time allocated gigantic pages. */ if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { if (count > persistent_huge_pages(h)) { spin_unlock_irq(&hugetlb_lock); mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return -EINVAL; } /* Fall through to decrease pool */ } /* * Increase the pool size * First take pages out of surplus state. Then make up the * remaining difference by allocating fresh huge pages. * * We might race with alloc_surplus_hugetlb_folio() here and be unable * to convert a surplus huge page to a normal huge page. That is * not critical, though, it just means the overall size of the * pool might be one hugepage larger than it needs to be, but * within all the constraints specified by the sysctls. */ while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, -1)) break; } allocated = 0; while (count > (persistent_huge_pages(h) + allocated)) { /* * If this allocation races such that we no longer need the * page, free_huge_folio will handle it by freeing the page * and reducing the surplus. */ spin_unlock_irq(&hugetlb_lock); /* yield cpu to avoid soft lockup */ cond_resched(); folio = alloc_pool_huge_folio(h, nodes_allowed, node_alloc_noretry, &h->next_nid_to_alloc); if (!folio) { prep_and_add_allocated_folios(h, &page_list); spin_lock_irq(&hugetlb_lock); goto out; } list_add(&folio->lru, &page_list); allocated++; /* Bail for signals. Probably ctrl-c from user */ if (signal_pending(current)) { prep_and_add_allocated_folios(h, &page_list); spin_lock_irq(&hugetlb_lock); goto out; } spin_lock_irq(&hugetlb_lock); } /* Add allocated pages to the pool */ if (!list_empty(&page_list)) { spin_unlock_irq(&hugetlb_lock); prep_and_add_allocated_folios(h, &page_list); spin_lock_irq(&hugetlb_lock); } /* * Decrease the pool size * First return free pages to the buddy allocator (being careful * to keep enough around to satisfy reservations). Then place * pages into surplus state as needed so the pool will shrink * to the desired size as pages become free. * * By placing pages into the surplus state independent of the * overcommit value, we are allowing the surplus pool size to * exceed overcommit. There are few sane options here. Since * alloc_surplus_hugetlb_folio() is checking the global counter, * though, we'll note that we're not allowed to exceed surplus * and won't grow the pool anywhere else. Not until one of the * sysctls is changed, or the surplus pages go out of use. * * min_count is the expected number of persistent pages; we * shouldn't calculate min_count by using * resv_huge_pages + persistent_huge_pages() - free_huge_pages, * because there may exist free surplus huge pages, and this will * lead to subtracting twice.
Free surplus huge pages come from HVO * failing to restore vmemmap; see comments in the callers of * hugetlb_vmemmap_restore_folio(). Thus, we should calculate * persistent free count first. */ persistent_free_count = h->free_huge_pages; if (h->free_huge_pages > persistent_huge_pages(h)) { if (h->free_huge_pages > h->surplus_huge_pages) persistent_free_count -= h->surplus_huge_pages; else persistent_free_count = 0; } min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count; min_count = max(count, min_count); try_to_free_low(h, min_count, nodes_allowed); /* * Collect pages to be removed on a list without dropping lock */ while (min_count < persistent_huge_pages(h)) { folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0); if (!folio) break; list_add(&folio->lru, &page_list); } /* free the pages after dropping lock */ spin_unlock_irq(&hugetlb_lock); update_and_free_pages_bulk(h, &page_list); flush_free_hpage_work(h); spin_lock_irq(&hugetlb_lock); while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: h->max_huge_pages = persistent_huge_pages(h); spin_unlock_irq(&hugetlb_lock); mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return 0; } static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, struct list_head *src_list) { long rc; struct folio *folio, *next; LIST_HEAD(dst_list); LIST_HEAD(ret_list); rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list); list_splice_init(&ret_list, src_list); /* * Taking the target hstate mutex synchronizes with set_max_huge_pages. * Without the mutex, pages added to target hstate could be marked * as surplus. * * Note that we already hold src->resize_lock. To prevent deadlock, * use the convention of always taking larger size hstate mutex first.
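* * For example, when demoting 1G pages to 2M pages, src is the 1G hstate whose resize_lock our caller already holds, and dst is the 2M hstate whose resize_lock (the smaller size, hence taken second) is acquired below.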
*/ mutex_lock(&dst->resize_lock); list_for_each_entry_safe(folio, next, src_list, lru) { int i; bool cma; if (folio_test_hugetlb_vmemmap_optimized(folio)) continue; cma = folio_test_hugetlb_cma(folio); list_del(&folio->lru); split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst)); for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { struct page *page = folio_page(folio, i); /* Careful: see __split_huge_page_tail() */ struct folio *new_folio = (struct folio *)page; clear_compound_head(page); prep_compound_page(page, dst->order); new_folio->mapping = NULL; init_new_hugetlb_folio(new_folio); /* Copy the CMA flag so that it is freed correctly */ if (cma) folio_set_hugetlb_cma(new_folio); list_add(&new_folio->lru, &dst_list); } } prep_and_add_allocated_folios(dst, &dst_list); mutex_unlock(&dst->resize_lock); return rc; } long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed, unsigned long nr_to_demote) __must_hold(&hugetlb_lock) { int nr_nodes, node; struct hstate *dst; long rc = 0; long nr_demoted = 0; lockdep_assert_held(&hugetlb_lock); /* We should never get here if no demote order */ if (!src->demote_order) { pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); return -EINVAL; /* internal error */ } dst = size_to_hstate(PAGE_SIZE << src->demote_order); for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) { LIST_HEAD(list); struct folio *folio, *next; list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { if (folio_test_hwpoison(folio)) continue; remove_hugetlb_folio(src, folio, false); list_add(&folio->lru, &list); if (++nr_demoted == nr_to_demote) break; } spin_unlock_irq(&hugetlb_lock); rc = demote_free_hugetlb_folios(src, dst, &list); spin_lock_irq(&hugetlb_lock); list_for_each_entry_safe(folio, next, &list, lru) { list_del(&folio->lru); add_hugetlb_folio(src, folio, false); nr_demoted--; } if (rc < 0 || nr_demoted == nr_to_demote) break; } /* * Not absolutely necessary, but for consistency update max_huge_pages * based on pool changes for the demoted page. */ src->max_huge_pages -= nr_demoted; dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); if (rc < 0) return rc; if (nr_demoted) return nr_demoted; /* * Only way to get here is if all pages on free lists are poisoned. * Return -EBUSY so that caller will not retry. */ return -EBUSY; } ssize_t __nr_hugepages_store_common(bool obey_mempolicy, struct hstate *h, int nid, unsigned long count, size_t len) { int err; nodemask_t nodes_allowed, *n_mask; if (hstate_is_gigantic_no_runtime(h)) return -EINVAL; if (nid == NUMA_NO_NODE) { /* * global hstate attribute */ if (!(obey_mempolicy && init_nodemask_of_mempolicy(&nodes_allowed))) n_mask = &node_states[N_MEMORY]; else n_mask = &nodes_allowed; } else { /* * Node specific request. count adjustment happens in * set_max_huge_pages() after acquiring hugetlb_lock. */ init_nodemask_of_node(&nodes_allowed, nid); n_mask = &nodes_allowed; } err = set_max_huge_pages(h, count, nid, n_mask); return err ? 
err : len; } static int __init hugetlb_init(void) { int i; BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < __NR_HPAGEFLAGS); BUILD_BUG_ON_INVALID(HUGETLB_PAGE_ORDER > MAX_FOLIO_ORDER); if (!hugepages_supported()) { if (hugetlb_max_hstate || default_hstate_max_huge_pages) pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); return 0; } /* * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some * architectures depend on setup being done here. */ hugetlb_add_hstate(HUGETLB_PAGE_ORDER); if (!parsed_default_hugepagesz) { /* * If we did not parse a default huge page size, set * default_hstate_idx to HPAGE_SIZE hstate. And, if the * number of huge pages for this default size was implicitly * specified, set that here as well. * Note that the implicit setting will overwrite an explicit * setting. A warning will be printed in this case. */ default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); if (default_hstate_max_huge_pages) { if (default_hstate.max_huge_pages) { char buf[32]; string_get_size(huge_page_size(&default_hstate), 1, STRING_UNITS_2, buf, 32); pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", default_hstate.max_huge_pages, buf); pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", default_hstate_max_huge_pages); } default_hstate.max_huge_pages = default_hstate_max_huge_pages; for_each_online_node(i) default_hstate.max_huge_pages_node[i] = default_hugepages_in_node[i]; } } hugetlb_cma_check(); hugetlb_init_hstates(); gather_bootmem_prealloc(); report_hugepages(); hugetlb_sysfs_init(); hugetlb_cgroup_file_init(); hugetlb_sysctl_init(); #ifdef CONFIG_SMP num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); #else num_fault_mutexes = 1; #endif hugetlb_fault_mutex_table = kmalloc_array(num_fault_mutexes, sizeof(struct mutex), GFP_KERNEL); BUG_ON(!hugetlb_fault_mutex_table); for (i = 0; i < num_fault_mutexes; i++) mutex_init(&hugetlb_fault_mutex_table[i]); return 0; } subsys_initcall(hugetlb_init); /* Overwritten by architectures with more huge page sizes */ bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) { return size == HPAGE_SIZE; } void __init hugetlb_add_hstate(unsigned int order) { struct hstate *h; unsigned long i; if (size_to_hstate(PAGE_SIZE << order)) { return; } BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); BUG_ON(order < order_base_2(__NR_USED_SUBPAGE)); WARN_ON(order > MAX_FOLIO_ORDER); h = &hstates[hugetlb_max_hstate++]; __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); h->order = order; h->mask = ~(huge_page_size(h) - 1); for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); INIT_LIST_HEAD(&h->hugepage_activelist); snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/SZ_1K); parsed_hstate = h; } bool __init __weak hugetlb_node_alloc_supported(void) { return true; } static void __init hugepages_clear_pages_in_node(void) { if (!hugetlb_max_hstate) { default_hstate_max_huge_pages = 0; memset(default_hugepages_in_node, 0, sizeof(default_hugepages_in_node)); } else { parsed_hstate->max_huge_pages = 0; memset(parsed_hstate->max_huge_pages_node, 0, sizeof(parsed_hstate->max_huge_pages_node)); } } static __init int hugetlb_add_param(char *s, int (*setup)(char *)) { size_t len; char *p; if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS) return -EINVAL; len = strlen(s) + 1; if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf)) return -EINVAL; p = 
&hstate_cmdline_buf[hstate_cmdline_index]; memcpy(p, s, len); hstate_cmdline_index += len; hugetlb_params[hugetlb_param_index].val = p; hugetlb_params[hugetlb_param_index].setup = setup; hugetlb_param_index++; return 0; } static __init void hugetlb_parse_params(void) { int i; struct hugetlb_cmdline *hcp; for (i = 0; i < hugetlb_param_index; i++) { hcp = &hugetlb_params[i]; hcp->setup(hcp->val); } hugetlb_cma_validate_params(); } /* * hugepages command line processing * hugepages normally follows a valid hugepagesz or default_hugepagesz * specification. If not, ignore the hugepages value. hugepages can also * be the first huge page command line option in which case it implicitly * specifies the number of huge pages for the default size. */ static int __init hugepages_setup(char *s) { unsigned long *mhp; static unsigned long *last_mhp; int node = NUMA_NO_NODE; int count; unsigned long tmp; char *p = s; if (!parsed_valid_hugepagesz) { pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); parsed_valid_hugepagesz = true; return -EINVAL; } /* * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter * yet, so this hugepages= parameter goes to the "default hstate". * Otherwise, it goes with the previously parsed hugepagesz or * default_hugepagesz. */ else if (!hugetlb_max_hstate) mhp = &default_hstate_max_huge_pages; else mhp = &parsed_hstate->max_huge_pages; if (mhp == last_mhp) { pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); return 1; } while (*p) { count = 0; if (sscanf(p, "%lu%n", &tmp, &count) != 1) goto invalid; /* Parameter is node format */ if (p[count] == ':') { if (!hugetlb_node_alloc_supported()) { pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); return 1; } if (tmp >= MAX_NUMNODES || !node_online(tmp)) goto invalid; node = array_index_nospec(tmp, MAX_NUMNODES); p += count + 1; /* Parse hugepages */ if (sscanf(p, "%lu%n", &tmp, &count) != 1) goto invalid; if (!hugetlb_max_hstate) default_hugepages_in_node[node] = tmp; else parsed_hstate->max_huge_pages_node[node] = tmp; *mhp += tmp; /* Go to parse next node */ if (p[count] == ',') p += count + 1; else break; } else { if (p != s) goto invalid; *mhp = tmp; break; } } last_mhp = mhp; return 0; invalid: pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); hugepages_clear_pages_in_node(); return -EINVAL; } hugetlb_early_param("hugepages", hugepages_setup); /* * hugepagesz command line processing * A specific huge page size can only be specified once with hugepagesz. * hugepagesz is followed by hugepages on the command line. The global * variable 'parsed_valid_hugepagesz' is used to determine if the prior * hugepagesz argument was valid. */ static int __init hugepagesz_setup(char *s) { unsigned long size; struct hstate *h; parsed_valid_hugepagesz = false; size = (unsigned long)memparse(s, NULL); if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); return -EINVAL; } h = size_to_hstate(size); if (h) { /* * hstate for this size already exists. This is normally * an error, but is allowed if the existing hstate is the * default hstate. More specifically, it is only allowed if * the number of huge pages for the default hstate was not * previously specified.
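* * For example, "default_hugepagesz=1G hugepagesz=1G hugepages=2" is accepted: the repeated size names the default hstate before any page count was set for it. Repeating a non-default size, e.g. specifying "hugepagesz=2M" twice, is rejected below.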
*/ if (!parsed_default_hugepagesz || h != &default_hstate || default_hstate.max_huge_pages) { pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); return -EINVAL; } /* * No need to call hugetlb_add_hstate() as hstate already * exists. But, do set parsed_hstate so that a following * hugepages= parameter will be applied to this hstate. */ parsed_hstate = h; parsed_valid_hugepagesz = true; return 0; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); parsed_valid_hugepagesz = true; return 0; } hugetlb_early_param("hugepagesz", hugepagesz_setup); /* * default_hugepagesz command line input * Only one instance of default_hugepagesz allowed on command line. */ static int __init default_hugepagesz_setup(char *s) { unsigned long size; int i; parsed_valid_hugepagesz = false; if (parsed_default_hugepagesz) { pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); return -EINVAL; } size = (unsigned long)memparse(s, NULL); if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); return -EINVAL; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); parsed_valid_hugepagesz = true; parsed_default_hugepagesz = true; default_hstate_idx = hstate_index(size_to_hstate(size)); /* * The number of default huge pages (for this size) could have been * specified as the first hugetlb parameter: hugepages=X. If so, * then default_hstate_max_huge_pages is set. If the default huge * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be * allocated here from bootmem allocator. */ if (default_hstate_max_huge_pages) { default_hstate.max_huge_pages = default_hstate_max_huge_pages; /* * Since this is an early parameter, we can't check * NUMA node state yet, so loop through MAX_NUMNODES. */ for (i = 0; i < MAX_NUMNODES; i++) { if (default_hugepages_in_node[i] != 0) default_hstate.max_huge_pages_node[i] = default_hugepages_in_node[i]; } default_hstate_max_huge_pages = 0; } return 0; } hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup); void __init hugetlb_bootmem_set_nodes(void) { int i, nid; unsigned long start_pfn, end_pfn; if (!nodes_empty(hugetlb_bootmem_nodes)) return; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { if (end_pfn > start_pfn) node_set(nid, hugetlb_bootmem_nodes); } } static bool __hugetlb_bootmem_allocated __initdata; bool __init hugetlb_bootmem_allocated(void) { return __hugetlb_bootmem_allocated; } void __init hugetlb_bootmem_alloc(void) { struct hstate *h; int i; if (__hugetlb_bootmem_allocated) return; hugetlb_bootmem_set_nodes(); for (i = 0; i < MAX_NUMNODES; i++) INIT_LIST_HEAD(&huge_boot_pages[i]); hugetlb_parse_params(); for_each_hstate(h) { h->next_nid_to_alloc = first_online_node; if (hstate_is_gigantic(h)) hugetlb_hstate_alloc_pages(h); } __hugetlb_bootmem_allocated = true; } /* * hugepage_alloc_threads command line parsing. * * When set, use this specific number of threads for the boot * allocation of hugepages. 
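* * For example, booting with "hugepage_alloc_threads=8" performs the boot-time pool allocation with 8 threads instead of the default number of worker threads.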
*/ static int __init hugepage_alloc_threads_setup(char *s) { unsigned long allocation_threads; if (kstrtoul(s, 0, &allocation_threads) != 0) return 1; if (allocation_threads == 0) return 1; hugepage_allocation_threads = allocation_threads; return 1; } __setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup); static unsigned int allowed_mems_nr(struct hstate *h) { int node; unsigned int nr = 0; nodemask_t *mbind_nodemask; unsigned int *array = h->free_huge_pages_node; gfp_t gfp_mask = htlb_alloc_mask(h); mbind_nodemask = policy_mbind_nodemask(gfp_mask); for_each_node_mask(node, cpuset_current_mems_allowed) { if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) nr += array[node]; } return nr; } void hugetlb_report_meminfo(struct seq_file *m) { struct hstate *h; unsigned long total = 0; if (!hugepages_supported()) return; for_each_hstate(h) { unsigned long count = h->nr_huge_pages; total += huge_page_size(h) * count; if (h == &default_hstate) seq_printf(m, "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" "HugePages_Rsvd: %5lu\n" "HugePages_Surp: %5lu\n" "Hugepagesize: %8lu kB\n", count, h->free_huge_pages, h->resv_huge_pages, h->surplus_huge_pages, huge_page_size(h) / SZ_1K); } seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); } int hugetlb_report_node_meminfo(char *buf, int len, int nid) { struct hstate *h = &default_hstate; if (!hugepages_supported()) return 0; return sysfs_emit_at(buf, len, "Node %d HugePages_Total: %5u\n" "Node %d HugePages_Free: %5u\n" "Node %d HugePages_Surp: %5u\n", nid, h->nr_huge_pages_node[nid], nid, h->free_huge_pages_node[nid], nid, h->surplus_huge_pages_node[nid]); } void hugetlb_show_meminfo_node(int nid) { struct hstate *h; if (!hugepages_supported()) return; for_each_hstate(h) printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", nid, h->nr_huge_pages_node[nid], h->free_huge_pages_node[nid], h->surplus_huge_pages_node[nid], huge_page_size(h) / SZ_1K); } void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) { seq_printf(m, "HugetlbPages:\t%8lu kB\n", K(atomic_long_read(&mm->hugetlb_usage))); } /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ unsigned long hugetlb_total_pages(void) { struct hstate *h; unsigned long nr_total_pages = 0; for_each_hstate(h) nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); return nr_total_pages; } static int hugetlb_acct_memory(struct hstate *h, long delta) { int ret = -ENOMEM; if (!delta) return 0; spin_lock_irq(&hugetlb_lock); /* * When cpuset is configured, it breaks the strict hugetlb page * reservation as the accounting is done on a global variable. Such * reservation is completely rubbish in the presence of cpuset because * the reservation is not checked against page availability for the * current cpuset. An application can still potentially be OOM'ed by the * kernel for lack of a free hugetlb page in the cpuset that the task is in. * Attempting to enforce strict accounting with cpuset is almost * impossible (or too ugly) because cpuset is so fluid that * a task or memory node can be dynamically moved between cpusets. * * The change of semantics for shared hugetlb mapping with cpuset is * undesirable. However, in order to preserve some of the semantics, * we fall back to check against current free page availability as * a best attempt and hopefully to minimize the impact of changing * semantics that cpuset has.
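* * Concretely, for delta > 0 the code below first gathers enough surplus pages and only then compares delta against allowed_mems_nr(); if the cpuset/mempolicy-allowed nodes cannot cover delta pages, the surplus is handed back and -ENOMEM is returned.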
* * Apart from cpuset, we also have the memory policy mechanism, which * determines from which node the kernel will allocate memory * in a NUMA system. So, similar to cpuset, we should also consider * the memory policy of the current task, as described above. */ if (delta > 0) { if (gather_surplus_pages(h, delta) < 0) goto out; if (delta > allowed_mems_nr(h)) { return_unused_surplus_pages(h, delta); goto out; } } ret = 0; if (delta < 0) return_unused_surplus_pages(h, (unsigned long) -delta); out: spin_unlock_irq(&hugetlb_lock); return ret; } static void hugetlb_vm_op_open(struct vm_area_struct *vma) { struct resv_map *resv = vma_resv_map(vma); /* * HPAGE_RESV_OWNER indicates a private mapping. * This new VMA should share its sibling's reservation map if present. * The VMA will only ever have a valid reservation map pointer where * it is being copied for another still existing VMA. As that VMA * has a reference to the reservation map it cannot disappear until * after this open call completes. It is therefore safe to take a * new reference here without additional locking. */ if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { resv_map_dup_hugetlb_cgroup_uncharge_info(resv); kref_get(&resv->refs); } /* * vma_lock structure for sharable mappings is vma specific. * Clear old pointer (if copied via vm_area_dup) and allocate * new structure. Before clearing, make sure vma_lock is not * for this vma. */ if (vma->vm_flags & VM_MAYSHARE) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; if (vma_lock) { if (vma_lock->vma != vma) { vma->vm_private_data = NULL; hugetlb_vma_lock_alloc(vma); } else pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); } else hugetlb_vma_lock_alloc(vma); } } static void hugetlb_vm_op_close(struct vm_area_struct *vma) { struct hstate *h = hstate_vma(vma); struct resv_map *resv; struct hugepage_subpool *spool = subpool_vma(vma); unsigned long reserve, start, end; long gbl_reserve; hugetlb_vma_lock_free(vma); resv = vma_resv_map(vma); if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) return; start = vma_hugecache_offset(h, vma, vma->vm_start); end = vma_hugecache_offset(h, vma, vma->vm_end); reserve = (end - start) - region_count(resv, start, end); hugetlb_cgroup_uncharge_counter(resv, start, end); if (reserve) { /* * Decrement reserve counts. The global reserve count may be * adjusted if the subpool has a minimum size. */ gbl_reserve = hugepage_subpool_put_pages(spool, reserve); hugetlb_acct_memory(h, -gbl_reserve); } kref_put(&resv->refs, resv_map_release); } static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) { if (addr & ~(huge_page_mask(hstate_vma(vma)))) return -EINVAL; return 0; } void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) { /* * PMD sharing is only possible for PUD_SIZE-aligned address ranges * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. * This function is called in the middle of a VMA split operation, with * MM, VMA and rmap all write-locked to prevent concurrent page table * walks (except hardware and gup_fast()). */ vma_assert_write_locked(vma); i_mmap_assert_write_locked(vma->vm_file->f_mapping); if (addr & ~PUD_MASK) { unsigned long floor = addr & PUD_MASK; unsigned long ceil = floor + PUD_SIZE; if (floor >= vma->vm_start && ceil <= vma->vm_end) { /* * Locking: * Use take_locks=false here. * The file rmap lock is already held.
* The hugetlb VMA lock can't be taken when we already * hold the file rmap lock, and we don't need it because * its purpose is to synchronize against concurrent page * table walks, which are not possible thanks to the * locks held by our caller. */ hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false); } } } static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) { return huge_page_size(hstate_vma(vma)); } /* * We cannot handle pagefaults against hugetlb pages at all. They cause * handle_mm_fault() to try to instantiate regular-sized pages in the * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get * this far. */ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) { BUG(); return 0; } /* * When a new function is introduced to vm_operations_struct and added * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. * This is because under System V memory model, mappings created via * shmget/shmat with "huge page" specified are backed by hugetlbfs files, * and their original vm_ops are overwritten with shm_vm_ops. */ const struct vm_operations_struct hugetlb_vm_ops = { .fault = hugetlb_vm_op_fault, .open = hugetlb_vm_op_open, .close = hugetlb_vm_op_close, .may_split = hugetlb_vm_op_split, .pagesize = hugetlb_vm_op_pagesize, }; static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio, bool try_mkwrite) { pte_t entry = folio_mk_pte(folio, vma->vm_page_prot); unsigned int shift = huge_page_shift(hstate_vma(vma)); if (try_mkwrite && (vma->vm_flags & VM_WRITE)) { entry = pte_mkwrite_novma(pte_mkdirty(entry)); } else { entry = pte_wrprotect(entry); } entry = pte_mkyoung(entry); entry = arch_make_huge_pte(entry, shift, vma->vm_flags); return entry; } static void set_huge_ptep_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t entry; entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep))); if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) update_mmu_cache(vma, address, ptep); } static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { if (vma->vm_flags & VM_WRITE) set_huge_ptep_writable(vma, address, ptep); } static void hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, struct folio *new_folio, pte_t old, unsigned long sz) { pte_t newpte = make_huge_pte(vma, new_folio, true); __folio_mark_uptodate(new_folio); hugetlb_add_new_anon_rmap(new_folio, vma, addr); if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); folio_set_hugetlb_migratable(new_folio); } int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { pte_t *src_pte, *dst_pte, entry; struct folio *pte_folio; unsigned long addr; bool cow = is_cow_mapping(src_vma->vm_flags); struct hstate *h = hstate_vma(src_vma); unsigned long sz = huge_page_size(h); unsigned long npages = pages_per_huge_page(h); struct mmu_notifier_range range; unsigned long last_addr_mask; softleaf_t softleaf; int ret = 0; if (cow) { mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, src_vma->vm_start, src_vma->vm_end); mmu_notifier_invalidate_range_start(&range); vma_assert_write_locked(src_vma); raw_write_seqcount_begin(&src->write_protect_seq); } else { /* * For shared mappings the vma lock must be
held before * calling hugetlb_walk() in the src vma. Otherwise, the * returned ptep could go away if part of a shared pmd and * another thread calls huge_pmd_unshare. */ hugetlb_vma_lock_read(src_vma); } last_addr_mask = hugetlb_mask_last_page(h); for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { spinlock_t *src_ptl, *dst_ptl; src_pte = hugetlb_walk(src_vma, addr, sz); if (!src_pte) { addr |= last_addr_mask; continue; } dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); if (!dst_pte) { ret = -ENOMEM; break; } #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING /* If the pagetables are shared, there is nothing to do */ if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) { addr |= last_addr_mask; continue; } #endif dst_ptl = huge_pte_lock(h, dst, dst_pte); src_ptl = huge_pte_lockptr(h, src, src_pte); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); again: if (huge_pte_none(entry)) { /* Skip if src entry none. */ goto next; } softleaf = softleaf_from_pte(entry); if (unlikely(softleaf_is_hwpoison(softleaf))) { if (!userfaultfd_wp(dst_vma)) entry = huge_pte_clear_uffd_wp(entry); set_huge_pte_at(dst, addr, dst_pte, entry, sz); } else if (unlikely(softleaf_is_migration(softleaf))) { bool uffd_wp = pte_swp_uffd_wp(entry); if (!softleaf_is_migration_read(softleaf) && cow) { /* * COW mappings require pages in both * parent and child to be set to read. */ softleaf = make_readable_migration_entry( swp_offset(softleaf)); entry = swp_entry_to_pte(softleaf); if (userfaultfd_wp(src_vma) && uffd_wp) entry = pte_swp_mkuffd_wp(entry); set_huge_pte_at(src, addr, src_pte, entry, sz); } if (!userfaultfd_wp(dst_vma)) entry = huge_pte_clear_uffd_wp(entry); set_huge_pte_at(dst, addr, dst_pte, entry, sz); } else if (unlikely(pte_is_marker(entry))) { const pte_marker marker = copy_pte_marker(softleaf, dst_vma); if (marker) set_huge_pte_at(dst, addr, dst_pte, make_pte_marker(marker), sz); } else { entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); pte_folio = page_folio(pte_page(entry)); folio_get(pte_folio); /* * Failing to duplicate the anon rmap is a rare case * where we see pinned hugetlb pages while they're * prone to COW. We need to do the COW earlier during * fork. * * When pre-allocating the page or copying data, we * need to be without the pgtable locks since we could * sleep during the process. 
*/ if (!folio_test_anon(pte_folio)) { hugetlb_add_file_rmap(pte_folio); } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) { pte_t src_pte_old = entry; struct folio *new_folio; spin_unlock(src_ptl); spin_unlock(dst_ptl); /* Do not use reserve as it's privately owned */ new_folio = alloc_hugetlb_folio(dst_vma, addr, false); if (IS_ERR(new_folio)) { folio_put(pte_folio); ret = PTR_ERR(new_folio); break; } ret = copy_user_large_folio(new_folio, pte_folio, addr, dst_vma); folio_put(pte_folio); if (ret) { folio_put(new_folio); break; } /* Install the new hugetlb folio if the src pte is stable */ dst_ptl = huge_pte_lock(h, dst, dst_pte); src_ptl = huge_pte_lockptr(h, src, src_pte); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); if (!pte_same(src_pte_old, entry)) { restore_reserve_on_error(h, dst_vma, addr, new_folio); folio_put(new_folio); /* huge_ptep of dst_pte won't change as in child */ goto again; } hugetlb_install_folio(dst_vma, dst_pte, addr, new_folio, src_pte_old, sz); goto next; } if (cow) { /* * No need to notify as we are downgrading page * table protection, not changing it to point * to a new page. * * See Documentation/mm/mmu_notifier.rst */ huge_ptep_set_wrprotect(src, addr, src_pte); entry = huge_pte_wrprotect(entry); } if (!userfaultfd_wp(dst_vma)) entry = huge_pte_clear_uffd_wp(entry); set_huge_pte_at(dst, addr, dst_pte, entry, sz); hugetlb_count_add(npages, dst); } next: spin_unlock(src_ptl); spin_unlock(dst_ptl); } if (cow) { raw_write_seqcount_end(&src->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } else { hugetlb_vma_unlock_read(src_vma); } return ret; } static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, unsigned long sz) { bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); struct hstate *h = hstate_vma(vma); struct mm_struct *mm = vma->vm_mm; spinlock_t *src_ptl, *dst_ptl; pte_t pte; dst_ptl = huge_pte_lock(h, mm, dst_pte); src_ptl = huge_pte_lockptr(h, mm, src_pte); /* * We don't have to worry about the ordering of src and dst ptlocks * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. */ if (src_ptl != dst_ptl) spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz); if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte)) { huge_pte_clear(mm, new_addr, dst_pte, sz); } else { if (need_clear_uffd_wp) { if (pte_present(pte)) pte = huge_pte_clear_uffd_wp(pte); else pte = pte_swp_clear_uffd_wp(pte); } set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); } if (src_ptl != dst_ptl) spin_unlock(src_ptl); spin_unlock(dst_ptl); } int move_hugetlb_page_tables(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long len) { struct hstate *h = hstate_vma(vma); struct address_space *mapping = vma->vm_file->f_mapping; unsigned long sz = huge_page_size(h); struct mm_struct *mm = vma->vm_mm; unsigned long old_end = old_addr + len; unsigned long last_addr_mask; pte_t *src_pte, *dst_pte; struct mmu_notifier_range range; bool shared_pmd = false; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, old_end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); /* * In case of shared PMDs, we should cover the maximum possible * range.
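* * For example, with 2M huge pages a shared PMD page table spans a PUD_SIZE (1G on x86-64) aligned region, so adjust_range_if_pmd_sharing_possible() above may have widened [start, end) to PUD_SIZE boundaries.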
*/ flush_cache_range(vma, range.start, range.end); mmu_notifier_invalidate_range_start(&range); last_addr_mask = hugetlb_mask_last_page(h); /* Prevent race with file truncation */ hugetlb_vma_lock_write(vma); i_mmap_lock_write(mapping); for (; old_addr < old_end; old_addr += sz, new_addr += sz) { src_pte = hugetlb_walk(vma, old_addr, sz); if (!src_pte) { old_addr |= last_addr_mask; new_addr |= last_addr_mask; continue; } if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte))) continue; if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { shared_pmd = true; old_addr |= last_addr_mask; new_addr |= last_addr_mask; continue; } dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); if (!dst_pte) break; move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); } if (shared_pmd) flush_hugetlb_tlb_range(vma, range.start, range.end); else flush_hugetlb_tlb_range(vma, old_end - len, old_end); mmu_notifier_invalidate_range_end(&range); i_mmap_unlock_write(mapping); hugetlb_vma_unlock_write(vma); return len + old_addr - old_end; } void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct folio *folio, zap_flags_t zap_flags) { struct mm_struct *mm = vma->vm_mm; const bool folio_provided = !!folio; unsigned long address; pte_t *ptep; pte_t pte; spinlock_t *ptl; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); bool adjust_reservation; unsigned long last_addr_mask; bool force_flush = false; WARN_ON(!is_vm_hugetlb_page(vma)); BUG_ON(start & ~huge_page_mask(h)); BUG_ON(end & ~huge_page_mask(h)); /* * This is a hugetlb vma; all the pte entries should point * to huge pages. */ tlb_change_page_size(tlb, sz); tlb_start_vma(tlb, vma); last_addr_mask = hugetlb_mask_last_page(h); address = start; for (; address < end; address += sz) { ptep = hugetlb_walk(vma, address, sz); if (!ptep) { address |= last_addr_mask; continue; } ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, vma, address, ptep)) { spin_unlock(ptl); tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); force_flush = true; address |= last_addr_mask; continue; } pte = huge_ptep_get(mm, address, ptep); if (huge_pte_none(pte)) { spin_unlock(ptl); continue; } /* * A migrating or HWPoisoned hugepage is already * unmapped and its refcount dropped, so just clear the pte here. */ if (unlikely(!pte_present(pte))) { /* * If the pte was wr-protected by uffd-wp in any of the * swap forms, and the caller does not want to * drop the uffd-wp bit in this zap, then replace the * pte with a marker. */ if (pte_swp_uffd_wp_any(pte) && !(zap_flags & ZAP_FLAG_DROP_MARKER)) set_huge_pte_at(mm, address, ptep, make_pte_marker(PTE_MARKER_UFFD_WP), sz); else huge_pte_clear(mm, address, ptep, sz); spin_unlock(ptl); continue; } /* * If a folio is supplied, it is because a specific * folio is being unmapped, not a range. Ensure the folio we * are about to unmap is the actual folio of interest.
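* * unmap_ref_private() relies on this: it passes the folio that failed COW so that only mappings of that exact folio in other VMAs are torn down.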
*/ if (folio_provided) { if (folio != page_folio(pte_page(pte))) { spin_unlock(ptl); continue; } /* * Mark the VMA as having unmapped its page so that * future faults in this VMA will fail rather than * looking like data was lost */ set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); } else { folio = page_folio(pte_page(pte)); } pte = huge_ptep_get_and_clear(mm, address, ptep, sz); tlb_remove_huge_tlb_entry(h, tlb, ptep, address); if (huge_pte_dirty(pte)) folio_mark_dirty(folio); /* Leave a uffd-wp pte marker if needed */ if (huge_pte_uffd_wp(pte) && !(zap_flags & ZAP_FLAG_DROP_MARKER)) set_huge_pte_at(mm, address, ptep, make_pte_marker(PTE_MARKER_UFFD_WP), sz); hugetlb_count_sub(pages_per_huge_page(h), mm); hugetlb_remove_rmap(folio); spin_unlock(ptl); /* * Restore the reservation for an anonymous page, otherwise the * backing page could be stolen by someone. * If we are freeing a surplus, do not set the restore * reservation bit. */ adjust_reservation = false; spin_lock_irq(&hugetlb_lock); if (!h->surplus_huge_pages && __vma_private_lock(vma) && folio_test_anon(folio)) { folio_set_hugetlb_restore_reserve(folio); /* Reservation to be adjusted after the spin lock */ adjust_reservation = true; } spin_unlock_irq(&hugetlb_lock); /* * Adjust the reservation for the region that will have the * reserve restored. Keep in mind that vma_needs_reservation() changes * resv->adds_in_progress if it succeeds. If this is not done, * do_exit() will not see it, and will keep the reservation * forever. */ if (adjust_reservation) { int rc = vma_needs_reservation(h, vma, address); if (rc < 0) /* Presumably allocate_file_region_entries failed * to allocate a file_region struct. Clear * hugetlb_restore_reserve so that global reserve * count will not be incremented by free_huge_folio. * Act as if we consumed the reservation. */ folio_clear_hugetlb_restore_reserve(folio); else if (rc) vma_add_reservation(h, vma, address); } tlb_remove_page_size(tlb, folio_page(folio, 0), folio_size(folio)); /* * If we were instructed to unmap a specific folio, we're done. */ if (folio_provided) break; } tlb_end_vma(tlb, vma); /* * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We * could defer the flush until now, since by holding i_mmap_rwsem we * guaranteed that the last reference would not be dropped. But we must * do the flushing before we return, as otherwise i_mmap_rwsem will be * dropped and the last reference to the shared PMDs page might be * dropped as well. * * In theory we could defer the freeing of the PMD pages as well, but * huge_pmd_unshare() relies on the exact page_count for the PMD page to * detect sharing, so we cannot defer the release of the page either. * Instead, do flush now. */ if (force_flush) tlb_flush_mmu_tlbonly(tlb); } void __hugetlb_zap_begin(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { if (!vma->vm_file) /* hugetlbfs_file_mmap error */ return; adjust_range_if_pmd_sharing_possible(vma, start, end); hugetlb_vma_lock_write(vma); if (vma->vm_file) i_mmap_lock_write(vma->vm_file->f_mapping); } void __hugetlb_zap_end(struct vm_area_struct *vma, struct zap_details *details) { zap_flags_t zap_flags = details ? details->zap_flags : 0; if (!vma->vm_file) /* hugetlbfs_file_mmap error */ return; if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */ /* * Unlock and free the vma lock before releasing i_mmap_rwsem. * When the vma_lock is freed, this makes the vma ineligible * for pmd sharing. And, i_mmap_rwsem is required to set up * pmd sharing.
This is important as page tables for this * unmapped range will be asynchronously deleted. If the page * tables are shared, there will be issues when accessed by * someone else. */ __hugetlb_vma_unlock_write_free(vma); } else { hugetlb_vma_unlock_write(vma); } if (vma->vm_file) i_mmap_unlock_write(vma->vm_file->f_mapping); } void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct folio *folio, zap_flags_t zap_flags) { struct mmu_notifier_range range; struct mmu_gather tlb; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, start, end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); mmu_notifier_invalidate_range_start(&range); tlb_gather_mmu(&tlb, vma->vm_mm); __unmap_hugepage_range(&tlb, vma, start, end, folio, zap_flags); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb); } /* * This is called when the original mapper is failing to COW a MAP_PRIVATE * mapping it owns the reserve page for. The intention is to unmap the page * from other VMAs and let the children be SIGKILLed if they fault in the * same region. */ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct folio *folio, unsigned long address) { struct hstate *h = hstate_vma(vma); struct vm_area_struct *iter_vma; struct address_space *mapping; pgoff_t pgoff; /* * vm_pgoff is in PAGE_SIZE units, hence the different calculation * from page cache lookup which is in HPAGE_SIZE units. */ address = address & huge_page_mask(h); pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; mapping = vma->vm_file->f_mapping; /* * Take the mapping lock for the duration of the table walk. As * this mapping should be shared between all the VMAs, * __unmap_hugepage_range() is called as the lock is already held */ i_mmap_lock_write(mapping); vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { /* Do not unmap the current VMA */ if (iter_vma == vma) continue; /* * Shared VMAs have their own reserves and do not affect * MAP_PRIVATE accounting but it is possible that a shared * VMA is using the same page so check and skip such VMAs. */ if (iter_vma->vm_flags & VM_MAYSHARE) continue; /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these * areas. This is because a future no-page fault on this VMA * could insert a zeroed page instead of the data existing * from the time of fork. This would look like data corruption */ if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) unmap_hugepage_range(iter_vma, address, address + huge_page_size(h), folio, 0); } i_mmap_unlock_write(mapping); } /* * hugetlb_wp() should be called with page lock of the original hugepage held. * Called with hugetlb_fault_mutex_table held and pte_page locked so we * cannot race with other handlers or page migration. * Keep the pte_same checks anyway to make transition from the mutex easier. */ static vm_fault_t hugetlb_wp(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); struct hstate *h = hstate_vma(vma); struct folio *old_folio; struct folio *new_folio; bool cow_from_owner = false; vm_fault_t ret = 0; struct mmu_notifier_range range; /* * Never handle CoW for uffd-wp protected pages. It should only be * handled when the uffd-wp protection is removed.
* * Note that only the CoW optimization path (in hugetlb_no_page()) * can trigger this, because hugetlb_fault() will always resolve * uffd-wp bit first. */ if (!unshare && huge_pte_uffd_wp(pte)) return 0; /* Let's take out MAP_SHARED mappings first. */ if (vma->vm_flags & VM_MAYSHARE) { set_huge_ptep_writable(vma, vmf->address, vmf->pte); return 0; } old_folio = page_folio(pte_page(pte)); delayacct_wpcopy_start(); retry_avoidcopy: /* * If no-one else is actually using this page, we're the exclusive * owner and can reuse this page. * * Note that we don't rely on the (safer) folio refcount here, because * copying the hugetlb folio when there are unexpected (temporary) * folio references could harm simple fork()+exit() users when * we run out of free hugetlb folios: we would have to kill processes * in scenarios that used to work. As a side effect, there can still * be leaks between processes, for example, with FOLL_GET users. */ if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { if (!PageAnonExclusive(&old_folio->page)) { folio_move_anon_rmap(old_folio, vma); SetPageAnonExclusive(&old_folio->page); } if (likely(!unshare)) set_huge_ptep_maybe_writable(vma, vmf->address, vmf->pte); delayacct_wpcopy_end(); return 0; } VM_BUG_ON_PAGE(folio_test_anon(old_folio) && PageAnonExclusive(&old_folio->page), &old_folio->page); /* * If the process that created a MAP_PRIVATE mapping is about to perform * a COW due to a shared page count, attempt to satisfy the allocation * without using the existing reserves. * In order to determine whether this is a COW on a MAP_PRIVATE mapping it * is enough to check whether the old_folio is anonymous. This means that * the reserve for this address was consumed. If reserves were used, a * partially faulted mapping at the time of fork() could consume its reserves * on COW instead of the full address range. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && folio_test_anon(old_folio)) cow_from_owner = true; folio_get(old_folio); /* * Drop page table lock as buddy allocator may be called. It will * be acquired again before returning to the caller, as expected. */ spin_unlock(vmf->ptl); new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner); if (IS_ERR(new_folio)) { /* * If a process owning a MAP_PRIVATE mapping fails to COW, * it is due to references held by a child and an insufficient * huge page pool. To guarantee the original mapper's * reliability, unmap the page from child processes. The child * may get SIGKILLed if it later faults. */ if (cow_from_owner) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx; u32 hash; folio_put(old_folio); /* * Drop hugetlb_fault_mutex and vma_lock before * unmapping. unmapping needs to hold vma_lock * in write mode. Dropping vma_lock in read mode * here is OK as COW mappings do not interact with * PMD sharing. * * Reacquire both after unmap operation. */ idx = vma_hugecache_offset(h, vma, vmf->address); hash = hugetlb_fault_mutex_hash(mapping, idx); hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); unmap_ref_private(mm, vma, old_folio, vmf->address); mutex_lock(&hugetlb_fault_mutex_table[hash]); hugetlb_vma_lock_read(vma); spin_lock(vmf->ptl); vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) goto retry_avoidcopy; /* * race occurs while re-acquiring page table * lock, and our job is done.
*/ delayacct_wpcopy_end(); return 0; } ret = vmf_error(PTR_ERR(new_folio)); goto out_release_old; } /* * When the original hugepage is a shared one, it does not have * anon_vma prepared. */ ret = __vmf_anon_prepare(vmf); if (unlikely(ret)) goto out_release_all; if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) { ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); goto out_release_all; } __folio_mark_uptodate(new_folio); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address, vmf->address + huge_page_size(h)); mmu_notifier_invalidate_range_start(&range); /* * Retake the page table lock to check for racing updates * before the page tables are altered */ spin_lock(vmf->ptl); vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) { pte_t newpte = make_huge_pte(vma, new_folio, !unshare); /* Break COW or unshare */ huge_ptep_clear_flush(vma, vmf->address, vmf->pte); hugetlb_remove_rmap(old_folio); hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(mm, vmf->address, vmf->pte, newpte, huge_page_size(h)); folio_set_hugetlb_migratable(new_folio); /* Make the old page be freed below */ new_folio = old_folio; } spin_unlock(vmf->ptl); mmu_notifier_invalidate_range_end(&range); out_release_all: /* * No restore in case of successful pagetable update (Break COW or * unshare) */ if (new_folio != old_folio) restore_reserve_on_error(h, vma, vmf->address, new_folio); folio_put(new_folio); out_release_old: folio_put(old_folio); spin_lock(vmf->ptl); /* Caller expects lock to be held */ delayacct_wpcopy_end(); return ret; } /* * Return whether there is a pagecache page to back the given address within the VMA. */ bool hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = linear_page_index(vma, address); struct folio *folio; folio = filemap_get_folio(mapping, idx); if (IS_ERR(folio)) return false; folio_put(folio); return true; } int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t idx) { struct inode *inode = mapping->host; struct hstate *h = hstate_inode(inode); int err; idx <<= huge_page_order(h); __folio_set_locked(folio); err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); if (unlikely(err)) { __folio_clear_locked(folio); return err; } folio_clear_hugetlb_restore_reserve(folio); /* * mark folio dirty so that it will not be removed from cache/file * by non-hugetlbfs specific code paths. */ folio_mark_dirty(folio); spin_lock(&inode->i_lock); inode->i_blocks += blocks_per_huge_page(h); spin_unlock(&inode->i_lock); return 0; } static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf, struct address_space *mapping, unsigned long reason) { u32 hash; /* * vma_lock and hugetlb_fault_mutex must be dropped before handling * userfault. Also mmap_lock could be dropped due to handling * userfault; any vma operation should be careful from here. */ hugetlb_vma_unlock_read(vmf->vma); hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return handle_userfault(vmf, reason); } /* * Recheck pte with pgtable lock. Returns true if pte didn't change, or * false if pte changed or is changing.
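* * Callers use this to re-validate a pte value that was sampled without the lock (e.g. vmf->orig_pte) before committing to a slow path such as handing off a userfault or returning an allocation error.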
*/ static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t old_pte) { spinlock_t *ptl; bool same; ptl = huge_pte_lock(h, mm, ptep); same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte); spin_unlock(ptl); return same; } static vm_fault_t hugetlb_no_page(struct address_space *mapping, struct vm_fault *vmf) { u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); bool new_folio, new_anon_folio = false; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; struct hstate *h = hstate_vma(vma); vm_fault_t ret = VM_FAULT_SIGBUS; bool folio_locked = true; struct folio *folio; unsigned long size; pte_t new_pte; /* * Currently, we are forced to kill the process in the event the * original mapper has unmapped pages from the child due to a failed * COW/unsharing. Warn that such a situation has occurred as it may not * be obvious. */ if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", current->pid); goto out; } /* * Use page lock to guard against racing truncation * before we get page_table_lock. */ new_folio = false; folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); if (IS_ERR(folio)) { size = i_size_read(mapping->host) >> huge_page_shift(h); if (vmf->pgoff >= size) goto out; /* Check for page in userfault range */ if (userfaultfd_missing(vma)) { /* * Since hugetlb_no_page() was examining pte * without pgtable lock, we need to re-test under * lock because the pte may not be stable and could * have changed from under us. Try to detect * either changed or still-changing ptes and retry * properly when needed. * * Note that userfaultfd is actually fine with * false positives (e.g. caused by pte changed), * but not wrong logical events (e.g. caused by * reading a pte during changing). The latter can * confuse userspace, so the strictness is very * much preferred. E.g., MISSING event should * never happen on the page after UFFDIO_COPY has * correctly installed the page and returned. */ if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { ret = 0; goto out; } return hugetlb_handle_userfault(vmf, mapping, VM_UFFD_MISSING); } if (!(vma->vm_flags & VM_MAYSHARE)) { ret = __vmf_anon_prepare(vmf); if (unlikely(ret)) goto out; } folio = alloc_hugetlb_folio(vma, vmf->address, false); if (IS_ERR(folio)) { /* * Returning error will result in faulting task being * sent SIGBUS. The hugetlb fault mutex prevents two * tasks from racing to fault in the same page which * could result in false "unable to allocate" errors. * Page migration does not take the fault mutex, but * does a clear then write of ptes under page table * lock. Page fault code could race with migration, * notice the clear pte and try to allocate a page * here. Before returning error, get ptl and make * sure there really is no pte entry. */ if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) ret = vmf_error(PTR_ERR(folio)); else ret = 0; goto out; } folio_zero_user(folio, vmf->real_address); __folio_mark_uptodate(folio); new_folio = true; if (vma->vm_flags & VM_MAYSHARE) { int err = hugetlb_add_to_page_cache(folio, mapping, vmf->pgoff); if (err) { /* * err can't be -EEXIST, which would imply someone * else consumed the reservation, since the hugetlb * fault mutex is held when adding a hugetlb page * to the page cache. So it's safe to call * restore_reserve_on_error() here.
*/ restore_reserve_on_error(h, vma, vmf->address, folio); folio_put(folio); ret = VM_FAULT_SIGBUS; goto out; } } else { new_anon_folio = true; folio_lock(folio); } } else { /* * If a memory error occurs between mmap() and fault, some processes * don't have a hwpoisoned swap entry for the errored virtual address. * So we need to block hugepage fault by PG_hwpoison bit check. */ if (unlikely(folio_test_hwpoison(folio))) { ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } /* Check for page in userfault range. */ if (userfaultfd_minor(vma)) { folio_unlock(folio); folio_put(folio); /* See comment in userfaultfd_missing() block above */ if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { ret = 0; goto out; } return hugetlb_handle_userfault(vmf, mapping, VM_UFFD_MINOR); } } /* * If we are going to COW a private mapping later, we examine the * pending reservations for this page now. This will ensure that * any allocations necessary to record that reservation occur outside * the spinlock. */ if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { if (vma_needs_reservation(h, vma, vmf->address) < 0) { ret = VM_FAULT_OOM; goto backout_unlocked; } /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, vmf->address); } vmf->ptl = huge_pte_lock(h, mm, vmf->pte); ret = 0; /* If pte changed from under us, retry */ if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte)) goto backout; if (new_anon_folio) hugetlb_add_new_anon_rmap(folio, vma, vmf->address); else hugetlb_add_file_rmap(folio); new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED); /* * If this pte was previously wr-protected, keep it wr-protected even * if populated. */ if (unlikely(pte_is_uffd_wp_marker(vmf->orig_pte))) new_pte = huge_pte_mkuffd_wp(new_pte); set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); hugetlb_count_add(pages_per_huge_page(h), mm); if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { /* * No need to keep file folios locked. See comment in * hugetlb_fault(). */ if (!new_anon_folio) { folio_locked = false; folio_unlock(folio); } /* Optimization, do the COW without a second fault */ ret = hugetlb_wp(vmf); } spin_unlock(vmf->ptl); /* * Only set hugetlb_migratable in newly allocated pages. Existing pages * found in the pagecache may not have hugetlb_migratable if they have * been isolated for migration. */ if (new_folio) folio_set_hugetlb_migratable(folio); if (folio_locked) folio_unlock(folio); out: hugetlb_vma_unlock_read(vma); /* * We must check whether to release the per-VMA lock. __vmf_anon_prepare() is * the only way ret can be set to VM_FAULT_RETRY. */ if (unlikely(ret & VM_FAULT_RETRY)) vma_end_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return ret; backout: spin_unlock(vmf->ptl); backout_unlocked: /* We only need to restore reservations for private mappings */ if (new_anon_folio) restore_reserve_on_error(h, vma, vmf->address, folio); folio_unlock(folio); folio_put(folio); goto out; } #ifdef CONFIG_SMP u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) { unsigned long key[2]; u32 hash; key[0] = (unsigned long) mapping; key[1] = idx; hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); return hash & (num_fault_mutexes - 1); } #else /* * For uniprocessor systems we always use a single mutex, so just * return 0 and avoid the hashing overhead.
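* * (On SMP, the variant above hashes the (mapping, index) pair so that concurrent faults on different pages spread across the num_fault_mutexes mutexes; here a single mutex serializes everything.)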
*/ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) { return 0; } #endif vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) { vm_fault_t ret; u32 hash; struct folio *folio = NULL; struct hstate *h = hstate_vma(vma); struct address_space *mapping; bool need_wait_lock = false; struct vm_fault vmf = { .vma = vma, .address = address & huge_page_mask(h), .real_address = address, .flags = flags, .pgoff = vma_hugecache_offset(h, vma, address & huge_page_mask(h)), /* TODO: Track hugetlb faults using vm_fault */ /* * Some fields may not be initialized, be careful as it may * be hard to debug if called functions make assumptions */ }; /* * Serialize hugepage allocation and instantiation, so that we don't * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ mapping = vma->vm_file->f_mapping; hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* * Acquire vma lock before calling huge_pte_alloc and hold * until finished with vmf.pte. This prevents huge_pmd_unshare from * being called elsewhere and making the vmf.pte no longer valid. */ hugetlb_vma_lock_read(vma); vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h)); if (!vmf.pte) { hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return VM_FAULT_OOM; } vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte); if (huge_pte_none(vmf.orig_pte)) /* * hugetlb_no_page will drop vma lock and hugetlb fault * mutex internally, which make us return immediately. */ return hugetlb_no_page(mapping, &vmf); if (pte_is_marker(vmf.orig_pte)) { const pte_marker marker = softleaf_to_marker(softleaf_from_pte(vmf.orig_pte)); if (marker & PTE_MARKER_POISONED) { ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); goto out_mutex; } else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) { /* This isn't supported in hugetlb. */ ret = VM_FAULT_SIGSEGV; goto out_mutex; } return hugetlb_no_page(mapping, &vmf); } ret = 0; /* Not present, either a migration or a hwpoisoned entry */ if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) { const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte); if (softleaf_is_migration(softleaf)) { /* * Release the hugetlb fault lock now, but retain * the vma lock, because it is needed to guard the * huge_pte_lockptr() later in * migration_entry_wait_huge(). The vma lock will * be released there. */ mutex_unlock(&hugetlb_fault_mutex_table[hash]); migration_entry_wait_huge(vma, vmf.address, vmf.pte); return 0; } if (softleaf_is_hwpoison(softleaf)) { ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); } goto out_mutex; } /* * If we are going to COW/unshare the mapping later, we examine the * pending reservations for this page now. This will ensure that any * allocations necessary to record that reservation occur outside the * spinlock. 
*/ if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) { if (vma_needs_reservation(h, vma, vmf.address) < 0) { ret = VM_FAULT_OOM; goto out_mutex; } /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, vmf.address); } vmf.ptl = huge_pte_lock(h, mm, vmf.pte); /* Check for a racing update before calling hugetlb_wp() */ if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte)))) goto out_ptl; /* Handle userfault-wp first, before trying to lock more pages */ if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) && (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) { if (!userfaultfd_wp_async(vma)) { spin_unlock(vmf.ptl); hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return handle_userfault(&vmf, VM_UFFD_WP); } vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte); set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte, huge_page_size(hstate_vma(vma))); /* Fallthrough to CoW */ } if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { if (!huge_pte_write(vmf.orig_pte)) { /* * Anonymous folios need to be lock since hugetlb_wp() * checks whether we can re-use the folio exclusively * for us in case we are the only user of it. */ folio = page_folio(pte_page(vmf.orig_pte)); if (folio_test_anon(folio) && !folio_trylock(folio)) { need_wait_lock = true; goto out_ptl; } folio_get(folio); ret = hugetlb_wp(&vmf); if (folio_test_anon(folio)) folio_unlock(folio); folio_put(folio); goto out_ptl; } else if (likely(flags & FAULT_FLAG_WRITE)) { vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte); } } vmf.orig_pte = pte_mkyoung(vmf.orig_pte); if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte, flags & FAULT_FLAG_WRITE)) update_mmu_cache(vma, vmf.address, vmf.pte); out_ptl: spin_unlock(vmf.ptl); out_mutex: hugetlb_vma_unlock_read(vma); /* * We must check to release the per-VMA lock. __vmf_anon_prepare() in * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY. */ if (unlikely(ret & VM_FAULT_RETRY)) vma_end_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); /* * hugetlb_wp drops all the locks, but the folio lock, before trying to * unmap the folio from other processes. During that window, if another * process mapping that folio faults in, it will take the mutex and then * it will wait on folio_lock, causing an ABBA deadlock. * Use trylock instead and bail out if we fail. * * Ideally, we should hold a refcount on the folio we wait for, but we do * not want to use the folio after it becomes unlocked, but rather just * wait for it to become unlocked, so hopefully next fault successes on * the trylock. */ if (need_wait_lock) folio_wait_locked(folio); return ret; } #ifdef CONFIG_USERFAULTFD /* * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte(). */ static struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct mempolicy *mpol; nodemask_t *nodemask; struct folio *folio; gfp_t gfp_mask; int node; gfp_mask = htlb_alloc_mask(h); node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); /* * This is used to allocate a temporary hugetlb to hold the copied * content, which will then be copied again to the final hugetlb * consuming a reservation. Set the alloc_fallback to false to indicate * that breaking the per-node hugetlb pool is not allowed in this case. 
*/ folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false); mpol_cond_put(mpol); return folio; } /* * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte * with modifications for hugetlb pages. */ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop) { struct mm_struct *dst_mm = dst_vma->vm_mm; bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); bool wp_enabled = (flags & MFILL_ATOMIC_WP); struct hstate *h = hstate_vma(dst_vma); struct address_space *mapping = dst_vma->vm_file->f_mapping; pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); unsigned long size = huge_page_size(h); int vm_shared = dst_vma->vm_flags & VM_SHARED; pte_t _dst_pte; spinlock_t *ptl; int ret = -ENOMEM; struct folio *folio; bool folio_in_pagecache = false; pte_t dst_ptep; if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { ptl = huge_pte_lock(h, dst_mm, dst_pte); /* Don't overwrite any existing PTEs (even markers) */ if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) { spin_unlock(ptl); return -EEXIST; } _dst_pte = make_pte_marker(PTE_MARKER_POISONED); set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); /* No need to invalidate - it was non-present before */ update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); return 0; } if (is_continue) { ret = -EFAULT; folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) goto out; folio_in_pagecache = true; } else if (!*foliop) { /* If a folio already exists, then it's UFFDIO_COPY for * a non-missing case. Return -EEXIST. */ if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { ret = -EEXIST; goto out; } folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); if (IS_ERR(folio)) { pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE); if (actual_pte) { ret = -EEXIST; goto out; } ret = -ENOMEM; goto out; } ret = copy_folio_from_user(folio, (const void __user *) src_addr, false); /* fallback to copy_from_user outside mmap_lock */ if (unlikely(ret)) { ret = -ENOENT; /* Free the allocated folio which may have * consumed a reservation. */ restore_reserve_on_error(h, dst_vma, dst_addr, folio); folio_put(folio); /* Allocate a temporary folio to hold the copied * contents. */ folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); if (!folio) { ret = -ENOMEM; goto out; } *foliop = folio; /* Set the outparam foliop and return to the caller to * copy the contents outside the lock. Don't free the * folio. */ goto out; } } else { if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { folio_put(*foliop); ret = -EEXIST; *foliop = NULL; goto out; } folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); if (IS_ERR(folio)) { folio_put(*foliop); ret = -ENOMEM; *foliop = NULL; goto out; } ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); folio_put(*foliop); *foliop = NULL; if (ret) { folio_put(folio); goto out; } } /* * If we just allocated a new page, we need a memory barrier to ensure * that preceding stores to the page become visible before the * set_pte_at() write. The memory barrier inside __folio_mark_uptodate * is what we need. * * In the case where we have not allocated a new page (is_continue), * the page must already be uptodate. UFFDIO_CONTINUE already includes * an earlier smp_wmb() to ensure that prior stores will be visible * before the set_pte_at() write. 
*/ if (!is_continue) __folio_mark_uptodate(folio); else WARN_ON_ONCE(!folio_test_uptodate(folio)); /* Add shared, newly allocated pages to the page cache. */ if (vm_shared && !is_continue) { ret = -EFAULT; if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) goto out_release_nounlock; /* * Serialization between remove_inode_hugepages() and * hugetlb_add_to_page_cache() below happens through the * hugetlb_fault_mutex_table that here must be hold by * the caller. */ ret = hugetlb_add_to_page_cache(folio, mapping, idx); if (ret) goto out_release_nounlock; folio_in_pagecache = true; } ptl = huge_pte_lock(h, dst_mm, dst_pte); ret = -EIO; if (folio_test_hwpoison(folio)) goto out_release_unlock; ret = -EEXIST; dst_ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte); /* * See comment about UFFD marker overwriting in * mfill_atomic_install_pte(). */ if (!huge_pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep)) goto out_release_unlock; if (folio_in_pagecache) hugetlb_add_file_rmap(folio); else hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); /* * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY * with wp flag set, don't set pte write bit. */ _dst_pte = make_huge_pte(dst_vma, folio, !wp_enabled && !(is_continue && !vm_shared)); /* * Always mark UFFDIO_COPY page dirty; note that this may not be * extremely important for hugetlbfs for now since swapping is not * supported, but we should still be clear in that this page cannot be * thrown away at will, even if write bit not set. */ _dst_pte = huge_pte_mkdirty(_dst_pte); _dst_pte = pte_mkyoung(_dst_pte); if (wp_enabled) _dst_pte = huge_pte_mkuffd_wp(_dst_pte); set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); hugetlb_count_add(pages_per_huge_page(h), dst_mm); /* No need to invalidate - it was non-present before */ update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); if (!is_continue) folio_set_hugetlb_migratable(folio); if (vm_shared || is_continue) folio_unlock(folio); ret = 0; out: return ret; out_release_unlock: spin_unlock(ptl); if (vm_shared || is_continue) folio_unlock(folio); out_release_nounlock: if (!folio_in_pagecache) restore_reserve_on_error(h, dst_vma, dst_addr, folio); folio_put(folio); goto out; } #endif /* CONFIG_USERFAULTFD */ long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags) { struct mm_struct *mm = vma->vm_mm; unsigned long start = address; pte_t *ptep; pte_t pte; struct hstate *h = hstate_vma(vma); long pages = 0, psize = huge_page_size(h); bool shared_pmd = false; struct mmu_notifier_range range; unsigned long last_addr_mask; bool uffd_wp = cp_flags & MM_CP_UFFD_WP; bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; /* * In the case of shared PMDs, the area to flush could be beyond * start/end. Set range.start/range.end to cover the maximum possible * range if PMD sharing is possible. 
*/ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0, mm, start, end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); BUG_ON(address >= end); flush_cache_range(vma, range.start, range.end); mmu_notifier_invalidate_range_start(&range); hugetlb_vma_lock_write(vma); i_mmap_lock_write(vma->vm_file->f_mapping); last_addr_mask = hugetlb_mask_last_page(h); for (; address < end; address += psize) { softleaf_t entry; spinlock_t *ptl; ptep = hugetlb_walk(vma, address, psize); if (!ptep) { if (!uffd_wp) { address |= last_addr_mask; continue; } /* * Userfaultfd wr-protect requires pgtable * pre-allocations to install pte markers. */ ptep = huge_pte_alloc(mm, vma, address, psize); if (!ptep) { pages = -ENOMEM; break; } } ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, vma, address, ptep)) { /* * When uffd-wp is enabled on the vma, unshare * shouldn't happen at all. Warn about it if it * happened due to some reason. */ WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); pages++; spin_unlock(ptl); shared_pmd = true; address |= last_addr_mask; continue; } pte = huge_ptep_get(mm, address, ptep); if (huge_pte_none(pte)) { if (unlikely(uffd_wp)) /* Safe to modify directly (none->non-present). */ set_huge_pte_at(mm, address, ptep, make_pte_marker(PTE_MARKER_UFFD_WP), psize); goto next; } entry = softleaf_from_pte(pte); if (unlikely(softleaf_is_hwpoison(entry))) { /* Nothing to do. */ } else if (unlikely(softleaf_is_migration(entry))) { struct folio *folio = softleaf_to_folio(entry); pte_t newpte = pte; if (softleaf_is_migration_write(entry)) { if (folio_test_anon(folio)) entry = make_readable_exclusive_migration_entry( swp_offset(entry)); else entry = make_readable_migration_entry( swp_offset(entry)); newpte = swp_entry_to_pte(entry); pages++; } if (uffd_wp) newpte = pte_swp_mkuffd_wp(newpte); else if (uffd_wp_resolve) newpte = pte_swp_clear_uffd_wp(newpte); if (!pte_same(pte, newpte)) set_huge_pte_at(mm, address, ptep, newpte, psize); } else if (unlikely(pte_is_marker(pte))) { /* * Do nothing on a poison marker; page is * corrupted, permissions do not apply. Here * pte_marker_uffd_wp()==true implies !poison * because they're mutual exclusive. */ if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve) /* Safe to modify directly (non-present->none). */ huge_pte_clear(mm, address, ptep, psize); } else { pte_t old_pte; unsigned int shift = huge_page_shift(hstate_vma(vma)); old_pte = huge_ptep_modify_prot_start(vma, address, ptep); pte = huge_pte_modify(old_pte, newprot); pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (uffd_wp) pte = huge_pte_mkuffd_wp(pte); else if (uffd_wp_resolve) pte = huge_pte_clear_uffd_wp(pte); huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); pages++; } next: spin_unlock(ptl); cond_resched(); } /* * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare * may have cleared our pud entry and done put_page on the page table: * once we release i_mmap_rwsem, another task can do the final put_page * and that page table be reused and filled with junk. If we actually * did unshare a page of pmds, flush the range corresponding to the pud. */ if (shared_pmd) flush_hugetlb_tlb_range(vma, range.start, range.end); else flush_hugetlb_tlb_range(vma, start, end); /* * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are * downgrading page table protection not changing it to point to a new * page. 
* * See Documentation/mm/mmu_notifier.rst */ i_mmap_unlock_write(vma->vm_file->f_mapping); hugetlb_vma_unlock_write(vma); mmu_notifier_invalidate_range_end(&range); return pages > 0 ? (pages << h->order) : pages; } /* * Update the reservation map for the range [from, to]. * * Returns the number of entries that would be added to the reservation map * associated with the range [from, to]. This number is greater or equal to * zero. -EINVAL or -ENOMEM is returned in case of any errors. */ long hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_desc *desc, vm_flags_t vm_flags) { long chg = -1, add = -1, spool_resv, gbl_resv; struct hstate *h = hstate_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode); struct resv_map *resv_map; struct hugetlb_cgroup *h_cg = NULL; long gbl_reserve, regions_needed = 0; /* This should never happen */ if (from > to) { VM_WARN(1, "%s called with a negative range\n", __func__); return -EINVAL; } /* * Only apply hugepage reservation if asked. At fault time, an * attempt will be made for VM_NORESERVE to allocate a page * without using reserves */ if (vm_flags & VM_NORESERVE) return 0; /* * Shared mappings base their reservation on the number of pages that * are already allocated on behalf of the file. Private mappings need * to reserve the full area even if read-only as mprotect() may be * called to make the mapping read-write. Assume !desc is a shm mapping */ if (!desc || desc->vm_flags & VM_MAYSHARE) { /* * resv_map can not be NULL as hugetlb_reserve_pages is only * called for inodes for which resv_maps were created (see * hugetlbfs_get_inode). */ resv_map = inode_resv_map(inode); chg = region_chg(resv_map, from, to, ®ions_needed); } else { /* Private mapping. */ resv_map = resv_map_alloc(); if (!resv_map) goto out_err; chg = to - from; set_vma_desc_resv_map(desc, resv_map); set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER); } if (chg < 0) goto out_err; if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), chg * pages_per_huge_page(h), &h_cg) < 0) goto out_err; if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) { /* For private mappings, the hugetlb_cgroup uncharge info hangs * of the resv_map. */ resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); } /* * There must be enough pages in the subpool for the mapping. If * the subpool has a minimum size, there may be some global * reservations already in place (gbl_reserve). */ gbl_reserve = hugepage_subpool_get_pages(spool, chg); if (gbl_reserve < 0) goto out_uncharge_cgroup; /* * Check enough hugepages are available for the reservation. * Hand the pages back to the subpool if there are not */ if (hugetlb_acct_memory(h, gbl_reserve) < 0) goto out_put_pages; /* * Account for the reservations made. Shared mappings record regions * that have reservations as they are shared by multiple VMAs. * When the last VMA disappears, the region map says how much * the reservation was and the page cache tells how much of * the reservation was consumed. Private mappings are per-VMA and * only the consumed reservations are tracked. When the VMA * disappears, the original reservation is the VMA size and the * consumed reservations are stored in the map. 
Hence, nothing * else has to be done for private mappings here */ if (!desc || desc->vm_flags & VM_MAYSHARE) { add = region_add(resv_map, from, to, regions_needed, h, h_cg); if (unlikely(add < 0)) { hugetlb_acct_memory(h, -gbl_reserve); goto out_put_pages; } else if (unlikely(chg > add)) { /* * pages in this range were added to the reserve * map between region_chg and region_add. This * indicates a race with alloc_hugetlb_folio. Adjust * the subpool and reserve counts modified above * based on the difference. */ long rsv_adjust; /* * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the * reference to h_cg->css. See comment below for detail. */ hugetlb_cgroup_uncharge_cgroup_rsvd( hstate_index(h), (chg - add) * pages_per_huge_page(h), h_cg); rsv_adjust = hugepage_subpool_put_pages(spool, chg - add); hugetlb_acct_memory(h, -rsv_adjust); } else if (h_cg) { /* * The file_regions will hold their own reference to * h_cg->css. So we should release the reference held * via hugetlb_cgroup_charge_cgroup_rsvd() when we are * done. */ hugetlb_cgroup_put_rsvd_cgroup(h_cg); } } return chg; out_put_pages: spool_resv = chg - gbl_reserve; if (spool_resv) { /* put sub pool's reservation back, chg - gbl_reserve */ gbl_resv = hugepage_subpool_put_pages(spool, spool_resv); /* * subpool's reserved pages can not be put back due to race, * return to hstate. */ hugetlb_acct_memory(h, -gbl_resv); } out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), chg * pages_per_huge_page(h), h_cg); out_err: if (!desc || desc->vm_flags & VM_MAYSHARE) /* Only call region_abort if the region_chg succeeded but the * region_add failed or didn't run. */ if (chg >= 0 && add < 0) region_abort(resv_map, from, to, regions_needed); if (desc && is_vma_desc_resv_set(desc, HPAGE_RESV_OWNER)) { kref_put(&resv_map->refs, resv_map_release); set_vma_desc_resv_map(desc, NULL); } return chg < 0 ? chg : add < 0 ? add : -EINVAL; } long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed) { struct hstate *h = hstate_inode(inode); struct resv_map *resv_map = inode_resv_map(inode); long chg = 0; struct hugepage_subpool *spool = subpool_inode(inode); long gbl_reserve; /* * Since this routine can be called in the evict inode path for all * hugetlbfs inodes, resv_map could be NULL. */ if (resv_map) { chg = region_del(resv_map, start, end); /* * region_del() can fail in the rare case where a region * must be split and another region descriptor can not be * allocated. If end == LONG_MAX, it will not fail. */ if (chg < 0) return chg; } spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); /* * If the subpool has a minimum size, the number of global * reservations to be released may be adjusted. * * Note that !resv_map implies freed == 0. So (chg - freed) * won't go negative. 
 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}

#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
	vm_flags_t svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 *
	 * Also, vma_lock (vm_private_data) is required for sharing.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end) ||
	    !svma->vm_private_data)
		return 0;

	return saddr;
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (!(vma->vm_flags & VM_MAYSHARE))
		return false;
	if (!vma->vm_private_data)	/* vma lock required for sharing */
		return false;
	if (!range_in_vma(vma, start, end))
		return false;
	return true;
}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * vma needs to span at least one aligned PUD size, and the range
	 * must be at least partially within it.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
*/ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; pte_t *pte; i_mmap_lock_read(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) continue; saddr = page_table_shareable(svma, vma, addr, idx); if (saddr) { spte = hugetlb_walk(svma, saddr, vma_mmu_pagesize(svma)); if (spte) { ptdesc_pmd_pts_inc(virt_to_ptdesc(spte)); break; } } } if (!spte) goto out; spin_lock(&mm->page_table_lock); if (pud_none(*pud)) { pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); mm_inc_nr_pmds(mm); } else { ptdesc_pmd_pts_dec(virt_to_ptdesc(spte)); } spin_unlock(&mm->page_table_lock); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_unlock_read(mapping); return pte; } /* * unmap huge page backed by shared pte. * * Called with page table lock held. * * returns: 1 successfully unmapped a shared pte page * 0 the underlying pte page is not shared, or it is the last user */ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long sz = huge_page_size(hstate_vma(vma)); pgd_t *pgd = pgd_offset(mm, addr); p4d_t *p4d = p4d_offset(pgd, addr); pud_t *pud = pud_offset(p4d, addr); if (sz != PMD_SIZE) return 0; if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep))) return 0; i_mmap_assert_write_locked(vma->vm_file->f_mapping); hugetlb_vma_assert_locked(vma); pud_clear(pud); /* * Once our caller drops the rmap lock, some other process might be * using this page table as a normal, non-hugetlb page table. * Wait for pending gup_fast() in other threads to finish before letting * that happen. */ tlb_remove_table_sync_one(); ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep)); mm_dec_nr_pmds(mm); return 1; } #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) { return NULL; } int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return 0; } void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { } bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) { return false; } #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return NULL; pud = pud_alloc(mm, p4d, addr); if (pud) { if (sz == PUD_SIZE) { pte = (pte_t *)pud; } else { BUG_ON(sz != PMD_SIZE); if (want_pmd_share(vma, addr) && pud_none(*pud)) pte = huge_pmd_share(mm, vma, addr, pud); else pte = (pte_t *)pmd_alloc(mm, pud, addr); } } if (pte) { pte_t pteval = ptep_get_lockless(pte); BUG_ON(pte_present(pteval) && !pte_huge(pteval)); } return pte; } /* * huge_pte_offset() - Walk the page table to resolve the hugepage * entry at address @addr * * Return: Pointer to page table entry (PUD or PMD) for * address @addr, or NULL if a !p*d_present() entry is encountered and the * size @sz doesn't match the hugepage size at this level of the page * table. 
*/ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) return NULL; p4d = p4d_offset(pgd, addr); if (!p4d_present(*p4d)) return NULL; pud = pud_offset(p4d, addr); if (sz == PUD_SIZE) /* must be pud huge, non-present or none */ return (pte_t *)pud; if (!pud_present(*pud)) return NULL; /* must have a valid entry and size to go further */ pmd = pmd_offset(pud, addr); /* must be pmd huge, non-present or none */ return (pte_t *)pmd; } /* * Return a mask that can be used to update an address to the last huge * page in a page table page mapping size. Used to skip non-present * page table entries when linearly scanning address ranges. Architectures * with unique huge page to page table relationships can define their own * version of this routine. */ unsigned long hugetlb_mask_last_page(struct hstate *h) { unsigned long hp_size = huge_page_size(h); if (hp_size == PUD_SIZE) return P4D_SIZE - PUD_SIZE; else if (hp_size == PMD_SIZE) return PUD_SIZE - PMD_SIZE; else return 0UL; } #else /* See description above. Architectures can provide their own version. */ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) { #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING if (huge_page_size(h) == PMD_SIZE) return PUD_SIZE - PMD_SIZE; #endif return 0UL; } #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ /** * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio * @folio: the folio to isolate * @list: the list to add the folio to on success * * Isolate an allocated (refcount > 0) hugetlb folio, marking it as * isolated/non-migratable, and moving it from the active list to the * given list. * * Isolation will fail if @folio is not an allocated hugetlb folio, or if * it is already isolated/non-migratable. * * On success, an additional folio reference is taken that must be dropped * using folio_putback_hugetlb() to undo the isolation. * * Return: True if isolation worked, otherwise False. */ bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list) { bool ret = true; spin_lock_irq(&hugetlb_lock); if (!folio_test_hugetlb(folio) || !folio_test_hugetlb_migratable(folio) || !folio_try_get(folio)) { ret = false; goto unlock; } folio_clear_hugetlb_migratable(folio); list_move_tail(&folio->lru, list); unlock: spin_unlock_irq(&hugetlb_lock); return ret; } int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) { int ret = 0; *hugetlb = false; spin_lock_irq(&hugetlb_lock); if (folio_test_hugetlb(folio)) { *hugetlb = true; if (folio_test_hugetlb_freed(folio)) ret = 0; else if (folio_test_hugetlb_migratable(folio) || unpoison) ret = folio_try_get(folio); else ret = -EBUSY; } spin_unlock_irq(&hugetlb_lock); return ret; } int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) { int ret; spin_lock_irq(&hugetlb_lock); ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared); spin_unlock_irq(&hugetlb_lock); return ret; } /** * folio_putback_hugetlb - unisolate a hugetlb folio * @folio: the isolated hugetlb folio * * Putback/un-isolate the hugetlb folio that was previous isolated using * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it * back onto the active list. * * Will drop the additional folio reference obtained through * folio_isolate_hugetlb(). 
*/ void folio_putback_hugetlb(struct folio *folio) { spin_lock_irq(&hugetlb_lock); folio_set_hugetlb_migratable(folio); list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist); spin_unlock_irq(&hugetlb_lock); folio_put(folio); } void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) { struct hstate *h = folio_hstate(old_folio); hugetlb_cgroup_migrate(old_folio, new_folio); folio_set_owner_migrate_reason(new_folio, reason); /* * transfer temporary state of the new hugetlb folio. This is * reverse to other transitions because the newpage is going to * be final while the old one will be freed so it takes over * the temporary status. * * Also note that we have to transfer the per-node surplus state * here as well otherwise the global surplus count will not match * the per-node's. */ if (folio_test_hugetlb_temporary(new_folio)) { int old_nid = folio_nid(old_folio); int new_nid = folio_nid(new_folio); folio_set_hugetlb_temporary(old_folio); folio_clear_hugetlb_temporary(new_folio); /* * There is no need to transfer the per-node surplus state * when we do not cross the node. */ if (new_nid == old_nid) return; spin_lock_irq(&hugetlb_lock); if (h->surplus_huge_pages_node[old_nid]) { h->surplus_huge_pages_node[old_nid]--; h->surplus_huge_pages_node[new_nid]++; } spin_unlock_irq(&hugetlb_lock); } /* * Our old folio is isolated and has "migratable" cleared until it * is putback. As migration succeeded, set the new folio "migratable" * and add it to the active list. */ spin_lock_irq(&hugetlb_lock); folio_set_hugetlb_migratable(new_folio); list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist); spin_unlock_irq(&hugetlb_lock); } /* * If @take_locks is false, the caller must ensure that no concurrent page table * access can happen (except for gup_fast() and hardware page walks). * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like * concurrent page fault handling) and the file rmap lock. */ static void hugetlb_unshare_pmds(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool take_locks) { struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); struct mm_struct *mm = vma->vm_mm; struct mmu_notifier_range range; unsigned long address; spinlock_t *ptl; pte_t *ptep; if (!(vma->vm_flags & VM_MAYSHARE)) return; if (start >= end) return; flush_cache_range(vma, start, end); /* * No need to call adjust_range_if_pmd_sharing_possible(), because * we have already done the PUD_SIZE alignment. */ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end); mmu_notifier_invalidate_range_start(&range); if (take_locks) { hugetlb_vma_lock_write(vma); i_mmap_lock_write(vma->vm_file->f_mapping); } else { i_mmap_assert_write_locked(vma->vm_file->f_mapping); } for (address = start; address < end; address += PUD_SIZE) { ptep = hugetlb_walk(vma, address, sz); if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); huge_pmd_unshare(mm, vma, address, ptep); spin_unlock(ptl); } flush_hugetlb_tlb_range(vma, start, end); if (take_locks) { i_mmap_unlock_write(vma->vm_file->f_mapping); hugetlb_vma_unlock_write(vma); } /* * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see * Documentation/mm/mmu_notifier.rst. */ mmu_notifier_invalidate_range_end(&range); } /* * This function will unconditionally remove all the shared pmd pgtable entries * within the specific vma for a hugetlbfs memory range. 
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
			/* take_locks = */ true);
}

/*
 * For hugetlb, mremap() is an odd edge case - while the VMA copying is
 * performed, we permit both the old and new VMAs to reference the same
 * reservation.
 *
 * We fix this up after the operation succeeds, or if a newly allocated VMA
 * is closed as a result of a failure to allocate memory.
 */
void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		clear_vma_resv_huge_pages(vma);
}
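/*
 * Editor's sketch (not part of the kernel source above): a minimal,
 * self-contained userspace program reproducing the PUD rounding used by
 * hugetlb_unshare_all_pmds() -- only the PUD-aligned interior of a VMA can
 * be covered by shared PMD page tables, so the start is rounded up and the
 * end rounded down. Assumptions: PUD_SIZE is 1 GiB (as on x86-64), the
 * ALIGN()/ALIGN_DOWN() macros are re-derived here for power-of-two sizes,
 * and the VMA bounds are hypothetical.
 */
#include <stdio.h>

#define PUD_SIZE		(1UL << 30)		/* assumption: 1 GiB */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* round down to a */
#define ALIGN(x, a)		ALIGN_DOWN((x) + (a) - 1, (a))	/* round up */

int main(void)
{
	/* Hypothetical VMA bounds, not taken from the source above. */
	unsigned long vm_start = 0x7f0040200000UL;
	unsigned long vm_end   = 0x7f01c0a00000UL;

	unsigned long start = ALIGN(vm_start, PUD_SIZE);	/* round up */
	unsigned long end   = ALIGN_DOWN(vm_end, PUD_SIZE);	/* round down */

	if (start < end)
		printf("PMD-unshare window: [%#lx, %#lx)\n", start, end);
	else
		printf("VMA spans no fully aligned PUD; nothing to unshare\n");
	return 0;
}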
// SPDX-License-Identifier: GPL-2.0-or-later
/* A driver for the D-Link DSB-R100 USB radio and Gemtek USB Radio 21.
 * The device plugs into both the USB and an analog audio input, so this thing
 * only deals with initialisation and frequency setting; the
 * audio data has to be handled by a sound driver.
 *
 * Major issue: I can't find out where the device reports the signal
 * strength, and indeed the windows software apparently just looks
 * at the stereo indicator as well. So, scanning will only find
 * stereo stations. Sad, but I can't help it.
 *
 * Also, the windows program sends oodles of messages over to the
 * device, and I couldn't figure out their meaning. My suspicion
 * is that they don't have any:-)
 *
 * You might find some interesting stuff about this module at
 * http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr
 *
 * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool.
 *
 * Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/videodev2.h>
#include <linux/usb.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>

/*
 * Version Information
 */
MODULE_AUTHOR("Markus Demleitner <msdemlei@tucana.harvard.edu>");
MODULE_DESCRIPTION("D-Link DSB-R100 USB FM radio driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1.0");

#define DSB100_VENDOR 0x04b4
#define DSB100_PRODUCT 0x1002

/* Commands the device appears to understand */
#define DSB100_TUNE 1
#define DSB100_ONOFF 2

#define TB_LEN 16

/* Frequency limits in MHz -- these are European values. For Japanese
 * devices, that would be 76 and 91.
 */
#define FREQ_MIN 87.5
#define FREQ_MAX 108.0
#define FREQ_MUL 16000

#define v4l2_dev_to_radio(d) container_of(d, struct dsbr100_device, v4l2_dev)

static int radio_nr = -1;
module_param(radio_nr, int, 0);

/* Data for one (physical) device */
struct dsbr100_device {
	struct usb_device *usbdev;
	struct video_device videodev;
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler hdl;
	u8 *transfer_buffer;
	struct mutex v4l2_lock;
	int curfreq;
	bool stereo;
	bool muted;
};

/* Low-level device interface begins here */

/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
static int dsbr100_setfreq(struct dsbr100_device *radio, unsigned freq)
{
	unsigned f = (freq / 16 * 80) / 1000 + 856;
	int retval = 0;

	if (!radio->muted) {
		retval = usb_control_msg(radio->usbdev,
				usb_rcvctrlpipe(radio->usbdev, 0),
				DSB100_TUNE,
				USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
				(f >> 8) & 0x00ff, f & 0xff,
				radio->transfer_buffer, 8, 300);
		if (retval >= 0)
			mdelay(1);
	}

	if (retval >= 0) {
		radio->curfreq = freq;
		return 0;
	}
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, DSB100_TUNE);
	return retval;
}

/* switch on radio */
static int dsbr100_start(struct dsbr100_device *radio)
{
	int retval = usb_control_msg(radio->usbdev,
			usb_rcvctrlpipe(radio->usbdev, 0),
			DSB100_ONOFF,
			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			0x01, 0x00, radio->transfer_buffer, 8, 300);

	if (retval >= 0)
		return dsbr100_setfreq(radio, radio->curfreq);
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, DSB100_ONOFF);
	return retval;
}

/* switch off radio */
static int dsbr100_stop(struct dsbr100_device *radio)
{
	int retval = usb_control_msg(radio->usbdev,
			usb_rcvctrlpipe(radio->usbdev, 0),
			DSB100_ONOFF,
			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			0x00, 0x00, radio->transfer_buffer, 8, 300);

	if (retval >= 0)
		return 0;
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, DSB100_ONOFF);
	return retval;
}

/* return the device status. This is, in effect, just whether it
   sees a stereo signal or not. Pity. */
static void dsbr100_getstat(struct dsbr100_device *radio)
{
	int retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0x24, radio->transfer_buffer, 8, 300);

	if (retval < 0) {
		radio->stereo = false;
		dev_err(&radio->usbdev->dev,
			"%s - usb_control_msg returned %i, request %i\n",
			__func__, retval, USB_REQ_GET_STATUS);
	} else {
		radio->stereo = !(radio->transfer_buffer[0] & 0x01);
	}
}

static int vidioc_querycap(struct file *file, void *priv,
			struct v4l2_capability *v)
{
	struct dsbr100_device *radio = video_drvdata(file);

	strscpy(v->driver, "dsbr100", sizeof(v->driver));
	strscpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
	usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
	return 0;
}

static int vidioc_g_tuner(struct file *file, void *priv,
				struct v4l2_tuner *v)
{
	struct dsbr100_device *radio = video_drvdata(file);

	if (v->index > 0)
		return -EINVAL;

	dsbr100_getstat(radio);
	strscpy(v->name, "FM", sizeof(v->name));
	v->type = V4L2_TUNER_RADIO;
	v->rangelow = FREQ_MIN * FREQ_MUL;
	v->rangehigh = FREQ_MAX * FREQ_MUL;
	v->rxsubchans = radio->stereo ? V4L2_TUNER_SUB_STEREO :
		V4L2_TUNER_SUB_MONO;
	v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
	v->audmode = V4L2_TUNER_MODE_STEREO;
	v->signal = radio->stereo ?
		0xffff : 0; /* We can't get the signal strength */
	return 0;
}

static int vidioc_s_tuner(struct file *file, void *priv,
				const struct v4l2_tuner *v)
{
	return v->index ? -EINVAL : 0;
}

static int vidioc_s_frequency(struct file *file, void *priv,
				const struct v4l2_frequency *f)
{
	struct dsbr100_device *radio = video_drvdata(file);

	if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
		return -EINVAL;

	return dsbr100_setfreq(radio, clamp_t(unsigned, f->frequency,
			FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL));
}

static int vidioc_g_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct dsbr100_device *radio = video_drvdata(file);

	if (f->tuner)
		return -EINVAL;

	f->type = V4L2_TUNER_RADIO;
	f->frequency = radio->curfreq;
	return 0;
}

static int usb_dsbr100_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct dsbr100_device *radio =
		container_of(ctrl->handler, struct dsbr100_device, hdl);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		radio->muted = ctrl->val;
		return radio->muted ? dsbr100_stop(radio) : dsbr100_start(radio);
	}
	return -EINVAL;
}

/* USB subsystem interface begins here */

/*
 * Handle unplugging of the device.
 * We call video_unregister_device in any case.
 * The last function called in this procedure is
 * usb_dsbr100_release.
 */
static void usb_dsbr100_disconnect(struct usb_interface *intf)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);

	mutex_lock(&radio->v4l2_lock);
	/*
	 * Disconnect is also called on unload, and in that case we need to
	 * mute the device. This call will silently fail if it is called
	 * after a physical disconnect.
	 */
	usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		DSB100_ONOFF,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0x00, radio->transfer_buffer, 8, 300);
	usb_set_intfdata(intf, NULL);
	video_unregister_device(&radio->videodev);
	v4l2_device_disconnect(&radio->v4l2_dev);
	mutex_unlock(&radio->v4l2_lock);
	v4l2_device_put(&radio->v4l2_dev);
}

/* Suspend device - stop device. */
static int usb_dsbr100_suspend(struct usb_interface *intf,
						pm_message_t message)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);

	mutex_lock(&radio->v4l2_lock);
	if (!radio->muted && dsbr100_stop(radio) < 0)
		dev_warn(&intf->dev, "dsbr100_stop failed\n");
	mutex_unlock(&radio->v4l2_lock);

	dev_info(&intf->dev, "going into suspend..\n");
	return 0;
}

/* Resume device - start device.
 */
static int usb_dsbr100_resume(struct usb_interface *intf)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);

	mutex_lock(&radio->v4l2_lock);
	if (!radio->muted && dsbr100_start(radio) < 0)
		dev_warn(&intf->dev, "dsbr100_start failed\n");
	mutex_unlock(&radio->v4l2_lock);

	dev_info(&intf->dev, "coming out of suspend..\n");
	return 0;
}

/* free data structures */
static void usb_dsbr100_release(struct v4l2_device *v4l2_dev)
{
	struct dsbr100_device *radio = v4l2_dev_to_radio(v4l2_dev);

	v4l2_ctrl_handler_free(&radio->hdl);
	v4l2_device_unregister(&radio->v4l2_dev);
	kfree(radio->transfer_buffer);
	kfree(radio);
}

static const struct v4l2_ctrl_ops usb_dsbr100_ctrl_ops = {
	.s_ctrl = usb_dsbr100_s_ctrl,
};

/* File system interface */
static const struct v4l2_file_operations usb_dsbr100_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= v4l2_fh_open,
	.release	= v4l2_fh_release,
	.poll		= v4l2_ctrl_poll,
};

static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
	.vidioc_querycap    = vidioc_querycap,
	.vidioc_g_tuner     = vidioc_g_tuner,
	.vidioc_s_tuner     = vidioc_s_tuner,
	.vidioc_g_frequency = vidioc_g_frequency,
	.vidioc_s_frequency = vidioc_s_frequency,
	.vidioc_log_status  = v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};

/* check if the device is present and register with v4l and usb if it is */
static int usb_dsbr100_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
{
	struct dsbr100_device *radio;
	struct v4l2_device *v4l2_dev;
	int retval;

	radio = kzalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
	if (!radio)
		return -ENOMEM;

	radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
	if (!(radio->transfer_buffer)) {
		kfree(radio);
		return -ENOMEM;
	}

	v4l2_dev = &radio->v4l2_dev;
	v4l2_dev->release = usb_dsbr100_release;

	retval = v4l2_device_register(&intf->dev, v4l2_dev);
	if (retval < 0) {
		v4l2_err(v4l2_dev, "couldn't register v4l2_device\n");
		goto err_reg_dev;
	}

	v4l2_ctrl_handler_init(&radio->hdl, 1);
	v4l2_ctrl_new_std(&radio->hdl, &usb_dsbr100_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
	if (radio->hdl.error) {
		retval = radio->hdl.error;
		v4l2_err(v4l2_dev, "couldn't register control\n");
		goto err_reg_ctrl;
	}

	mutex_init(&radio->v4l2_lock);
	strscpy(radio->videodev.name, v4l2_dev->name,
		sizeof(radio->videodev.name));
	radio->videodev.v4l2_dev = v4l2_dev;
	radio->videodev.fops = &usb_dsbr100_fops;
	radio->videodev.ioctl_ops = &usb_dsbr100_ioctl_ops;
	radio->videodev.release = video_device_release_empty;
	radio->videodev.lock = &radio->v4l2_lock;
	radio->videodev.ctrl_handler = &radio->hdl;
	radio->videodev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;

	radio->usbdev = interface_to_usbdev(intf);
	radio->curfreq = FREQ_MIN * FREQ_MUL;
	radio->muted = true;

	video_set_drvdata(&radio->videodev, radio);
	usb_set_intfdata(intf, radio);

	retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
				       radio_nr);
	if (retval == 0)
		return 0;
	v4l2_err(v4l2_dev, "couldn't register video device\n");

err_reg_ctrl:
	v4l2_ctrl_handler_free(&radio->hdl);
	v4l2_device_unregister(v4l2_dev);
err_reg_dev:
	kfree(radio->transfer_buffer);
	kfree(radio);
	return retval;
}

static const struct usb_device_id usb_dsbr100_device_table[] = {
	{ USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, usb_dsbr100_device_table);

/* USB subsystem interface */
static struct usb_driver usb_dsbr100_driver = {
	.name		= "dsbr100",
	.probe		= usb_dsbr100_probe,
	.disconnect	= usb_dsbr100_disconnect,
	.id_table	=
			  usb_dsbr100_device_table,
	.suspend	= usb_dsbr100_suspend,
	.resume		= usb_dsbr100_resume,
	.reset_resume	= usb_dsbr100_resume,
};

module_usb_driver(usb_dsbr100_driver);
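/*
 * Editor's sketch (not part of the driver above): how a userspace program
 * might tune this radio through the V4L2 API. Because the driver reports
 * V4L2_TUNER_CAP_LOW, frequencies are in units of 1/16 kHz (FREQ_MUL ==
 * 16000 per MHz), so 100.0 MHz is 100 * 16000 = 1600000; dsbr100_setfreq()
 * then derives the device word as (1600000 / 16 * 80) / 1000 + 856 = 8856.
 * The /dev/radio0 node name is an assumption (the usual default for
 * VFL_TYPE_RADIO devices).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency f;
	int fd = open("/dev/radio0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open /dev/radio0");
		return 1;
	}

	memset(&f, 0, sizeof(f));
	f.tuner = 0;
	f.type = V4L2_TUNER_RADIO;
	f.frequency = 100 * 16000;	/* 100.0 MHz in 1/16 kHz units */

	if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0)
		perror("VIDIOC_S_FREQUENCY");

	close(fd);
	return 0;
}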
3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 | // SPDX-License-Identifier: GPL-2.0+ /* * comedi/comedi_fops.c * comedi kernel module * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org> * compat ioctls: * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk> * Copyright (C) 2007 MEV Ltd. 
<http://www.mev.co.uk/> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/comedi/comedidev.h> #include <linux/cdev.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/compat.h> #include "comedi_internal.h" /* * comedi_subdevice "runflags" * COMEDI_SRF_RT: DEPRECATED: command is running real-time * COMEDI_SRF_ERROR: indicates a COMEDI_CB_ERROR event has occurred * since the last command was started * COMEDI_SRF_RUNNING: command is running * COMEDI_SRF_FREE_SPRIV: free s->private on detach * * COMEDI_SRF_BUSY_MASK: runflags that indicate the subdevice is "busy" */ #define COMEDI_SRF_RT BIT(1) #define COMEDI_SRF_ERROR BIT(2) #define COMEDI_SRF_RUNNING BIT(27) #define COMEDI_SRF_FREE_SPRIV BIT(31) #define COMEDI_SRF_BUSY_MASK (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING) /** * struct comedi_file - Per-file private data for COMEDI device * @dev: COMEDI device. * @read_subdev: Current "read" subdevice. * @write_subdev: Current "write" subdevice. * @last_detach_count: Last known detach count. * @last_attached: Last known attached/detached state. */ struct comedi_file { struct comedi_device *dev; struct comedi_subdevice *read_subdev; struct comedi_subdevice *write_subdev; unsigned int last_detach_count; unsigned int last_attached:1; }; #define COMEDI_NUM_MINORS 0x100 #define COMEDI_NUM_SUBDEVICE_MINORS \ (COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS) static unsigned short comedi_num_legacy_minors; module_param(comedi_num_legacy_minors, ushort, 0444); MODULE_PARM_DESC(comedi_num_legacy_minors, "number of comedi minor devices to reserve for non-auto-configured devices (default 0)" ); unsigned int comedi_default_buf_size_kb = CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB; module_param(comedi_default_buf_size_kb, uint, 0644); MODULE_PARM_DESC(comedi_default_buf_size_kb, "default asynchronous buffer size in KiB (default " __MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB) ")"); unsigned int comedi_default_buf_maxsize_kb = CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB; module_param(comedi_default_buf_maxsize_kb, uint, 0644); MODULE_PARM_DESC(comedi_default_buf_maxsize_kb, "default maximum size of asynchronous buffer in KiB (default " __MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB) ")"); static DEFINE_MUTEX(comedi_board_minor_table_lock); static struct comedi_device *comedi_board_minor_table[COMEDI_NUM_BOARD_MINORS]; static DEFINE_MUTEX(comedi_subdevice_minor_table_lock); /* Note: indexed by minor - COMEDI_NUM_BOARD_MINORS. */ static struct comedi_subdevice *comedi_subdevice_minor_table[COMEDI_NUM_SUBDEVICE_MINORS]; static struct cdev comedi_cdev; static void comedi_device_init(struct comedi_device *dev) { kref_init(&dev->refcount); spin_lock_init(&dev->spinlock); mutex_init(&dev->mutex); init_rwsem(&dev->attach_lock); dev->minor = -1; } static void comedi_dev_kref_release(struct kref *kref) { struct comedi_device *dev = container_of(kref, struct comedi_device, refcount); mutex_destroy(&dev->mutex); put_device(dev->class_dev); kfree(dev); } /** * comedi_dev_put() - Release a use of a COMEDI device * @dev: COMEDI device. * * Must be called when a user of a COMEDI device is finished with it. * When the last user of the COMEDI device calls this function, the * COMEDI device is destroyed.
* * Return: 1 if the COMEDI device is destroyed by this call or @dev is * NULL, otherwise return 0. Callers must not assume the COMEDI * device is still valid if this function returns 0. */ int comedi_dev_put(struct comedi_device *dev) { if (dev) return kref_put(&dev->refcount, comedi_dev_kref_release); return 1; } EXPORT_SYMBOL_GPL(comedi_dev_put); static struct comedi_device *comedi_dev_get(struct comedi_device *dev) { if (dev) kref_get(&dev->refcount); return dev; } static void comedi_device_cleanup(struct comedi_device *dev) { struct module *driver_module = NULL; if (!dev) return; mutex_lock(&dev->mutex); if (dev->attached) driver_module = dev->driver->module; comedi_device_detach(dev); if (driver_module && dev->use_count) module_put(driver_module); mutex_unlock(&dev->mutex); } static bool comedi_clear_board_dev(struct comedi_device *dev) { unsigned int i = dev->minor; bool cleared = false; lockdep_assert_held(&dev->mutex); mutex_lock(&comedi_board_minor_table_lock); if (dev == comedi_board_minor_table[i]) { comedi_board_minor_table[i] = NULL; cleared = true; } mutex_unlock(&comedi_board_minor_table_lock); return cleared; } static struct comedi_device *comedi_clear_board_minor(unsigned int minor) { struct comedi_device *dev; mutex_lock(&comedi_board_minor_table_lock); dev = comedi_board_minor_table[minor]; comedi_board_minor_table[minor] = NULL; mutex_unlock(&comedi_board_minor_table_lock); return dev; } static struct comedi_subdevice * comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned int minor) { struct comedi_subdevice *s; unsigned int i = minor - COMEDI_NUM_BOARD_MINORS; mutex_lock(&comedi_subdevice_minor_table_lock); s = comedi_subdevice_minor_table[i]; if (s && s->device != dev) s = NULL; mutex_unlock(&comedi_subdevice_minor_table_lock); return s; } static struct comedi_device *comedi_dev_get_from_board_minor(unsigned int minor) { struct comedi_device *dev; mutex_lock(&comedi_board_minor_table_lock); dev = comedi_dev_get(comedi_board_minor_table[minor]); mutex_unlock(&comedi_board_minor_table_lock); return dev; } static struct comedi_device * comedi_dev_get_from_subdevice_minor(unsigned int minor) { struct comedi_device *dev; struct comedi_subdevice *s; unsigned int i = minor - COMEDI_NUM_BOARD_MINORS; mutex_lock(&comedi_subdevice_minor_table_lock); s = comedi_subdevice_minor_table[i]; dev = comedi_dev_get(s ? s->device : NULL); mutex_unlock(&comedi_subdevice_minor_table_lock); return dev; } /** * comedi_dev_get_from_minor() - Get COMEDI device by minor device number * @minor: Minor device number. * * Finds the COMEDI device associated with the minor device number, if any, * and increments its reference count. The COMEDI device is prevented from * being freed until a matching call is made to comedi_dev_put(). * * Return: A pointer to the COMEDI device if it exists, with its usage * reference incremented. Return NULL if no COMEDI device exists with the * specified minor device number. 
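 *
 * A minimal usage sketch (illustrative only, not taken from the
 * original source):
 *
 *	dev = comedi_dev_get_from_minor(minor);
 *	if (!dev)
 *		return -ENODEV;
 *	... use dev, typically under dev->mutex ...
 *	comedi_dev_put(dev);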
*/ struct comedi_device *comedi_dev_get_from_minor(unsigned int minor) { if (minor < COMEDI_NUM_BOARD_MINORS) return comedi_dev_get_from_board_minor(minor); return comedi_dev_get_from_subdevice_minor(minor); } EXPORT_SYMBOL_GPL(comedi_dev_get_from_minor); static struct comedi_subdevice * comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (minor >= COMEDI_NUM_BOARD_MINORS) { s = comedi_subdevice_from_minor(dev, minor); if (!s || (s->subdev_flags & SDF_CMD_READ)) return s; } return dev->read_subdev; } static struct comedi_subdevice * comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (minor >= COMEDI_NUM_BOARD_MINORS) { s = comedi_subdevice_from_minor(dev, minor); if (!s || (s->subdev_flags & SDF_CMD_WRITE)) return s; } return dev->write_subdev; } static void comedi_file_reset(struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_subdevice *s, *read_s, *write_s; unsigned int minor = iminor(file_inode(file)); read_s = dev->read_subdev; write_s = dev->write_subdev; if (minor >= COMEDI_NUM_BOARD_MINORS) { s = comedi_subdevice_from_minor(dev, minor); if (!s || s->subdev_flags & SDF_CMD_READ) read_s = s; if (!s || s->subdev_flags & SDF_CMD_WRITE) write_s = s; } cfp->last_attached = dev->attached; cfp->last_detach_count = dev->detach_count; WRITE_ONCE(cfp->read_subdev, read_s); WRITE_ONCE(cfp->write_subdev, write_s); } static void comedi_file_check(struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; if (cfp->last_attached != dev->attached || cfp->last_detach_count != dev->detach_count) comedi_file_reset(file); } static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file) { struct comedi_file *cfp = file->private_data; comedi_file_check(file); return READ_ONCE(cfp->read_subdev); } static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file) { struct comedi_file *cfp = file->private_data; comedi_file_check(file); return READ_ONCE(cfp->write_subdev); } static int resize_async_buffer(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int new_size) { struct comedi_async *async = s->async; int retval; lockdep_assert_held(&dev->mutex); if (new_size > async->max_bufsize) return -EPERM; if (s->busy) { dev_dbg(dev->class_dev, "subdevice is busy, cannot resize buffer\n"); return -EBUSY; } if (comedi_buf_is_mmapped(s)) { dev_dbg(dev->class_dev, "subdevice is mmapped, cannot resize buffer\n"); return -EBUSY; } /* make sure buffer is an integral number of pages (we round up) */ new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK; retval = comedi_buf_alloc(dev, s, new_size); if (retval < 0) return retval; if (s->buf_change) { retval = s->buf_change(dev, s); if (retval < 0) return retval; } dev_dbg(dev->class_dev, "subd %d buffer resized to %i bytes\n", s->index, async->prealloc_bufsz); return 0; } /* sysfs attribute files */ static ssize_t max_read_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) size = s->async->max_bufsize / 1024; 
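/* max_bufsize is kept in bytes; the attribute reports KiB to match its "_kb" name (e.g. a 65536-byte limit reads back as 64). */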
mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t max_read_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) s->async->max_bufsize = size; else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(max_read_buffer_kb); static ssize_t read_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) size = s->async->prealloc_bufsz / 1024; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t read_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) err = resize_async_buffer(dev, s, size); else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(read_buffer_kb); static ssize_t max_write_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) size = s->async->max_bufsize / 1024; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t max_write_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) s->async->max_bufsize = size; else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? 
err : count; } static DEVICE_ATTR_RW(max_write_buffer_kb); static ssize_t write_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) size = s->async->prealloc_bufsz / 1024; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t write_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) err = resize_async_buffer(dev, s, size); else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(write_buffer_kb); static struct attribute *comedi_dev_attrs[] = { &dev_attr_max_read_buffer_kb.attr, &dev_attr_read_buffer_kb.attr, &dev_attr_max_write_buffer_kb.attr, &dev_attr_write_buffer_kb.attr, NULL, }; ATTRIBUTE_GROUPS(comedi_dev); static const struct class comedi_class = { .name = "comedi", .dev_groups = comedi_dev_groups, }; static void comedi_free_board_dev(struct comedi_device *dev) { if (dev) { comedi_device_cleanup(dev); if (dev->class_dev) { device_destroy(&comedi_class, MKDEV(COMEDI_MAJOR, dev->minor)); } comedi_dev_put(dev); } } static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s, unsigned int bits) { s->runflags &= ~bits; } static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s, unsigned int bits) { s->runflags |= bits; } static void comedi_update_subdevice_runflags(struct comedi_subdevice *s, unsigned int mask, unsigned int bits) { unsigned long flags; spin_lock_irqsave(&s->spin_lock, flags); __comedi_clear_subdevice_runflags(s, mask); __comedi_set_subdevice_runflags(s, bits & mask); spin_unlock_irqrestore(&s->spin_lock, flags); } static unsigned int __comedi_get_subdevice_runflags(struct comedi_subdevice *s) { return s->runflags; } static unsigned int comedi_get_subdevice_runflags(struct comedi_subdevice *s) { unsigned long flags; unsigned int runflags; spin_lock_irqsave(&s->spin_lock, flags); runflags = __comedi_get_subdevice_runflags(s); spin_unlock_irqrestore(&s->spin_lock, flags); return runflags; } static bool comedi_is_runflags_running(unsigned int runflags) { return runflags & COMEDI_SRF_RUNNING; } static bool comedi_is_runflags_in_error(unsigned int runflags) { return runflags & COMEDI_SRF_ERROR; } /** * comedi_is_subdevice_running() - Check if async command running on subdevice * @s: COMEDI subdevice. * * Return: %true if an asynchronous COMEDI command is active on the * subdevice, else %false. 
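 *
 * The result is only a snapshot: the command may terminate as soon as
 * the spinlock protecting the run flags has been dropped.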
*/ bool comedi_is_subdevice_running(struct comedi_subdevice *s) { unsigned int runflags = comedi_get_subdevice_runflags(s); return comedi_is_runflags_running(runflags); } EXPORT_SYMBOL_GPL(comedi_is_subdevice_running); static bool __comedi_is_subdevice_running(struct comedi_subdevice *s) { unsigned int runflags = __comedi_get_subdevice_runflags(s); return comedi_is_runflags_running(runflags); } bool comedi_can_auto_free_spriv(struct comedi_subdevice *s) { unsigned int runflags = __comedi_get_subdevice_runflags(s); return runflags & COMEDI_SRF_FREE_SPRIV; } /** * comedi_set_spriv_auto_free() - Mark subdevice private data as freeable * @s: COMEDI subdevice. * * Mark the subdevice as having a pointer to private data that can be * automatically freed when the COMEDI device is detached from the low-level * driver. */ void comedi_set_spriv_auto_free(struct comedi_subdevice *s) { __comedi_set_subdevice_runflags(s, COMEDI_SRF_FREE_SPRIV); } EXPORT_SYMBOL_GPL(comedi_set_spriv_auto_free); /** * comedi_alloc_spriv - Allocate memory for the subdevice private data * @s: COMEDI subdevice. * @size: Size of the memory to allocate. * * Allocate memory for the subdevice private data and point @s->private * to it. The memory will be freed automatically when the COMEDI device * is detached from the low-level driver. * * Return: A pointer to the allocated memory @s->private on success. * Return NULL on failure. */ void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size) { s->private = kzalloc(size, GFP_KERNEL); if (s->private) comedi_set_spriv_auto_free(s); return s->private; } EXPORT_SYMBOL_GPL(comedi_alloc_spriv); /* * This function restores a subdevice to an idle state. */ static void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; lockdep_assert_held(&dev->mutex); comedi_update_subdevice_runflags(s, COMEDI_SRF_RUNNING, 0); if (async) { comedi_buf_reset(s); async->inttrig = NULL; kfree(async->cmd.chanlist); async->cmd.chanlist = NULL; s->busy = NULL; wake_up_interruptible_all(&async->wait_head); } else { dev_err(dev->class_dev, "BUG: (?) %s called with async=NULL\n", __func__); s->busy = NULL; } } static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { int ret = 0; lockdep_assert_held(&dev->mutex); if (comedi_is_subdevice_running(s) && s->cancel) ret = s->cancel(dev, s); do_become_nonbusy(dev, s); return ret; } void comedi_device_cancel_all(struct comedi_device *dev) { struct comedi_subdevice *s; int i; lockdep_assert_held(&dev->mutex); if (!dev->attached) return; for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->async) do_cancel(dev, s); } } static int is_device_busy(struct comedi_device *dev) { struct comedi_subdevice *s; int i; lockdep_assert_held_write(&dev->attach_lock); lockdep_assert_held(&dev->mutex); if (!dev->attached) return 0; for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->busy) return 1; if (!s->async) continue; if (comedi_buf_is_mmapped(s)) return 1; /* * There may be tasks still waiting on the subdevice's wait * queue, although they should already be about to be removed * from it since the subdevice has no active async command. 
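 * Treating that brief window as "busy" keeps the device from being
 * detached while those tasks are still on the wait queue.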
*/ if (wq_has_sleeper(&s->async->wait_head)) return 1; } return 0; } /* * COMEDI_DEVCONFIG ioctl * attaches (and configures) or detaches a legacy device * * arg: * pointer to comedi_devconfig structure (NULL if detaching) * * reads: * comedi_devconfig structure (if attaching) * * writes: * nothing */ static int do_devconfig_ioctl(struct comedi_device *dev, struct comedi_devconfig __user *arg) { struct comedi_devconfig it; lockdep_assert_held(&dev->mutex); if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!arg) { int rc = 0; if (dev->attached) { down_write(&dev->attach_lock); if (is_device_busy(dev)) { rc = -EBUSY; } else { struct module *driver_module = dev->driver->module; comedi_device_detach_locked(dev); module_put(driver_module); } up_write(&dev->attach_lock); } return rc; } if (copy_from_user(&it, arg, sizeof(it))) return -EFAULT; it.board_name[COMEDI_NAMELEN - 1] = 0; if (it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) { dev_warn(dev->class_dev, "comedi_config --init_data is deprecated\n"); return -EINVAL; } if (dev->minor >= comedi_num_legacy_minors) /* don't re-use dynamically allocated comedi devices */ return -EBUSY; /* This increments the driver module count on success. */ return comedi_device_attach(dev, &it); } /* * COMEDI_BUFCONFIG ioctl * buffer configuration * * arg: * pointer to comedi_bufconfig structure * * reads: * comedi_bufconfig structure * * writes: * modified comedi_bufconfig structure */ static int do_bufconfig_ioctl(struct comedi_device *dev, struct comedi_bufconfig __user *arg) { struct comedi_bufconfig bc; struct comedi_async *async; struct comedi_subdevice *s; int retval = 0; lockdep_assert_held(&dev->mutex); if (copy_from_user(&bc, arg, sizeof(bc))) return -EFAULT; if (bc.subdevice >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[bc.subdevice]; async = s->async; if (!async) { dev_dbg(dev->class_dev, "subdevice does not have async capability\n"); bc.size = 0; bc.maximum_size = 0; goto copyback; } if (bc.maximum_size) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; async->max_bufsize = bc.maximum_size; } if (bc.size) { retval = resize_async_buffer(dev, s, bc.size); if (retval < 0) return retval; } bc.size = async->prealloc_bufsz; bc.maximum_size = async->max_bufsize; copyback: if (copy_to_user(arg, &bc, sizeof(bc))) return -EFAULT; return 0; } /* * COMEDI_DEVINFO ioctl * device info * * arg: * pointer to comedi_devinfo structure * * reads: * nothing * * writes: * comedi_devinfo structure */ static int do_devinfo_ioctl(struct comedi_device *dev, struct comedi_devinfo __user *arg, struct file *file) { struct comedi_subdevice *s; struct comedi_devinfo devinfo; lockdep_assert_held(&dev->mutex); memset(&devinfo, 0, sizeof(devinfo)); /* fill devinfo structure */ devinfo.version_code = COMEDI_VERSION_CODE; devinfo.n_subdevs = dev->n_subdevices; strscpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN); strscpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN); s = comedi_file_read_subdevice(file); if (s) devinfo.read_subdevice = s->index; else devinfo.read_subdevice = -1; s = comedi_file_write_subdevice(file); if (s) devinfo.write_subdevice = s->index; else devinfo.write_subdevice = -1; if (copy_to_user(arg, &devinfo, sizeof(devinfo))) return -EFAULT; return 0; } /* * COMEDI_SUBDINFO ioctl * subdevices info * * arg: * pointer to array of comedi_subdinfo structures * * reads: * nothing * * writes: * array of comedi_subdinfo structures */ static int do_subdinfo_ioctl(struct comedi_device *dev, struct comedi_subdinfo __user *arg, void *file) { int ret, 
i; struct comedi_subdinfo *tmp, *us; struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); tmp = kcalloc(dev->n_subdevices, sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; /* fill subdinfo structs */ for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; us = tmp + i; us->type = s->type; us->n_chan = s->n_chan; us->subd_flags = s->subdev_flags; if (comedi_is_subdevice_running(s)) us->subd_flags |= SDF_RUNNING; #define TIMER_nanosec 5 /* backwards compatibility */ us->timer_type = TIMER_nanosec; us->len_chanlist = s->len_chanlist; us->maxdata = s->maxdata; if (s->range_table) { us->range_type = (i << 24) | (0 << 16) | (s->range_table->length); } else { us->range_type = 0; /* XXX */ } if (s->busy) us->subd_flags |= SDF_BUSY; if (s->busy == file) us->subd_flags |= SDF_BUSY_OWNER; if (s->lock) us->subd_flags |= SDF_LOCKED; if (s->lock == file) us->subd_flags |= SDF_LOCK_OWNER; if (!s->maxdata && s->maxdata_list) us->subd_flags |= SDF_MAXDATA; if (s->range_table_list) us->subd_flags |= SDF_RANGETYPE; if (s->do_cmd) us->subd_flags |= SDF_CMD; if (s->insn_bits != &insn_inval) us->insn_bits_support = COMEDI_SUPPORTED; else us->insn_bits_support = COMEDI_UNSUPPORTED; } ret = copy_to_user(arg, tmp, dev->n_subdevices * sizeof(*tmp)); kfree(tmp); return ret ? -EFAULT : 0; } /* * COMEDI_CHANINFO ioctl * subdevice channel info * * arg: * pointer to comedi_chaninfo structure * * reads: * comedi_chaninfo structure * * writes: * array of maxdata values to chaninfo->maxdata_list if requested * array of range table lengths to chaninfo->range_table_list if requested */ static int do_chaninfo_ioctl(struct comedi_device *dev, struct comedi_chaninfo *it) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (it->subdev >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[it->subdev]; if (it->maxdata_list) { if (s->maxdata || !s->maxdata_list) return -EINVAL; if (copy_to_user(it->maxdata_list, s->maxdata_list, s->n_chan * sizeof(unsigned int))) return -EFAULT; } if (it->flaglist) return -EINVAL; /* flaglist not supported */ if (it->rangelist) { int i; if (!s->range_table_list) return -EINVAL; for (i = 0; i < s->n_chan; i++) { int x; x = (dev->minor << 28) | (it->subdev << 24) | (i << 16) | (s->range_table_list[i]->length); if (put_user(x, it->rangelist + i)) return -EFAULT; } } return 0; } /* * COMEDI_BUFINFO ioctl * buffer information * * arg: * pointer to comedi_bufinfo structure * * reads: * comedi_bufinfo structure * * writes: * modified comedi_bufinfo structure */ static int do_bufinfo_ioctl(struct comedi_device *dev, struct comedi_bufinfo __user *arg, void *file) { struct comedi_bufinfo bi; struct comedi_subdevice *s; struct comedi_async *async; unsigned int runflags; int retval = 0; bool become_nonbusy = false; lockdep_assert_held(&dev->mutex); if (copy_from_user(&bi, arg, sizeof(bi))) return -EFAULT; if (bi.subdevice >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[bi.subdevice]; async = s->async; if (!async || s->busy != file) return -EINVAL; runflags = comedi_get_subdevice_runflags(s); if (!(async->cmd.flags & CMDF_WRITE)) { /* command was set up in "read" direction */ if (bi.bytes_read) { comedi_buf_read_alloc(s, bi.bytes_read); bi.bytes_read = comedi_buf_read_free(s, bi.bytes_read); } /* * If nothing left to read, and command has stopped, and * {"read" position not updated or command stopped normally}, * then become non-busy. 
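 * In practice, an errored command therefore stays busy until a final
 * call with bytes_read == 0 finds the buffer drained, at which point
 * -EPIPE is returned below.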
*/ if (comedi_buf_read_n_available(s) == 0 && !comedi_is_runflags_running(runflags) && (bi.bytes_read == 0 || !comedi_is_runflags_in_error(runflags))) { become_nonbusy = true; if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; } bi.bytes_written = 0; } else { /* command was set up in "write" direction */ if (!comedi_is_runflags_running(runflags)) { bi.bytes_written = 0; become_nonbusy = true; if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; } else if (bi.bytes_written) { comedi_buf_write_alloc(s, bi.bytes_written); bi.bytes_written = comedi_buf_write_free(s, bi.bytes_written); } bi.bytes_read = 0; } bi.buf_write_count = async->buf_write_count; bi.buf_write_ptr = async->buf_write_ptr; bi.buf_read_count = async->buf_read_count; bi.buf_read_ptr = async->buf_read_ptr; if (become_nonbusy) do_become_nonbusy(dev, s); if (retval) return retval; if (copy_to_user(arg, &bi, sizeof(bi))) return -EFAULT; return 0; } static int check_insn_config_length(struct comedi_insn *insn, unsigned int *data) { if (insn->n < 1) return -EINVAL; switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: case INSN_CONFIG_DIO_INPUT: case INSN_CONFIG_DISARM: case INSN_CONFIG_RESET: if (insn->n == 1) return 0; break; case INSN_CONFIG_ARM: case INSN_CONFIG_DIO_QUERY: case INSN_CONFIG_BLOCK_SIZE: case INSN_CONFIG_FILTER: case INSN_CONFIG_SERIAL_CLOCK: case INSN_CONFIG_BIDIRECTIONAL_DATA: case INSN_CONFIG_ALT_SOURCE: case INSN_CONFIG_SET_COUNTER_MODE: case INSN_CONFIG_8254_READ_STATUS: case INSN_CONFIG_SET_ROUTING: case INSN_CONFIG_GET_ROUTING: case INSN_CONFIG_GET_PWM_STATUS: case INSN_CONFIG_PWM_SET_PERIOD: case INSN_CONFIG_PWM_GET_PERIOD: if (insn->n == 2) return 0; break; case INSN_CONFIG_SET_GATE_SRC: case INSN_CONFIG_GET_GATE_SRC: case INSN_CONFIG_SET_CLOCK_SRC: case INSN_CONFIG_GET_CLOCK_SRC: case INSN_CONFIG_SET_OTHER_SRC: case INSN_CONFIG_GET_COUNTER_STATUS: case INSN_CONFIG_GET_PWM_OUTPUT: case INSN_CONFIG_PWM_SET_H_BRIDGE: case INSN_CONFIG_PWM_GET_H_BRIDGE: case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: if (insn->n == 3) return 0; break; case INSN_CONFIG_PWM_OUTPUT: case INSN_CONFIG_ANALOG_TRIG: case INSN_CONFIG_TIMER_1: if (insn->n == 5) return 0; break; case INSN_CONFIG_DIGITAL_TRIG: if (insn->n == 6) return 0; break; case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS: if (insn->n >= 4) return 0; break; /* * by default we allow the insn since we don't have checks for * all possible cases yet */ default: pr_warn("No check for data length of config insn id %i is implemented\n", data[0]); pr_warn("Add a check to %s in %s\n", __func__, __FILE__); pr_warn("Assuming n=%i is correct\n", insn->n); return 0; } return -EINVAL; } static int check_insn_device_config_length(struct comedi_insn *insn, unsigned int *data) { if (insn->n < 1) return -EINVAL; switch (data[0]) { case INSN_DEVICE_CONFIG_TEST_ROUTE: case INSN_DEVICE_CONFIG_CONNECT_ROUTE: case INSN_DEVICE_CONFIG_DISCONNECT_ROUTE: if (insn->n == 3) return 0; break; case INSN_DEVICE_CONFIG_GET_ROUTES: /* * Big enough for config_id and the length of the userland * memory buffer. Additional length should be in factors of 2 * to communicate any returned route pairs (source,destination). */ if (insn->n >= 2) return 0; break; } return -EINVAL; } /** * get_valid_routes() - Calls low-level driver get_valid_routes function to * either return a count of valid routes to user, or copy * of list of all valid device routes to buffer in * userspace. * @dev: comedi device pointer * @data: data from user insn call. The length of the data must be >= 2. 
* data[0] must contain the INSN_DEVICE_CONFIG config_id. * data[1](input) contains the number of _pairs_ for which memory is * allotted from the user. If the user specifies '0', then only * the number of pairs available is returned. * data[1](output) returns either the number of pairs available (if none * were requested) or the number of _pairs_ that are copied back * to the user. * data[2::2] returns each (source, destination) pair. * * Return: -EINVAL if low-level driver does not allocate and return routes as * expected. Returns 0 otherwise. */ static int get_valid_routes(struct comedi_device *dev, unsigned int *data) { lockdep_assert_held(&dev->mutex); data[1] = dev->get_valid_routes(dev, data[1], data + 2); return 0; } static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data, void *file) { struct comedi_subdevice *s; int ret = 0; int i; lockdep_assert_held(&dev->mutex); if (insn->insn & INSN_MASK_SPECIAL) { /* a non-subdevice instruction */ switch (insn->insn) { case INSN_GTOD: { struct timespec64 tv; if (insn->n != 2) { ret = -EINVAL; break; } ktime_get_real_ts64(&tv); /* unsigned data safe until 2106 */ data[0] = (unsigned int)tv.tv_sec; data[1] = tv.tv_nsec / NSEC_PER_USEC; ret = 2; break; } case INSN_WAIT: if (insn->n != 1 || data[0] >= 100000) { ret = -EINVAL; break; } udelay(data[0] / 1000); ret = 1; break; case INSN_INTTRIG: if (insn->n != 1) { ret = -EINVAL; break; } if (insn->subdev >= dev->n_subdevices) { dev_dbg(dev->class_dev, "%d not usable subdevice\n", insn->subdev); ret = -EINVAL; break; } s = &dev->subdevices[insn->subdev]; if (!s->async) { dev_dbg(dev->class_dev, "no async\n"); ret = -EINVAL; break; } if (!s->async->inttrig) { dev_dbg(dev->class_dev, "no inttrig\n"); ret = -EAGAIN; break; } ret = s->async->inttrig(dev, s, data[0]); if (ret >= 0) ret = 1; break; case INSN_DEVICE_CONFIG: ret = check_insn_device_config_length(insn, data); if (ret) break; if (data[0] == INSN_DEVICE_CONFIG_GET_ROUTES) { /* * data[1] should be the number of _pairs_ that * the memory can hold. */ data[1] = (insn->n - 2) / 2; ret = get_valid_routes(dev, data); break; } /* other global device config instructions. */ ret = dev->insn_device_config(dev, insn, data); break; default: dev_dbg(dev->class_dev, "invalid insn\n"); ret = -EINVAL; break; } } else { /* a subdevice instruction */ unsigned int maxdata; if (insn->subdev >= dev->n_subdevices) { dev_dbg(dev->class_dev, "subdevice %d out of range\n", insn->subdev); ret = -EINVAL; goto out; } s = &dev->subdevices[insn->subdev]; if (s->type == COMEDI_SUBD_UNUSED) { dev_dbg(dev->class_dev, "%d not usable subdevice\n", insn->subdev); ret = -EIO; goto out; } /* are we locked? (ioctl lock) */ if (s->lock && s->lock != file) { dev_dbg(dev->class_dev, "device locked\n"); ret = -EACCES; goto out; } ret = comedi_check_chanlist(s, 1, &insn->chanspec); if (ret < 0) { ret = -EINVAL; dev_dbg(dev->class_dev, "bad chanspec\n"); goto out; } if (s->busy) { ret = -EBUSY; goto out; } /* This looks arbitrary. It is. */ s->busy = parse_insn; switch (insn->insn) { case INSN_READ: ret = s->insn_read(dev, s, insn, data); if (ret == -ETIMEDOUT) { dev_dbg(dev->class_dev, "subdevice %d read instruction timed out\n", s->index); } break; case INSN_WRITE: maxdata = s->maxdata_list ?
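/* per-channel limit, if the subdevice supplies one */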
s->maxdata_list[CR_CHAN(insn->chanspec)] : s->maxdata; for (i = 0; i < insn->n; ++i) { if (data[i] > maxdata) { ret = -EINVAL; dev_dbg(dev->class_dev, "bad data value(s)\n"); break; } } if (ret == 0) { ret = s->insn_write(dev, s, insn, data); if (ret == -ETIMEDOUT) { dev_dbg(dev->class_dev, "subdevice %d write instruction timed out\n", s->index); } } break; case INSN_BITS: if (insn->n != 2) { ret = -EINVAL; } else { /* * Most drivers ignore the base channel in * insn->chanspec. Fix this here if * the subdevice has <= 32 channels. */ unsigned int orig_mask = data[0]; unsigned int shift = 0; if (s->n_chan <= 32) { shift = CR_CHAN(insn->chanspec); if (shift > 0) { insn->chanspec = 0; data[0] <<= shift; data[1] <<= shift; } } ret = s->insn_bits(dev, s, insn, data); data[0] = orig_mask; if (shift > 0) data[1] >>= shift; } break; case INSN_CONFIG: ret = check_insn_config_length(insn, data); if (ret) break; ret = s->insn_config(dev, s, insn, data); break; default: ret = -EINVAL; break; } s->busy = NULL; } out: return ret; } /* * COMEDI_INSNLIST ioctl * synchronous instruction list * * arg: * pointer to comedi_insnlist structure * * reads: * comedi_insnlist structure * array of comedi_insn structures from insnlist->insns pointer * data (for writes) from insns[].data pointers * * writes: * data (for reads) to insns[].data pointers */ /* arbitrary limits */ #define MIN_SAMPLES 16 #define MAX_SAMPLES 65536 static int do_insnlist_ioctl(struct comedi_device *dev, struct comedi_insn *insns, unsigned int n_insns, void *file) { unsigned int *data = NULL; unsigned int max_n_data_required = MIN_SAMPLES; int i = 0; int ret = 0; lockdep_assert_held(&dev->mutex); /* Determine maximum memory needed for all instructions. */ for (i = 0; i < n_insns; ++i) { if (insns[i].n > MAX_SAMPLES) { dev_dbg(dev->class_dev, "number of samples too large\n"); ret = -EINVAL; goto error; } max_n_data_required = max(max_n_data_required, insns[i].n); } /* Allocate scratch space for all instruction data. 
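 * A single buffer sized for the largest request (never less than
 * MIN_SAMPLES words) is reused for every instruction in the list.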
*/ data = kmalloc_array(max_n_data_required, sizeof(unsigned int), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto error; } for (i = 0; i < n_insns; ++i) { unsigned int n = insns[i].n; if (insns[i].insn & INSN_MASK_WRITE) { if (copy_from_user(data, insns[i].data, n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_from_user failed\n"); ret = -EFAULT; goto error; } if (n < MIN_SAMPLES) { memset(&data[n], 0, (MIN_SAMPLES - n) * sizeof(unsigned int)); } } else { memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) * sizeof(unsigned int)); } ret = parse_insn(dev, insns + i, data, file); if (ret < 0) goto error; if (insns[i].insn & INSN_MASK_READ) { if (copy_to_user(insns[i].data, data, n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_to_user failed\n"); ret = -EFAULT; goto error; } } if (need_resched()) schedule(); } error: kfree(data); if (ret < 0) return ret; return i; } #define MAX_INSNS MAX_SAMPLES static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns) { if (n_insns > MAX_INSNS) { dev_dbg(dev->class_dev, "insnlist length too large\n"); return -EINVAL; } return 0; } /* * COMEDI_INSN ioctl * synchronous instruction * * arg: * pointer to comedi_insn structure * * reads: * comedi_insn structure * data (for writes) from insn->data pointer * * writes: * data (for reads) to insn->data pointer */ static int do_insn_ioctl(struct comedi_device *dev, struct comedi_insn *insn, void *file) { unsigned int *data = NULL; unsigned int n_data = MIN_SAMPLES; int ret = 0; lockdep_assert_held(&dev->mutex); n_data = max(n_data, insn->n); /* This is where the behavior of insn and insnlist deviate. */ if (insn->n > MAX_SAMPLES) { insn->n = MAX_SAMPLES; n_data = MAX_SAMPLES; } data = kmalloc_array(n_data, sizeof(unsigned int), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto error; } if (insn->insn & INSN_MASK_WRITE) { if (copy_from_user(data, insn->data, insn->n * sizeof(unsigned int))) { ret = -EFAULT; goto error; } if (insn->n < MIN_SAMPLES) { memset(&data[insn->n], 0, (MIN_SAMPLES - insn->n) * sizeof(unsigned int)); } } else { memset(data, 0, n_data * sizeof(unsigned int)); } ret = parse_insn(dev, insn, data, file); if (ret < 0) goto error; if (insn->insn & INSN_MASK_READ) { if (copy_to_user(insn->data, data, insn->n * sizeof(unsigned int))) { ret = -EFAULT; goto error; } } ret = insn->n; error: kfree(data); return ret; } static int __comedi_get_user_cmd(struct comedi_device *dev, struct comedi_cmd *cmd) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (cmd->subdev >= dev->n_subdevices) { dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd->subdev); return -ENODEV; } s = &dev->subdevices[cmd->subdev]; if (s->type == COMEDI_SUBD_UNUSED) { dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd->subdev); return -EIO; } if (!s->do_cmd || !s->do_cmdtest || !s->async) { dev_dbg(dev->class_dev, "subdevice %d does not support commands\n", cmd->subdev); return -EIO; } /* make sure channel/gain list isn't too long */ if (cmd->chanlist_len > s->len_chanlist) { dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n", cmd->chanlist_len, s->len_chanlist); return -EINVAL; } /* * Set the CMDF_WRITE flag to the correct state if the subdevice * supports only "read" commands or only "write" commands. 
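 * For example, a subdevice flagged only SDF_CMD_READ always has
 * CMDF_WRITE cleared here, so userspace cannot request the wrong
 * direction on a unidirectional subdevice.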
*/ switch (s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) { case SDF_CMD_READ: cmd->flags &= ~CMDF_WRITE; break; case SDF_CMD_WRITE: cmd->flags |= CMDF_WRITE; break; default: break; } return 0; } static int __comedi_get_user_chanlist(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int __user *user_chanlist, struct comedi_cmd *cmd) { unsigned int *chanlist; int ret; lockdep_assert_held(&dev->mutex); cmd->chanlist = NULL; chanlist = memdup_array_user(user_chanlist, cmd->chanlist_len, sizeof(unsigned int)); if (IS_ERR(chanlist)) return PTR_ERR(chanlist); /* make sure each element in channel/gain list is valid */ ret = comedi_check_chanlist(s, cmd->chanlist_len, chanlist); if (ret < 0) { kfree(chanlist); return ret; } cmd->chanlist = chanlist; return 0; } /* * COMEDI_CMD ioctl * asynchronous acquisition command set-up * * arg: * pointer to comedi_cmd structure * * reads: * comedi_cmd structure * channel/range list from cmd->chanlist pointer * * writes: * possibly modified comedi_cmd structure (when -EAGAIN returned) */ static int do_cmd_ioctl(struct comedi_device *dev, struct comedi_cmd *cmd, bool *copy, void *file) { struct comedi_subdevice *s; struct comedi_async *async; unsigned int __user *user_chanlist; int ret; lockdep_assert_held(&dev->mutex); /* do some simple cmd validation */ ret = __comedi_get_user_cmd(dev, cmd); if (ret) return ret; /* save user's chanlist pointer so it can be restored later */ user_chanlist = (unsigned int __user *)cmd->chanlist; s = &dev->subdevices[cmd->subdev]; async = s->async; /* are we locked? (ioctl lock) */ if (s->lock && s->lock != file) { dev_dbg(dev->class_dev, "subdevice locked\n"); return -EACCES; } /* are we busy? */ if (s->busy) { dev_dbg(dev->class_dev, "subdevice busy\n"); return -EBUSY; } /* make sure channel/gain list isn't too short */ if (cmd->chanlist_len < 1) { dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n", cmd->chanlist_len); return -EINVAL; } async->cmd = *cmd; async->cmd.data = NULL; /* load channel/gain list */ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &async->cmd); if (ret) goto cleanup; ret = s->do_cmdtest(dev, s, &async->cmd); if (async->cmd.flags & CMDF_BOGUS || ret) { dev_dbg(dev->class_dev, "test returned %d\n", ret); *cmd = async->cmd; /* restore chanlist pointer before copying back */ cmd->chanlist = (unsigned int __force *)user_chanlist; cmd->data = NULL; *copy = true; ret = -EAGAIN; goto cleanup; } if (!async->prealloc_bufsz) { ret = -ENOMEM; dev_dbg(dev->class_dev, "no buffer (?)\n"); goto cleanup; } comedi_buf_reset(s); async->cb_mask = COMEDI_CB_BLOCK | COMEDI_CB_CANCEL_MASK; if (async->cmd.flags & CMDF_WAKE_EOS) async->cb_mask |= COMEDI_CB_EOS; comedi_update_subdevice_runflags(s, COMEDI_SRF_BUSY_MASK, COMEDI_SRF_RUNNING); /* * Set s->busy _after_ setting COMEDI_SRF_RUNNING flag to avoid * race with comedi_read() or comedi_write(). 
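 * Those paths could otherwise observe s->busy == file while
 * COMEDI_SRF_RUNNING is still clear and mistake the new command
 * for one that has already terminated.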
*/ s->busy = file; ret = s->do_cmd(dev, s); if (ret == 0) return 0; cleanup: do_become_nonbusy(dev, s); return ret; } /* * COMEDI_CMDTEST ioctl * asynchronous acquisition command testing * * arg: * pointer to comedi_cmd structure * * reads: * comedi_cmd structure * channel/range list from cmd->chanlist pointer * * writes: * possibly modified comedi_cmd structure */ static int do_cmdtest_ioctl(struct comedi_device *dev, struct comedi_cmd *cmd, bool *copy, void *file) { struct comedi_subdevice *s; unsigned int __user *user_chanlist; int ret; lockdep_assert_held(&dev->mutex); /* do some simple cmd validation */ ret = __comedi_get_user_cmd(dev, cmd); if (ret) return ret; /* save user's chanlist pointer so it can be restored later */ user_chanlist = (unsigned int __user *)cmd->chanlist; s = &dev->subdevices[cmd->subdev]; /* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */ if (user_chanlist) { /* load channel/gain list */ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, cmd); if (ret) return ret; } ret = s->do_cmdtest(dev, s, cmd); kfree(cmd->chanlist); /* free kernel copy of user chanlist */ /* restore chanlist pointer before copying back */ cmd->chanlist = (unsigned int __force *)user_chanlist; *copy = true; return ret; } /* * COMEDI_LOCK ioctl * lock subdevice * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { int ret = 0; unsigned long flags; struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; spin_lock_irqsave(&s->spin_lock, flags); if (s->busy || s->lock) ret = -EBUSY; else s->lock = file; spin_unlock_irqrestore(&s->spin_lock, flags); return ret; } /* * COMEDI_UNLOCK ioctl * unlock subdevice * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; if (s->busy) return -EBUSY; if (s->lock && s->lock != file) return -EACCES; if (s->lock == file) s->lock = NULL; return 0; } /* * COMEDI_CANCEL ioctl * cancel asynchronous acquisition * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; if (!s->async) return -EINVAL; if (!s->busy) return 0; if (s->busy != file) return -EBUSY; return do_cancel(dev, s); } /* * COMEDI_POLL ioctl * instructs driver to synchronize buffers * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; if (!s->busy) return 0; if (s->busy != file) return -EBUSY; if (s->poll) return s->poll(dev, s); return -EINVAL; } /* * COMEDI_SETRSUBD ioctl * sets the current "read" subdevice on a per-file basis * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg, struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_subdevice *s_old, *s_new; 
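/*
 * Validate the requested subdevice and, if it differs from the current
 * "read" subdevice, make sure this file isn't still busy with a "read"
 * command on the old one before switching over.
 */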
lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s_new = &dev->subdevices[arg]; s_old = comedi_file_read_subdevice(file); if (s_old == s_new) return 0; /* no change */ if (!(s_new->subdev_flags & SDF_CMD_READ)) return -EINVAL; /* * Check the file isn't still busy handling a "read" command on the * old subdevice (if any). */ if (s_old && s_old->busy == file && s_old->async && !(s_old->async->cmd.flags & CMDF_WRITE)) return -EBUSY; WRITE_ONCE(cfp->read_subdev, s_new); return 0; } /* * COMEDI_SETWSUBD ioctl * sets the current "write" subdevice on a per-file basis * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg, struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_subdevice *s_old, *s_new; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s_new = &dev->subdevices[arg]; s_old = comedi_file_write_subdevice(file); if (s_old == s_new) return 0; /* no change */ if (!(s_new->subdev_flags & SDF_CMD_WRITE)) return -EINVAL; /* * Check the file isn't still busy handling a "write" command on the * old subdevice (if any). */ if (s_old && s_old->busy == file && s_old->async && (s_old->async->cmd.flags & CMDF_WRITE)) return -EBUSY; WRITE_ONCE(cfp->write_subdev, s_new); return 0; } static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file_inode(file)); struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; int rc; mutex_lock(&dev->mutex); /* * Device config is special, because it must work on * an unconfigured device. */ if (cmd == COMEDI_DEVCONFIG) { if (minor >= COMEDI_NUM_BOARD_MINORS) { /* Device config not appropriate on non-board minors. */ rc = -ENOTTY; goto done; } rc = do_devconfig_ioctl(dev, (struct comedi_devconfig __user *)arg); if (rc == 0) { if (arg == 0 && dev->minor >= comedi_num_legacy_minors) { /* * Successfully unconfigured a dynamically * allocated device. Try and remove it. 
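 * comedi_clear_board_dev() only succeeds if the minor still maps to
 * this device, so a racing removal is handled gracefully.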
*/ if (comedi_clear_board_dev(dev)) { mutex_unlock(&dev->mutex); comedi_free_board_dev(dev); return rc; } } } goto done; } if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); rc = -ENODEV; goto done; } switch (cmd) { case COMEDI_BUFCONFIG: rc = do_bufconfig_ioctl(dev, (struct comedi_bufconfig __user *)arg); break; case COMEDI_DEVINFO: rc = do_devinfo_ioctl(dev, (struct comedi_devinfo __user *)arg, file); break; case COMEDI_SUBDINFO: rc = do_subdinfo_ioctl(dev, (struct comedi_subdinfo __user *)arg, file); break; case COMEDI_CHANINFO: { struct comedi_chaninfo it; if (copy_from_user(&it, (void __user *)arg, sizeof(it))) rc = -EFAULT; else rc = do_chaninfo_ioctl(dev, &it); break; } case COMEDI_RANGEINFO: { struct comedi_rangeinfo it; if (copy_from_user(&it, (void __user *)arg, sizeof(it))) rc = -EFAULT; else rc = do_rangeinfo_ioctl(dev, &it); break; } case COMEDI_BUFINFO: rc = do_bufinfo_ioctl(dev, (struct comedi_bufinfo __user *)arg, file); break; case COMEDI_LOCK: rc = do_lock_ioctl(dev, arg, file); break; case COMEDI_UNLOCK: rc = do_unlock_ioctl(dev, arg, file); break; case COMEDI_CANCEL: rc = do_cancel_ioctl(dev, arg, file); break; case COMEDI_CMD: { struct comedi_cmd cmd; bool copy = false; if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) { rc = -EFAULT; break; } rc = do_cmd_ioctl(dev, &cmd, &copy, file); if (copy && copy_to_user((void __user *)arg, &cmd, sizeof(cmd))) rc = -EFAULT; break; } case COMEDI_CMDTEST: { struct comedi_cmd cmd; bool copy = false; if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) { rc = -EFAULT; break; } rc = do_cmdtest_ioctl(dev, &cmd, &copy, file); if (copy && copy_to_user((void __user *)arg, &cmd, sizeof(cmd))) rc = -EFAULT; break; } case COMEDI_INSNLIST: { struct comedi_insnlist insnlist; struct comedi_insn *insns = NULL; if (copy_from_user(&insnlist, (void __user *)arg, sizeof(insnlist))) { rc = -EFAULT; break; } rc = check_insnlist_len(dev, insnlist.n_insns); if (rc) break; insns = memdup_array_user(insnlist.insns, insnlist.n_insns, sizeof(*insns)); if (IS_ERR(insns)) { rc = PTR_ERR(insns); break; } rc = do_insnlist_ioctl(dev, insns, insnlist.n_insns, file); kfree(insns); break; } case COMEDI_INSN: { struct comedi_insn insn; if (copy_from_user(&insn, (void __user *)arg, sizeof(insn))) rc = -EFAULT; else rc = do_insn_ioctl(dev, &insn, file); break; } case COMEDI_POLL: rc = do_poll_ioctl(dev, arg, file); break; case COMEDI_SETRSUBD: rc = do_setrsubd_ioctl(dev, arg, file); break; case COMEDI_SETWSUBD: rc = do_setwsubd_ioctl(dev, arg, file); break; default: rc = -ENOTTY; break; } done: mutex_unlock(&dev->mutex); return rc; } static void comedi_vm_open(struct vm_area_struct *area) { struct comedi_buf_map *bm; bm = area->vm_private_data; comedi_buf_map_get(bm); } static void comedi_vm_close(struct vm_area_struct *area) { struct comedi_buf_map *bm; bm = area->vm_private_data; comedi_buf_map_put(bm); } static int comedi_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { struct comedi_buf_map *bm = vma->vm_private_data; unsigned long offset = addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT); if (len < 0) return -EINVAL; if (len > vma->vm_end - addr) len = vma->vm_end - addr; return comedi_buf_map_access(bm, offset, buf, len, write); } static const struct vm_operations_struct comedi_vm_ops = { .open = comedi_vm_open, .close = comedi_vm_close, .access = comedi_vm_access, }; static int comedi_mmap(struct file *file, struct vm_area_struct *vma) { struct comedi_file *cfp = file->private_data; struct
comedi_device *dev = cfp->dev; struct comedi_subdevice *s; struct comedi_async *async; struct comedi_buf_map *bm = NULL; struct comedi_buf_page *buf; unsigned long start = vma->vm_start; unsigned long size; int n_pages; int i; int retval = 0; /* * 'trylock' avoids circular dependency with current->mm->mmap_lock * and down-reading &dev->attach_lock should normally succeed without * contention unless the device is in the process of being attached * or detached. */ if (!down_read_trylock(&dev->attach_lock)) return -EAGAIN; if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); retval = -ENODEV; goto done; } if (vma->vm_flags & VM_WRITE) s = comedi_file_write_subdevice(file); else s = comedi_file_read_subdevice(file); if (!s) { retval = -EINVAL; goto done; } async = s->async; if (!async) { retval = -EINVAL; goto done; } if (vma->vm_pgoff != 0) { dev_dbg(dev->class_dev, "mmap() offset must be 0.\n"); retval = -EINVAL; goto done; } size = vma->vm_end - vma->vm_start; if (size > async->prealloc_bufsz) { retval = -EFAULT; goto done; } if (offset_in_page(size)) { retval = -EFAULT; goto done; } n_pages = vma_pages(vma); /* get reference to current buf map (if any) */ bm = comedi_buf_map_from_subdev_get(s); if (!bm || n_pages > bm->n_pages) { retval = -EINVAL; goto done; } if (bm->dma_dir != DMA_NONE) { unsigned long vm_start = vma->vm_start; unsigned long vm_end = vma->vm_end; /* * Buffer pages are not contiguous, so temporarily modify VMA * start and end addresses for each buffer page. */ for (i = 0; i < n_pages; ++i) { buf = &bm->page_list[i]; vma->vm_start = start; vma->vm_end = start + PAGE_SIZE; retval = dma_mmap_coherent(bm->dma_hw_dev, vma, buf->virt_addr, buf->dma_addr, PAGE_SIZE); if (retval) break; start += PAGE_SIZE; } vma->vm_start = vm_start; vma->vm_end = vm_end; } else { for (i = 0; i < n_pages; ++i) { unsigned long pfn; buf = &bm->page_list[i]; pfn = page_to_pfn(virt_to_page(buf->virt_addr)); retval = remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED); if (retval) break; start += PAGE_SIZE; } } #ifdef CONFIG_MMU /* * Leaving behind a partial mapping of a buffer we're about to drop is * unsafe, see remap_pfn_range_notrack(). We need to zap the range * here ourselves instead of relying on the automatic zapping in * remap_pfn_range() because we call remap_pfn_range() in a loop. 
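 *
 * Illustration (hypothetical numbers): if mapping page 5 of an
 * 8-page buffer fails, pages 0-4 have already been installed in the
 * VMA. Dropping our buffer reference while those PTEs remain would
 * leave userspace with mappings of pages that may be freed, so the
 * whole requested range is zapped below before bailing out.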
*/ if (retval) zap_vma_ptes(vma, vma->vm_start, size); #endif if (retval == 0) { vma->vm_ops = &comedi_vm_ops; vma->vm_private_data = bm; vma->vm_ops->open(vma); } done: up_read(&dev->attach_lock); comedi_buf_map_put(bm); /* put reference to buf map - okay if NULL */ return retval; } static __poll_t comedi_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_subdevice *s, *s_read; down_read(&dev->attach_lock); if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); goto done; } s = comedi_file_read_subdevice(file); s_read = s; if (s && s->async) { poll_wait(file, &s->async->wait_head, wait); if (s->busy != file || !comedi_is_subdevice_running(s) || (s->async->cmd.flags & CMDF_WRITE) || comedi_buf_read_n_available(s) > 0) mask |= EPOLLIN | EPOLLRDNORM; } s = comedi_file_write_subdevice(file); if (s && s->async) { unsigned int bps = comedi_bytes_per_sample(s); if (s != s_read) poll_wait(file, &s->async->wait_head, wait); if (s->busy != file || !comedi_is_subdevice_running(s) || !(s->async->cmd.flags & CMDF_WRITE) || comedi_buf_write_n_available(s) >= bps) mask |= EPOLLOUT | EPOLLWRNORM; } done: up_read(&dev->attach_lock); return mask; } static unsigned int comedi_buf_copy_to_user(struct comedi_subdevice *s, void __user *dest, unsigned int src_offset, unsigned int n) { struct comedi_buf_map *bm = s->async->buf_map; struct comedi_buf_page *buf_page_list = bm->page_list; unsigned int page = src_offset >> PAGE_SHIFT; unsigned int offset = offset_in_page(src_offset); while (n) { unsigned int copy_amount = min(n, PAGE_SIZE - offset); unsigned int uncopied; uncopied = copy_to_user(dest, buf_page_list[page].virt_addr + offset, copy_amount); copy_amount -= uncopied; n -= copy_amount; if (uncopied) break; dest += copy_amount; page++; if (page == bm->n_pages) page = 0; /* buffer wraparound */ offset = 0; } return n; } static unsigned int comedi_buf_copy_from_user(struct comedi_subdevice *s, unsigned int dst_offset, const void __user *src, unsigned int n) { struct comedi_buf_map *bm = s->async->buf_map; struct comedi_buf_page *buf_page_list = bm->page_list; unsigned int page = dst_offset >> PAGE_SHIFT; unsigned int offset = offset_in_page(dst_offset); while (n) { unsigned int copy_amount = min(n, PAGE_SIZE - offset); unsigned int uncopied; uncopied = copy_from_user(buf_page_list[page].virt_addr + offset, src, copy_amount); copy_amount -= uncopied; n -= copy_amount; if (uncopied) break; src += copy_amount; page++; if (page == bm->n_pages) page = 0; /* buffer wraparound */ offset = 0; } return n; } static ssize_t comedi_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *offset) { struct comedi_subdevice *s; struct comedi_async *async; unsigned int n, m; ssize_t count = 0; int retval = 0; DECLARE_WAITQUEUE(wait, current); struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; bool become_nonbusy = false; bool attach_locked; unsigned int old_detach_count; /* Protect against device detachment during operation. 
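 * attach_lock is read-locked for the duration of the copy loop, and
 * dev->detach_count is sampled up front so that, if the lock must be
 * dropped later to mark the subdevice non-busy, a detach that raced
 * with us can still be detected.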
*/ down_read(&dev->attach_lock); attach_locked = true; old_detach_count = dev->detach_count; if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); retval = -ENODEV; goto out; } s = comedi_file_write_subdevice(file); if (!s || !s->async) { retval = -EIO; goto out; } async = s->async; if (s->busy != file || !(async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; goto out; } add_wait_queue(&async->wait_head, &wait); while (count == 0 && !retval) { unsigned int runflags; set_current_state(TASK_INTERRUPTIBLE); runflags = comedi_get_subdevice_runflags(s); if (!comedi_is_runflags_running(runflags)) { if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; if (retval || nbytes) become_nonbusy = true; break; } if (nbytes == 0) break; /* Allocate all free buffer space. */ comedi_buf_write_alloc(s, async->prealloc_bufsz); m = comedi_buf_write_n_allocated(s); n = min_t(size_t, m, nbytes); if (n == 0) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (s->busy != file || !(async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; break; } continue; } set_current_state(TASK_RUNNING); m = comedi_buf_copy_from_user(s, async->buf_write_ptr, buf, n); if (m) { n -= m; retval = -EFAULT; } comedi_buf_write_free(s, n); count += n; nbytes -= n; buf += n; } remove_wait_queue(&async->wait_head, &wait); set_current_state(TASK_RUNNING); if (become_nonbusy && count == 0) { struct comedi_subdevice *new_s; /* * To avoid deadlock, cannot acquire dev->mutex * while dev->attach_lock is held. */ up_read(&dev->attach_lock); attach_locked = false; mutex_lock(&dev->mutex); /* * Check device hasn't become detached behind our back. * Checking dev->detach_count is unchanged ought to be * sufficient (unless there have been 2**32 detaches in the * meantime!), but check the subdevice pointer as well just in * case. * * Also check the subdevice is still in a suitable state to * become non-busy in case it changed behind our back. */ new_s = comedi_file_write_subdevice(file); if (dev->attached && old_detach_count == dev->detach_count && s == new_s && new_s->async == async && s->busy == file && (async->cmd.flags & CMDF_WRITE) && !comedi_is_subdevice_running(s)) do_become_nonbusy(dev, s); mutex_unlock(&dev->mutex); } out: if (attach_locked) up_read(&dev->attach_lock); return count ? count : retval; } static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, loff_t *offset) { struct comedi_subdevice *s; struct comedi_async *async; unsigned int n, m; ssize_t count = 0; int retval = 0; DECLARE_WAITQUEUE(wait, current); struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; unsigned int old_detach_count; bool become_nonbusy = false; bool attach_locked; /* Protect against device detachment during operation. 
*/ down_read(&dev->attach_lock); attach_locked = true; old_detach_count = dev->detach_count; if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); retval = -ENODEV; goto out; } s = comedi_file_read_subdevice(file); if (!s || !s->async) { retval = -EIO; goto out; } async = s->async; if (s->busy != file || (async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; goto out; } add_wait_queue(&async->wait_head, &wait); while (count == 0 && !retval) { set_current_state(TASK_INTERRUPTIBLE); m = comedi_buf_read_n_available(s); n = min_t(size_t, m, nbytes); if (n == 0) { unsigned int runflags = comedi_get_subdevice_runflags(s); if (!comedi_is_runflags_running(runflags)) { if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; if (retval || nbytes) become_nonbusy = true; break; } if (nbytes == 0) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (s->busy != file || (async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; break; } continue; } set_current_state(TASK_RUNNING); m = comedi_buf_copy_to_user(s, buf, async->buf_read_ptr, n); if (m) { n -= m; retval = -EFAULT; } comedi_buf_read_alloc(s, n); comedi_buf_read_free(s, n); count += n; nbytes -= n; buf += n; } remove_wait_queue(&async->wait_head, &wait); set_current_state(TASK_RUNNING); if (become_nonbusy && count == 0) { struct comedi_subdevice *new_s; /* * To avoid deadlock, cannot acquire dev->mutex * while dev->attach_lock is held. */ up_read(&dev->attach_lock); attach_locked = false; mutex_lock(&dev->mutex); /* * Check device hasn't become detached behind our back. * Checking dev->detach_count is unchanged ought to be * sufficient (unless there have been 2**32 detaches in the * meantime!), but check the subdevice pointer as well just in * case. * * Also check the subdevice is still in a suitable state to * become non-busy in case it changed behind our back. */ new_s = comedi_file_read_subdevice(file); if (dev->attached && old_detach_count == dev->detach_count && s == new_s && new_s->async == async && s->busy == file && !(async->cmd.flags & CMDF_WRITE) && !comedi_is_subdevice_running(s) && comedi_buf_read_n_available(s) == 0) do_become_nonbusy(dev, s); mutex_unlock(&dev->mutex); } out: if (attach_locked) up_read(&dev->attach_lock); return count ? 
count : retval; } static int comedi_open(struct inode *inode, struct file *file) { const unsigned int minor = iminor(inode); struct comedi_file *cfp; struct comedi_device *dev = comedi_dev_get_from_minor(minor); int rc; if (!dev) { pr_debug("invalid minor number\n"); return -ENODEV; } cfp = kzalloc(sizeof(*cfp), GFP_KERNEL); if (!cfp) { comedi_dev_put(dev); return -ENOMEM; } cfp->dev = dev; mutex_lock(&dev->mutex); if (!dev->attached && !capable(CAP_SYS_ADMIN)) { dev_dbg(dev->class_dev, "not attached and not CAP_SYS_ADMIN\n"); rc = -ENODEV; goto out; } if (dev->attached && dev->use_count == 0) { if (!try_module_get(dev->driver->module)) { rc = -ENXIO; goto out; } if (dev->open) { rc = dev->open(dev); if (rc < 0) { module_put(dev->driver->module); goto out; } } } dev->use_count++; file->private_data = cfp; comedi_file_reset(file); rc = 0; out: mutex_unlock(&dev->mutex); if (rc) { comedi_dev_put(dev); kfree(cfp); } return rc; } static int comedi_fasync(int fd, struct file *file, int on) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; return fasync_helper(fd, file, on, &dev->async_queue); } static int comedi_close(struct inode *inode, struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_subdevice *s = NULL; int i; mutex_lock(&dev->mutex); if (dev->subdevices) { for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->busy == file) do_cancel(dev, s); if (s->lock == file) s->lock = NULL; } } if (dev->attached && dev->use_count == 1) { if (dev->close) dev->close(dev); module_put(dev->driver->module); } dev->use_count--; mutex_unlock(&dev->mutex); comedi_dev_put(dev); kfree(cfp); return 0; } #ifdef CONFIG_COMPAT #define COMEDI32_CHANINFO _IOR(CIO, 3, struct comedi32_chaninfo_struct) #define COMEDI32_RANGEINFO _IOR(CIO, 8, struct comedi32_rangeinfo_struct) /* * N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR. * It's too late to change it now, but it only affects the command number. */ #define COMEDI32_CMD _IOR(CIO, 9, struct comedi32_cmd_struct) /* * N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR. * It's too late to change it now, but it only affects the command number. 
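 *
 * Sketch of why only the number is affected (assuming the usual
 * _IOC() encoding): the read/write direction is encoded in the top
 * bits of the command value, so _IOR() and _IOWR() with the same
 * type, number and size yield different command values; nothing here
 * decodes those direction bits, so the only requirement is that
 * userspace and the kernel compute the same value.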
*/ #define COMEDI32_CMDTEST _IOR(CIO, 10, struct comedi32_cmd_struct) #define COMEDI32_INSNLIST _IOR(CIO, 11, struct comedi32_insnlist_struct) #define COMEDI32_INSN _IOR(CIO, 12, struct comedi32_insn_struct) struct comedi32_chaninfo_struct { unsigned int subdev; compat_uptr_t maxdata_list; /* 32-bit 'unsigned int *' */ compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */ compat_uptr_t rangelist; /* 32-bit 'unsigned int *' */ unsigned int unused[4]; }; struct comedi32_rangeinfo_struct { unsigned int range_type; compat_uptr_t range_ptr; /* 32-bit 'void *' */ }; struct comedi32_cmd_struct { unsigned int subdev; unsigned int flags; unsigned int start_src; unsigned int start_arg; unsigned int scan_begin_src; unsigned int scan_begin_arg; unsigned int convert_src; unsigned int convert_arg; unsigned int scan_end_src; unsigned int scan_end_arg; unsigned int stop_src; unsigned int stop_arg; compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */ unsigned int chanlist_len; compat_uptr_t data; /* 32-bit 'short *' */ unsigned int data_len; }; struct comedi32_insn_struct { unsigned int insn; unsigned int n; compat_uptr_t data; /* 32-bit 'unsigned int *' */ unsigned int subdev; unsigned int chanspec; unsigned int unused[3]; }; struct comedi32_insnlist_struct { unsigned int n_insns; compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */ }; /* Handle 32-bit COMEDI_CHANINFO ioctl. */ static int compat_chaninfo(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi32_chaninfo_struct chaninfo32; struct comedi_chaninfo chaninfo; int err; if (copy_from_user(&chaninfo32, compat_ptr(arg), sizeof(chaninfo32))) return -EFAULT; memset(&chaninfo, 0, sizeof(chaninfo)); chaninfo.subdev = chaninfo32.subdev; chaninfo.maxdata_list = compat_ptr(chaninfo32.maxdata_list); chaninfo.flaglist = compat_ptr(chaninfo32.flaglist); chaninfo.rangelist = compat_ptr(chaninfo32.rangelist); mutex_lock(&dev->mutex); err = do_chaninfo_ioctl(dev, &chaninfo); mutex_unlock(&dev->mutex); return err; } /* Handle 32-bit COMEDI_RANGEINFO ioctl. */ static int compat_rangeinfo(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi32_rangeinfo_struct rangeinfo32; struct comedi_rangeinfo rangeinfo; int err; if (copy_from_user(&rangeinfo32, compat_ptr(arg), sizeof(rangeinfo32))) return -EFAULT; memset(&rangeinfo, 0, sizeof(rangeinfo)); rangeinfo.range_type = rangeinfo32.range_type; rangeinfo.range_ptr = compat_ptr(rangeinfo32.range_ptr); mutex_lock(&dev->mutex); err = do_rangeinfo_ioctl(dev, &rangeinfo); mutex_unlock(&dev->mutex); return err; } /* Copy 32-bit cmd structure to native cmd structure. 
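 * Scalar members copy across unchanged; only the embedded pointers
 * need widening via compat_ptr(), which converts a 32-bit user
 * pointer into a native void __user *, e.g. (simplified sketch of
 * the assignment below, without the __force cast):
 *
 *	cmd->chanlist = compat_ptr(v32.chanlist);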
*/ static int get_compat_cmd(struct comedi_cmd *cmd, struct comedi32_cmd_struct __user *cmd32) { struct comedi32_cmd_struct v32; if (copy_from_user(&v32, cmd32, sizeof(v32))) return -EFAULT; cmd->subdev = v32.subdev; cmd->flags = v32.flags; cmd->start_src = v32.start_src; cmd->start_arg = v32.start_arg; cmd->scan_begin_src = v32.scan_begin_src; cmd->scan_begin_arg = v32.scan_begin_arg; cmd->convert_src = v32.convert_src; cmd->convert_arg = v32.convert_arg; cmd->scan_end_src = v32.scan_end_src; cmd->scan_end_arg = v32.scan_end_arg; cmd->stop_src = v32.stop_src; cmd->stop_arg = v32.stop_arg; cmd->chanlist = (unsigned int __force *)compat_ptr(v32.chanlist); cmd->chanlist_len = v32.chanlist_len; cmd->data = compat_ptr(v32.data); cmd->data_len = v32.data_len; return 0; } /* Copy native cmd structure to 32-bit cmd structure. */ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32, struct comedi_cmd *cmd) { struct comedi32_cmd_struct v32; memset(&v32, 0, sizeof(v32)); v32.subdev = cmd->subdev; v32.flags = cmd->flags; v32.start_src = cmd->start_src; v32.start_arg = cmd->start_arg; v32.scan_begin_src = cmd->scan_begin_src; v32.scan_begin_arg = cmd->scan_begin_arg; v32.convert_src = cmd->convert_src; v32.convert_arg = cmd->convert_arg; v32.scan_end_src = cmd->scan_end_src; v32.scan_end_arg = cmd->scan_end_arg; v32.stop_src = cmd->stop_src; v32.stop_arg = cmd->stop_arg; /* Assume chanlist pointer is unchanged. */ v32.chanlist = ptr_to_compat((unsigned int __user *)cmd->chanlist); v32.chanlist_len = cmd->chanlist_len; v32.data = ptr_to_compat(cmd->data); v32.data_len = cmd->data_len; if (copy_to_user(cmd32, &v32, sizeof(v32))) return -EFAULT; return 0; } /* Handle 32-bit COMEDI_CMD ioctl. */ static int compat_cmd(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_cmd cmd; bool copy = false; int rc, err; rc = get_compat_cmd(&cmd, compat_ptr(arg)); if (rc) return rc; mutex_lock(&dev->mutex); rc = do_cmd_ioctl(dev, &cmd, &copy, file); mutex_unlock(&dev->mutex); if (copy) { /* Special case: copy cmd back to user. */ err = put_compat_cmd(compat_ptr(arg), &cmd); if (err) rc = err; } return rc; } /* Handle 32-bit COMEDI_CMDTEST ioctl. */ static int compat_cmdtest(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_cmd cmd; bool copy = false; int rc, err; rc = get_compat_cmd(&cmd, compat_ptr(arg)); if (rc) return rc; mutex_lock(&dev->mutex); rc = do_cmdtest_ioctl(dev, &cmd, &copy, file); mutex_unlock(&dev->mutex); if (copy) { err = put_compat_cmd(compat_ptr(arg), &cmd); if (err) rc = err; } return rc; } /* Copy 32-bit insn structure to native insn structure. */ static int get_compat_insn(struct comedi_insn *insn, struct comedi32_insn_struct __user *insn32) { struct comedi32_insn_struct v32; /* Copy insn structure. Ignore the unused members. */ if (copy_from_user(&v32, insn32, sizeof(v32))) return -EFAULT; memset(insn, 0, sizeof(*insn)); insn->insn = v32.insn; insn->n = v32.n; insn->data = compat_ptr(v32.data); insn->subdev = v32.subdev; insn->chanspec = v32.chanspec; return 0; } /* Handle 32-bit COMEDI_INSNLIST ioctl. 
*/ static int compat_insnlist(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi32_insnlist_struct insnlist32; struct comedi32_insn_struct __user *insn32; struct comedi_insn *insns; unsigned int n; int rc; if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32))) return -EFAULT; rc = check_insnlist_len(dev, insnlist32.n_insns); if (rc) return rc; insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) return -ENOMEM; /* Copy insn structures. */ insn32 = compat_ptr(insnlist32.insns); for (n = 0; n < insnlist32.n_insns; n++) { rc = get_compat_insn(insns + n, insn32 + n); if (rc) { kfree(insns); return rc; } } mutex_lock(&dev->mutex); rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file); mutex_unlock(&dev->mutex); kfree(insns); return rc; } /* Handle 32-bit COMEDI_INSN ioctl. */ static int compat_insn(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_insn insn; int rc; rc = get_compat_insn(&insn, (void __user *)arg); if (rc) return rc; mutex_lock(&dev->mutex); rc = do_insn_ioctl(dev, &insn, file); mutex_unlock(&dev->mutex); return rc; } /* * compat_ioctl file operation. * * Returns -ENOIOCTLCMD for unrecognised ioctl codes. */ static long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rc; switch (cmd) { case COMEDI_DEVCONFIG: case COMEDI_DEVINFO: case COMEDI_SUBDINFO: case COMEDI_BUFCONFIG: case COMEDI_BUFINFO: /* Just need to translate the pointer argument. */ arg = (unsigned long)compat_ptr(arg); rc = comedi_unlocked_ioctl(file, cmd, arg); break; case COMEDI_LOCK: case COMEDI_UNLOCK: case COMEDI_CANCEL: case COMEDI_POLL: case COMEDI_SETRSUBD: case COMEDI_SETWSUBD: /* No translation needed. */ rc = comedi_unlocked_ioctl(file, cmd, arg); break; case COMEDI32_CHANINFO: rc = compat_chaninfo(file, arg); break; case COMEDI32_RANGEINFO: rc = compat_rangeinfo(file, arg); break; case COMEDI32_CMD: rc = compat_cmd(file, arg); break; case COMEDI32_CMDTEST: rc = compat_cmdtest(file, arg); break; case COMEDI32_INSNLIST: rc = compat_insnlist(file, arg); break; case COMEDI32_INSN: rc = compat_insn(file, arg); break; default: rc = -ENOIOCTLCMD; break; } return rc; } #else #define comedi_compat_ioctl NULL #endif static const struct file_operations comedi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = comedi_unlocked_ioctl, .compat_ioctl = comedi_compat_ioctl, .open = comedi_open, .release = comedi_close, .read = comedi_read, .write = comedi_write, .mmap = comedi_mmap, .poll = comedi_poll, .fasync = comedi_fasync, .llseek = noop_llseek, }; /** * comedi_event() - Handle events for asynchronous COMEDI command * @dev: COMEDI device. * @s: COMEDI subdevice. * Context: in_interrupt() (usually), @s->spin_lock spin-lock not held. * * If an asynchronous COMEDI command is active on the subdevice, process * any %COMEDI_CB_... event flags that have been set, usually by an * interrupt handler. These may change the run state of the asynchronous * command, wake a task, and/or send a %SIGIO signal. 
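 *
 * Typical driver usage (an illustrative sketch, not taken from a
 * specific driver): an interrupt handler records what happened in
 * the event flags and then lets the core process them:
 *
 *	s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOA;
 *	comedi_event(dev, s);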
*/ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; unsigned int events; int si_code = 0; unsigned long flags; spin_lock_irqsave(&s->spin_lock, flags); events = async->events; async->events = 0; if (!__comedi_is_subdevice_running(s)) { spin_unlock_irqrestore(&s->spin_lock, flags); return; } if (events & COMEDI_CB_CANCEL_MASK) __comedi_clear_subdevice_runflags(s, COMEDI_SRF_RUNNING); /* * Remember if an error event has occurred, so an error can be * returned the next time the user does a read() or write(). */ if (events & COMEDI_CB_ERROR_MASK) __comedi_set_subdevice_runflags(s, COMEDI_SRF_ERROR); if (async->cb_mask & events) { wake_up_interruptible(&async->wait_head); si_code = async->cmd.flags & CMDF_WRITE ? POLL_OUT : POLL_IN; } spin_unlock_irqrestore(&s->spin_lock, flags); if (si_code) kill_fasync(&dev->async_queue, SIGIO, si_code); } EXPORT_SYMBOL_GPL(comedi_event); /* Note: the ->mutex is pre-locked on successful return */ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device) { struct comedi_device *dev; struct device *csdev; unsigned int i; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); comedi_device_init(dev); comedi_set_hw_dev(dev, hardware_device); mutex_lock(&dev->mutex); mutex_lock(&comedi_board_minor_table_lock); for (i = hardware_device ? comedi_num_legacy_minors : 0; i < COMEDI_NUM_BOARD_MINORS; ++i) { if (!comedi_board_minor_table[i]) { comedi_board_minor_table[i] = dev; break; } } mutex_unlock(&comedi_board_minor_table_lock); if (i == COMEDI_NUM_BOARD_MINORS) { mutex_unlock(&dev->mutex); comedi_device_cleanup(dev); comedi_dev_put(dev); dev_err(hardware_device, "ran out of minor numbers for board device files\n"); return ERR_PTR(-EBUSY); } dev->minor = i; csdev = device_create(&comedi_class, hardware_device, MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i); if (!IS_ERR(csdev)) dev->class_dev = get_device(csdev); /* Note: dev->mutex needs to be unlocked by the caller. 
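 * (For example, comedi_init() below calls mutex_unlock(&dev->mutex)
 * immediately after a successful comedi_alloc_board_minor() call.)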
*/ return dev; } void comedi_release_hardware_device(struct device *hardware_device) { int minor; struct comedi_device *dev; for (minor = comedi_num_legacy_minors; minor < COMEDI_NUM_BOARD_MINORS; minor++) { mutex_lock(&comedi_board_minor_table_lock); dev = comedi_board_minor_table[minor]; if (dev && dev->hw_dev == hardware_device) { comedi_board_minor_table[minor] = NULL; mutex_unlock(&comedi_board_minor_table_lock); comedi_free_board_dev(dev); break; } mutex_unlock(&comedi_board_minor_table_lock); } } int comedi_alloc_subdevice_minor(struct comedi_subdevice *s) { struct comedi_device *dev = s->device; struct device *csdev; unsigned int i; mutex_lock(&comedi_subdevice_minor_table_lock); for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) { if (!comedi_subdevice_minor_table[i]) { comedi_subdevice_minor_table[i] = s; break; } } mutex_unlock(&comedi_subdevice_minor_table_lock); if (i == COMEDI_NUM_SUBDEVICE_MINORS) { dev_err(dev->class_dev, "ran out of minor numbers for subdevice files\n"); return -EBUSY; } i += COMEDI_NUM_BOARD_MINORS; s->minor = i; csdev = device_create(&comedi_class, dev->class_dev, MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i_subd%i", dev->minor, s->index); if (!IS_ERR(csdev)) s->class_dev = csdev; return 0; } void comedi_free_subdevice_minor(struct comedi_subdevice *s) { unsigned int i; if (!s) return; if (s->minor < COMEDI_NUM_BOARD_MINORS || s->minor >= COMEDI_NUM_MINORS) return; i = s->minor - COMEDI_NUM_BOARD_MINORS; mutex_lock(&comedi_subdevice_minor_table_lock); if (s == comedi_subdevice_minor_table[i]) comedi_subdevice_minor_table[i] = NULL; mutex_unlock(&comedi_subdevice_minor_table_lock); if (s->class_dev) { device_destroy(&comedi_class, MKDEV(COMEDI_MAJOR, s->minor)); s->class_dev = NULL; } } static void comedi_cleanup_board_minors(void) { struct comedi_device *dev; unsigned int i; for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) { dev = comedi_clear_board_minor(i); comedi_free_board_dev(dev); } } static int __init comedi_init(void) { int i; int retval; pr_info("version " COMEDI_RELEASE " - http://www.comedi.org\n"); if (comedi_num_legacy_minors > COMEDI_NUM_BOARD_MINORS) { pr_err("invalid value for module parameter \"comedi_num_legacy_minors\". 
Valid values are 0 through %i.\n", COMEDI_NUM_BOARD_MINORS); return -EINVAL; } retval = register_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS, "comedi"); if (retval) return retval; cdev_init(&comedi_cdev, &comedi_fops); comedi_cdev.owner = THIS_MODULE; retval = kobject_set_name(&comedi_cdev.kobj, "comedi"); if (retval) goto out_unregister_chrdev_region; retval = cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); if (retval) goto out_unregister_chrdev_region; retval = class_register(&comedi_class); if (retval) { pr_err("failed to create class\n"); goto out_cdev_del; } /* create device files for legacy/manual use */ for (i = 0; i < comedi_num_legacy_minors; i++) { struct comedi_device *dev; dev = comedi_alloc_board_minor(NULL); if (IS_ERR(dev)) { retval = PTR_ERR(dev); goto out_cleanup_board_minors; } /* comedi_alloc_board_minor() locked the mutex */ lockdep_assert_held(&dev->mutex); mutex_unlock(&dev->mutex); } /* XXX requires /proc interface */ comedi_proc_init(); return 0; out_cleanup_board_minors: comedi_cleanup_board_minors(); class_unregister(&comedi_class); out_cdev_del: cdev_del(&comedi_cdev); out_unregister_chrdev_region: unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); return retval; } module_init(comedi_init); static void __exit comedi_cleanup(void) { comedi_cleanup_board_minors(); class_unregister(&comedi_class); cdev_del(&comedi_cdev); unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); comedi_proc_cleanup(); } module_exit(comedi_cleanup); MODULE_AUTHOR("https://www.comedi.org"); MODULE_DESCRIPTION("Comedi core module"); MODULE_LICENSE("GPL");
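/*
 * Userspace usage sketch (not part of the kernel sources above): a
 * minimal program exercising the file operations that comedi_fops
 * implements. The device path and the <linux/comedi.h> UAPI header
 * are assumptions, and error handling is abbreviated; read() only
 * returns sample data once an asynchronous command is running.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/comedi.h>	/* struct comedi_devinfo, COMEDI_DEVINFO */

int main(void)
{
	struct comedi_devinfo info;
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/dev/comedi0", O_RDWR);	/* hypothetical board node */
	if (fd < 0)
		return 1;

	/* Dispatched by comedi_unlocked_ioctl() to do_devinfo_ioctl(). */
	if (ioctl(fd, COMEDI_DEVINFO, &info) == 0)
		printf("driver %s, board %s, %u subdevices\n",
		       info.driver_name, info.board_name, info.n_subdevs);

	/* Serviced by comedi_read() on the current read subdevice. */
	n = read(fd, buf, sizeof(buf));
	if (n >= 0)
		printf("read %zd bytes of sample data\n", n);

	close(fd);
	return 0;
}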
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. * Author: Joerg Roedel <jroedel@suse.de> */ #define pr_fmt(fmt) "iommu: " fmt #include <linux/amba/bus.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/bits.h> #include <linux/bug.h> #include <linux/types.h> #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/host1x_context_bus.h> #include <linux/iommu.h> #include <linux/iommufd.h> #include <linux/idr.h> #include <linux/err.h> #include <linux/pci.h> #include <linux/pci-ats.h> #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/fsl/mc.h> #include <linux/module.h> #include <linux/cc_platform.h> #include <linux/cdx/cdx_bus.h> #include <trace/events/iommu.h> #include <linux/sched/mm.h> #include <linux/msi.h> #include <uapi/linux/iommufd.h> #include "dma-iommu.h" #include "iommu-priv.h" static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); static DEFINE_IDA(iommu_global_pasid_ida); static unsigned int iommu_def_domain_type __read_mostly; static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); static u32 iommu_cmd_line __read_mostly; /* Tags used with xa_tag_pointer() in group->pasid_array */ enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 }; struct iommu_group { struct kobject kobj; struct kobject *devices_kobj; struct list_head devices; struct xarray pasid_array; struct mutex mutex; void *iommu_data; void (*iommu_data_release)(void *iommu_data); char *name; int id; struct iommu_domain *default_domain; struct iommu_domain *blocking_domain; struct iommu_domain *domain; struct list_head entry; unsigned int owner_cnt; void *owner; }; struct group_device { struct list_head list; struct device *dev; char *name; }; /* Iterate over each struct group_device in a struct iommu_group */ #define for_each_group_device(group, pos) \ list_for_each_entry(pos, &(group)->devices, list) struct iommu_group_attribute { struct attribute attr; ssize_t (*show)(struct iommu_group *group, char *buf); ssize_t (*store)(struct iommu_group *group, const char *buf, size_t count); }; static const char * const iommu_group_resv_type_string[] = { [IOMMU_RESV_DIRECT] = "direct", [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", [IOMMU_RESV_RESERVED] = "reserved", [IOMMU_RESV_MSI] = "msi", [IOMMU_RESV_SW_MSI] = "msi", }; #define IOMMU_CMD_LINE_DMA_API BIT(0) #define IOMMU_CMD_LINE_STRICT BIT(1) static int bus_iommu_probe(const struct bus_type *bus); static int iommu_bus_notifier(struct notifier_block *nb, unsigned long action, void *data); static void iommu_release_device(struct device *dev); static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev, struct iommu_domain *old); static int __iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group); static struct iommu_domain 
*__iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, unsigned int flags); enum { IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0, }; static int __iommu_device_set_domain(struct iommu_group *group, struct device *dev, struct iommu_domain *new_domain, struct iommu_domain *old_domain, unsigned int flags); static int __iommu_group_set_domain_internal(struct iommu_group *group, struct iommu_domain *new_domain, unsigned int flags); static int __iommu_group_set_domain(struct iommu_group *group, struct iommu_domain *new_domain) { return __iommu_group_set_domain_internal(group, new_domain, 0); } static void __iommu_group_set_domain_nofail(struct iommu_group *group, struct iommu_domain *new_domain) { WARN_ON(__iommu_group_set_domain_internal( group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); } static int iommu_setup_default_domain(struct iommu_group *group, int target_type); static int iommu_create_device_direct_mappings(struct iommu_domain *domain, struct device *dev); static ssize_t iommu_group_store_type(struct iommu_group *group, const char *buf, size_t count); static struct group_device *iommu_group_alloc_device(struct iommu_group *group, struct device *dev); static void __iommu_group_free_device(struct iommu_group *group, struct group_device *grp_dev); static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, const struct iommu_ops *ops); #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ struct iommu_group_attribute iommu_group_attr_##_name = \ __ATTR(_name, _mode, _show, _store) #define to_iommu_group_attr(_attr) \ container_of(_attr, struct iommu_group_attribute, attr) #define to_iommu_group(_kobj) \ container_of(_kobj, struct iommu_group, kobj) static LIST_HEAD(iommu_device_list); static DEFINE_SPINLOCK(iommu_device_lock); static const struct bus_type * const iommu_buses[] = { &platform_bus_type, #ifdef CONFIG_PCI &pci_bus_type, #endif #ifdef CONFIG_ARM_AMBA &amba_bustype, #endif #ifdef CONFIG_FSL_MC_BUS &fsl_mc_bus_type, #endif #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS &host1x_context_device_bus_type, #endif #ifdef CONFIG_CDX_BUS &cdx_bus_type, #endif }; /* * Use a function instead of an array here because the domain-type is a * bit-field, so an array would waste memory. */ static const char *iommu_domain_type_str(unsigned int t) { switch (t) { case IOMMU_DOMAIN_BLOCKED: return "Blocked"; case IOMMU_DOMAIN_IDENTITY: return "Passthrough"; case IOMMU_DOMAIN_UNMANAGED: return "Unmanaged"; case IOMMU_DOMAIN_DMA: case IOMMU_DOMAIN_DMA_FQ: return "Translated"; case IOMMU_DOMAIN_PLATFORM: return "Platform"; default: return "Unknown"; } } static int __init iommu_subsys_init(void) { struct notifier_block *nb; if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) iommu_set_default_passthrough(false); else iommu_set_default_translated(false); if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); iommu_set_default_translated(false); } } if (!iommu_default_passthrough() && !iommu_dma_strict) iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ; pr_info("Default domain type: %s%s\n", iommu_domain_type_str(iommu_def_domain_type), (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? " (set via kernel command line)" : ""); if (!iommu_default_passthrough()) pr_info("DMA domain TLB invalidation policy: %s mode%s\n", iommu_dma_strict ? "strict" : "lazy", (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? 
" (set via kernel command line)" : ""); nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); if (!nb) return -ENOMEM; for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { nb[i].notifier_call = iommu_bus_notifier; bus_register_notifier(iommu_buses[i], &nb[i]); } return 0; } subsys_initcall(iommu_subsys_init); static int remove_iommu_group(struct device *dev, void *data) { if (dev->iommu && dev->iommu->iommu_dev == data) iommu_release_device(dev); return 0; } /** * iommu_device_register() - Register an IOMMU hardware instance * @iommu: IOMMU handle for the instance * @ops: IOMMU ops to associate with the instance * @hwdev: (optional) actual instance device, used for fwnode lookup * * Return: 0 on success, or an error. */ int iommu_device_register(struct iommu_device *iommu, const struct iommu_ops *ops, struct device *hwdev) { int err = 0; /* We need to be able to take module references appropriately */ if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) return -EINVAL; iommu->ops = ops; if (hwdev) iommu->fwnode = dev_fwnode(hwdev); spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) err = bus_iommu_probe(iommu_buses[i]); if (err) iommu_device_unregister(iommu); else WRITE_ONCE(iommu->ready, true); return err; } EXPORT_SYMBOL_GPL(iommu_device_register); void iommu_device_unregister(struct iommu_device *iommu) { for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); spin_lock(&iommu_device_lock); list_del(&iommu->list); spin_unlock(&iommu_device_lock); /* Pairs with the alloc in generic_single_device_group() */ iommu_group_put(iommu->singleton_group); iommu->singleton_group = NULL; } EXPORT_SYMBOL_GPL(iommu_device_unregister); #if IS_ENABLED(CONFIG_IOMMUFD_TEST) void iommu_device_unregister_bus(struct iommu_device *iommu, const struct bus_type *bus, struct notifier_block *nb) { bus_unregister_notifier(bus, nb); fwnode_remove_software_node(iommu->fwnode); iommu_device_unregister(iommu); } EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); /* * Register an iommu driver against a single bus. This is only used by iommufd * selftest to create a mock iommu driver. The caller must provide * some memory to hold a notifier_block. 
*/ int iommu_device_register_bus(struct iommu_device *iommu, const struct iommu_ops *ops, const struct bus_type *bus, struct notifier_block *nb) { int err; iommu->ops = ops; nb->notifier_call = iommu_bus_notifier; err = bus_register_notifier(bus, nb); if (err) return err; iommu->fwnode = fwnode_create_software_node(NULL, NULL); if (IS_ERR(iommu->fwnode)) { bus_unregister_notifier(bus, nb); return PTR_ERR(iommu->fwnode); } spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); err = bus_iommu_probe(bus); if (err) { iommu_device_unregister_bus(iommu, bus, nb); return err; } WRITE_ONCE(iommu->ready, true); return 0; } EXPORT_SYMBOL_GPL(iommu_device_register_bus); int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu) { int rc; mutex_lock(&iommu_probe_device_lock); rc = iommu_fwspec_init(dev, iommu->fwnode); mutex_unlock(&iommu_probe_device_lock); if (rc) return rc; rc = device_add(dev); if (rc) iommu_fwspec_free(dev); return rc; } EXPORT_SYMBOL_GPL(iommu_mock_device_add); #endif static struct dev_iommu *dev_iommu_get(struct device *dev) { struct dev_iommu *param = dev->iommu; lockdep_assert_held(&iommu_probe_device_lock); if (param) return param; param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) return NULL; mutex_init(&param->lock); dev->iommu = param; return param; } void dev_iommu_free(struct device *dev) { struct dev_iommu *param = dev->iommu; dev->iommu = NULL; if (param->fwspec) { fwnode_handle_put(param->fwspec->iommu_fwnode); kfree(param->fwspec); } kfree(param); } /* * Internal equivalent of device_iommu_mapped() for when we care that a device * actually has API ops, and don't want false positives from VFIO-only groups. */ static bool dev_has_iommu(struct device *dev) { return dev->iommu && dev->iommu->iommu_dev; } static u32 dev_iommu_get_max_pasids(struct device *dev) { u32 max_pasids = 0, bits = 0; int ret; if (dev_is_pci(dev)) { ret = pci_max_pasids(to_pci_dev(dev)); if (ret > 0) max_pasids = ret; } else { ret = device_property_read_u32(dev, "pasid-num-bits", &bits); if (!ret) max_pasids = 1UL << bits; } return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); } void dev_iommu_priv_set(struct device *dev, void *priv) { /* FSL_PAMU does something weird */ if (!IS_ENABLED(CONFIG_FSL_PAMU)) lockdep_assert_held(&iommu_probe_device_lock); dev->iommu->priv = priv; } EXPORT_SYMBOL_GPL(dev_iommu_priv_set); /* * Init the dev->iommu and dev->iommu_group in the struct device and get the * driver probed */ static int iommu_init_device(struct device *dev) { const struct iommu_ops *ops; struct iommu_device *iommu_dev; struct iommu_group *group; int ret; if (!dev_iommu_get(dev)) return -ENOMEM; /* * For FDT-based systems and ACPI IORT/VIOT, the common firmware parsing * is buried in the bus dma_configure path. Properly unpicking that is * still a big job, so for now just invoke the whole thing. The device * already having a driver bound means dma_configure has already run and * found no IOMMU to wait for, so there's no point calling it again. 
*/ if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) { mutex_unlock(&iommu_probe_device_lock); dev->bus->dma_configure(dev); mutex_lock(&iommu_probe_device_lock); /* If another instance finished the job for us, skip it */ if (!dev->iommu || dev->iommu_group) return -ENODEV; } /* * At this point, relevant devices either now have a fwspec which will * match ops registered with a non-NULL fwnode, or we can reasonably * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can * be present, and that any of their registered instances has suitable * ops for probing, and thus cheekily co-opt the same mechanism. */ ops = iommu_fwspec_ops(dev->iommu->fwspec); if (!ops) { ret = -ENODEV; goto err_free; } if (!try_module_get(ops->owner)) { ret = -EINVAL; goto err_free; } iommu_dev = ops->probe_device(dev); if (IS_ERR(iommu_dev)) { ret = PTR_ERR(iommu_dev); goto err_module_put; } dev->iommu->iommu_dev = iommu_dev; ret = iommu_device_link(iommu_dev, dev); if (ret) goto err_release; group = ops->device_group(dev); if (WARN_ON_ONCE(group == NULL)) group = ERR_PTR(-EINVAL); if (IS_ERR(group)) { ret = PTR_ERR(group); goto err_unlink; } dev->iommu_group = group; dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); if (ops->is_attach_deferred) dev->iommu->attach_deferred = ops->is_attach_deferred(dev); return 0; err_unlink: iommu_device_unlink(iommu_dev, dev); err_release: if (ops->release_device) ops->release_device(dev); err_module_put: module_put(ops->owner); err_free: dev->iommu->iommu_dev = NULL; dev_iommu_free(dev); return ret; } static void iommu_deinit_device(struct device *dev) { struct iommu_group *group = dev->iommu_group; const struct iommu_ops *ops = dev_iommu_ops(dev); lockdep_assert_held(&group->mutex); iommu_device_unlink(dev->iommu->iommu_dev, dev); /* * release_device() must stop using any attached domain on the device. * If there are still other devices in the group, they are not affected * by this callback. * * If the iommu driver provides release_domain, the core code ensures * that domain is attached prior to calling release_device. Drivers can * use this to enforce a translation on the idle iommu. Typically, the * global static blocked_domain is a good choice. * * Otherwise, the iommu driver must set the device to either an identity * or a blocking translation in release_device() and stop using any * domain pointer, as it is going to be freed. * * Regardless, if a delayed attach never occurred, then the release * should still avoid touching any hardware configuration either. */ if (!dev->iommu->attach_deferred && ops->release_domain) { struct iommu_domain *release_domain = ops->release_domain; /* * If the device requires direct mappings then it should not * be parked on a BLOCKED domain during release as that would * break the direct mappings. */ if (dev->iommu->require_direct && ops->identity_domain && release_domain == ops->blocked_domain) release_domain = ops->identity_domain; release_domain->ops->attach_dev(release_domain, dev, group->domain); } if (ops->release_device) ops->release_device(dev); /* * If this is the last driver to use the group then we must free the * domains before we do the module_put(). 
*/ if (list_empty(&group->devices)) { if (group->default_domain) { iommu_domain_free(group->default_domain); group->default_domain = NULL; } if (group->blocking_domain) { iommu_domain_free(group->blocking_domain); group->blocking_domain = NULL; } group->domain = NULL; } /* Caller must put iommu_group */ dev->iommu_group = NULL; module_put(ops->owner); dev_iommu_free(dev); #ifdef CONFIG_IOMMU_DMA dev->dma_iommu = false; #endif } static struct iommu_domain *pasid_array_entry_to_domain(void *entry) { if (xa_pointer_tag(entry) == IOMMU_PASID_ARRAY_DOMAIN) return xa_untag_pointer(entry); return ((struct iommu_attach_handle *)xa_untag_pointer(entry))->domain; } DEFINE_MUTEX(iommu_probe_device_lock); static int __iommu_probe_device(struct device *dev, struct list_head *group_list) { struct iommu_group *group; struct group_device *gdev; int ret; /* * Serialise to avoid races between IOMMU drivers registering in * parallel and/or the "replay" calls from ACPI/OF code via client * driver probe. Once the latter have been cleaned up we should * probably be able to use device_lock() here to minimise the scope, * but for now enforcing a simple global ordering is fine. */ lockdep_assert_held(&iommu_probe_device_lock); /* Device is probed already if in a group */ if (dev->iommu_group) return 0; ret = iommu_init_device(dev); if (ret) return ret; /* * And if we do now see any replay calls, they would indicate someone * misusing the dma_configure path outside bus code. */ if (dev->driver) dev_WARN(dev, "late IOMMU probe at driver bind, something fishy here!\n"); group = dev->iommu_group; gdev = iommu_group_alloc_device(group, dev); mutex_lock(&group->mutex); if (IS_ERR(gdev)) { ret = PTR_ERR(gdev); goto err_put_group; } /* * The gdev must be in the list before calling * iommu_setup_default_domain() */ list_add_tail(&gdev->list, &group->devices); WARN_ON(group->default_domain && !group->domain); if (group->default_domain) iommu_create_device_direct_mappings(group->default_domain, dev); if (group->domain) { ret = __iommu_device_set_domain(group, dev, group->domain, NULL, 0); if (ret) goto err_remove_gdev; } else if (!group->default_domain && !group_list) { ret = iommu_setup_default_domain(group, 0); if (ret) goto err_remove_gdev; } else if (!group->default_domain) { /* * With a group_list argument we defer the default_domain setup * to the caller by providing a de-duplicated list of groups * that need further setup. */ if (list_empty(&group->entry)) list_add_tail(&group->entry, group_list); } if (group->default_domain) iommu_setup_dma_ops(dev); mutex_unlock(&group->mutex); return 0; err_remove_gdev: list_del(&gdev->list); __iommu_group_free_device(group, gdev); err_put_group: iommu_deinit_device(dev); mutex_unlock(&group->mutex); iommu_group_put(group); return ret; } int iommu_probe_device(struct device *dev) { const struct iommu_ops *ops; int ret; mutex_lock(&iommu_probe_device_lock); ret = __iommu_probe_device(dev, NULL); mutex_unlock(&iommu_probe_device_lock); if (ret) return ret; ops = dev_iommu_ops(dev); if (ops->probe_finalize) ops->probe_finalize(dev); return 0; } static void __iommu_group_free_device(struct iommu_group *group, struct group_device *grp_dev) { struct device *dev = grp_dev->dev; sysfs_remove_link(group->devices_kobj, grp_dev->name); sysfs_remove_link(&dev->kobj, "iommu_group"); trace_remove_device_from_group(group->id, dev); /* * If the group has become empty then ownership must have been * released, and the current domain must be set back to NULL or * the default domain. 
*/ if (list_empty(&group->devices)) WARN_ON(group->owner_cnt || group->domain != group->default_domain); kfree(grp_dev->name); kfree(grp_dev); } /* Remove the iommu_group from the struct device. */ static void __iommu_group_remove_device(struct device *dev) { struct iommu_group *group = dev->iommu_group; struct group_device *device; mutex_lock(&group->mutex); for_each_group_device(group, device) { if (device->dev != dev) continue; list_del(&device->list); __iommu_group_free_device(group, device); if (dev_has_iommu(dev)) iommu_deinit_device(dev); else dev->iommu_group = NULL; break; } mutex_unlock(&group->mutex); /* * Pairs with the get in iommu_init_device() or * iommu_group_add_device() */ iommu_group_put(group); } static void iommu_release_device(struct device *dev) { struct iommu_group *group = dev->iommu_group; if (group) __iommu_group_remove_device(dev); /* Free any fwspec if no iommu_driver was ever attached */ if (dev->iommu) dev_iommu_free(dev); } static int __init iommu_set_def_domain_type(char *str) { bool pt; int ret; ret = kstrtobool(str, &pt); if (ret) return ret; if (pt) iommu_set_default_passthrough(true); else iommu_set_default_translated(true); return 0; } early_param("iommu.passthrough", iommu_set_def_domain_type); static int __init iommu_dma_setup(char *str) { int ret = kstrtobool(str, &iommu_dma_strict); if (!ret) iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; return ret; } early_param("iommu.strict", iommu_dma_setup); void iommu_set_dma_strict(void) { iommu_dma_strict = true; if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ) iommu_def_domain_type = IOMMU_DOMAIN_DMA; } static ssize_t iommu_group_attr_show(struct kobject *kobj, struct attribute *__attr, char *buf) { struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); struct iommu_group *group = to_iommu_group(kobj); ssize_t ret = -EIO; if (attr->show) ret = attr->show(group, buf); return ret; } static ssize_t iommu_group_attr_store(struct kobject *kobj, struct attribute *__attr, const char *buf, size_t count) { struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); struct iommu_group *group = to_iommu_group(kobj); ssize_t ret = -EIO; if (attr->store) ret = attr->store(group, buf, count); return ret; } static const struct sysfs_ops iommu_group_sysfs_ops = { .show = iommu_group_attr_show, .store = iommu_group_attr_store, }; static int iommu_group_create_file(struct iommu_group *group, struct iommu_group_attribute *attr) { return sysfs_create_file(&group->kobj, &attr->attr); } static void iommu_group_remove_file(struct iommu_group *group, struct iommu_group_attribute *attr) { sysfs_remove_file(&group->kobj, &attr->attr); } static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) { return sysfs_emit(buf, "%s\n", group->name); } /** * iommu_insert_resv_region - Insert a new region in the * list of reserved regions. * @new: new region to insert * @regions: list of regions * * Elements are sorted by start address and overlapping segments * of the same type are merged. 
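 *
 * For example (illustrative addresses): inserting a region covering
 * [0x3000, 0x4fff] of type IOMMU_RESV_DIRECT into a list already holding
 * same-type regions [0x1000, 0x1fff] and [0x4000, 0x5fff] leaves
 * [0x1000, 0x1fff] alone and merges the other two into [0x3000, 0x5fff];
 * an overlapping region of a different type would remain a separate entry.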
*/ static int iommu_insert_resv_region(struct iommu_resv_region *new, struct list_head *regions) { struct iommu_resv_region *iter, *tmp, *nr, *top; LIST_HEAD(stack); nr = iommu_alloc_resv_region(new->start, new->length, new->prot, new->type, GFP_KERNEL); if (!nr) return -ENOMEM; /* First add the new element based on start address sorting */ list_for_each_entry(iter, regions, list) { if (nr->start < iter->start || (nr->start == iter->start && nr->type <= iter->type)) break; } list_add_tail(&nr->list, &iter->list); /* Merge overlapping segments of type nr->type in @regions, if any */ list_for_each_entry_safe(iter, tmp, regions, list) { phys_addr_t top_end, iter_end = iter->start + iter->length - 1; /* no merge needed on elements of different types than @new */ if (iter->type != new->type) { list_move_tail(&iter->list, &stack); continue; } /* look for the last stack element of same type as @iter */ list_for_each_entry_reverse(top, &stack, list) if (top->type == iter->type) goto check_overlap; list_move_tail(&iter->list, &stack); continue; check_overlap: top_end = top->start + top->length - 1; if (iter->start > top_end + 1) { list_move_tail(&iter->list, &stack); } else { top->length = max(top_end, iter_end) - top->start + 1; list_del(&iter->list); kfree(iter); } } list_splice(&stack, regions); return 0; } static int iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, struct list_head *group_resv_regions) { struct iommu_resv_region *entry; int ret = 0; list_for_each_entry(entry, dev_resv_regions, list) { ret = iommu_insert_resv_region(entry, group_resv_regions); if (ret) break; } return ret; } int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head) { struct group_device *device; int ret = 0; mutex_lock(&group->mutex); for_each_group_device(group, device) { struct list_head dev_resv_regions; /* * Non-API groups still expose reserved_regions in sysfs, * so filter out calls that get here that way. 
*/ if (!dev_has_iommu(device->dev)) break; INIT_LIST_HEAD(&dev_resv_regions); iommu_get_resv_regions(device->dev, &dev_resv_regions); ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); iommu_put_resv_regions(device->dev, &dev_resv_regions); if (ret) break; } mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, char *buf) { struct iommu_resv_region *region, *next; struct list_head group_resv_regions; int offset = 0; INIT_LIST_HEAD(&group_resv_regions); iommu_get_group_resv_regions(group, &group_resv_regions); list_for_each_entry_safe(region, next, &group_resv_regions, list) { offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n", (long long)region->start, (long long)(region->start + region->length - 1), iommu_group_resv_type_string[region->type]); kfree(region); } return offset; } static ssize_t iommu_group_show_type(struct iommu_group *group, char *buf) { char *type = "unknown"; mutex_lock(&group->mutex); if (group->default_domain) { switch (group->default_domain->type) { case IOMMU_DOMAIN_BLOCKED: type = "blocked"; break; case IOMMU_DOMAIN_IDENTITY: type = "identity"; break; case IOMMU_DOMAIN_UNMANAGED: type = "unmanaged"; break; case IOMMU_DOMAIN_DMA: type = "DMA"; break; case IOMMU_DOMAIN_DMA_FQ: type = "DMA-FQ"; break; } } mutex_unlock(&group->mutex); return sysfs_emit(buf, "%s\n", type); } static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); static IOMMU_GROUP_ATTR(reserved_regions, 0444, iommu_group_show_resv_regions, NULL); static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, iommu_group_store_type); static void iommu_group_release(struct kobject *kobj) { struct iommu_group *group = to_iommu_group(kobj); pr_debug("Releasing group %d\n", group->id); if (group->iommu_data_release) group->iommu_data_release(group->iommu_data); ida_free(&iommu_group_ida, group->id); /* Domains are free'd by iommu_deinit_device() */ WARN_ON(group->default_domain); WARN_ON(group->blocking_domain); kfree(group->name); kfree(group); } static const struct kobj_type iommu_group_ktype = { .sysfs_ops = &iommu_group_sysfs_ops, .release = iommu_group_release, }; /** * iommu_group_alloc - Allocate a new group * * This function is called by an iommu driver to allocate a new iommu * group. The iommu group represents the minimum granularity of the iommu. * Upon successful return, the caller holds a reference to the supplied * group in order to hold the group until devices are added. Use * iommu_group_put() to release this extra reference count, allowing the * group to be automatically reclaimed once it has no devices or external * references. 
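 *
 * A minimal sketch of the expected calling pattern (illustrative only;
 * my_device_group() is a hypothetical ->device_group() callback):
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		return iommu_group_alloc();
 *	}
 *
 * The reference returned here is handed over to the core, which drops it
 * once the device has been added to the group.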
*/ struct iommu_group *iommu_group_alloc(void) { struct iommu_group *group; int ret; group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) return ERR_PTR(-ENOMEM); group->kobj.kset = iommu_group_kset; mutex_init(&group->mutex); INIT_LIST_HEAD(&group->devices); INIT_LIST_HEAD(&group->entry); xa_init(&group->pasid_array); ret = ida_alloc(&iommu_group_ida, GFP_KERNEL); if (ret < 0) { kfree(group); return ERR_PTR(ret); } group->id = ret; ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, NULL, "%d", group->id); if (ret) { kobject_put(&group->kobj); return ERR_PTR(ret); } group->devices_kobj = kobject_create_and_add("devices", &group->kobj); if (!group->devices_kobj) { kobject_put(&group->kobj); /* triggers .release & free */ return ERR_PTR(-ENOMEM); } /* * The devices_kobj holds a reference on the group kobject, so * as long as that exists so will the group. We can therefore * use the devices_kobj for reference counting. */ kobject_put(&group->kobj); ret = iommu_group_create_file(group, &iommu_group_attr_reserved_regions); if (ret) { kobject_put(group->devices_kobj); return ERR_PTR(ret); } ret = iommu_group_create_file(group, &iommu_group_attr_type); if (ret) { kobject_put(group->devices_kobj); return ERR_PTR(ret); } pr_debug("Allocated group %d\n", group->id); return group; } EXPORT_SYMBOL_GPL(iommu_group_alloc); /** * iommu_group_get_iommudata - retrieve iommu_data registered for a group * @group: the group * * iommu drivers can store data in the group for use when doing iommu * operations. This function provides a way to retrieve it. Caller * should hold a group reference. */ void *iommu_group_get_iommudata(struct iommu_group *group) { return group->iommu_data; } EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); /** * iommu_group_set_iommudata - set iommu_data for a group * @group: the group * @iommu_data: new data * @release: release function for iommu_data * * iommu drivers can store data in the group for use when doing iommu * operations. This function provides a way to set the data after * the group has been allocated. Caller should hold a group reference. */ void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, void (*release)(void *iommu_data)) { group->iommu_data = iommu_data; group->iommu_data_release = release; } EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); /** * iommu_group_set_name - set name for a group * @group: the group * @name: name * * Allow iommu driver to set a name for a group. When set it will * appear in a name attribute file under the group in sysfs. */ int iommu_group_set_name(struct iommu_group *group, const char *name) { int ret; if (group->name) { iommu_group_remove_file(group, &iommu_group_attr_name); kfree(group->name); group->name = NULL; if (!name) return 0; } group->name = kstrdup(name, GFP_KERNEL); if (!group->name) return -ENOMEM; ret = iommu_group_create_file(group, &iommu_group_attr_name); if (ret) { kfree(group->name); group->name = NULL; return ret; } return 0; } EXPORT_SYMBOL_GPL(iommu_group_set_name); static int iommu_create_device_direct_mappings(struct iommu_domain *domain, struct device *dev) { struct iommu_resv_region *entry; struct list_head mappings; unsigned long pg_size; int ret = 0; pg_size = domain->pgsize_bitmap ? 
1UL << __ffs(domain->pgsize_bitmap) : 0; INIT_LIST_HEAD(&mappings); if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) return -EINVAL; iommu_get_resv_regions(dev, &mappings); /* We need to consider overlapping regions for different devices */ list_for_each_entry(entry, &mappings, list) { dma_addr_t start, end, addr; size_t map_size = 0; if (entry->type == IOMMU_RESV_DIRECT) dev->iommu->require_direct = 1; if ((entry->type != IOMMU_RESV_DIRECT && entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || !iommu_is_dma_domain(domain)) continue; start = ALIGN(entry->start, pg_size); end = ALIGN(entry->start + entry->length, pg_size); for (addr = start; addr <= end; addr += pg_size) { phys_addr_t phys_addr; if (addr == end) goto map_end; phys_addr = iommu_iova_to_phys(domain, addr); if (!phys_addr) { map_size += pg_size; continue; } map_end: if (map_size) { ret = iommu_map(domain, addr - map_size, addr - map_size, map_size, entry->prot, GFP_KERNEL); if (ret) goto out; map_size = 0; } } } out: iommu_put_resv_regions(dev, &mappings); return ret; } /* This is undone by __iommu_group_free_device() */ static struct group_device *iommu_group_alloc_device(struct iommu_group *group, struct device *dev) { int ret, i = 0; struct group_device *device; device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return ERR_PTR(-ENOMEM); device->dev = dev; ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); if (ret) goto err_free_device; device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); rename: if (!device->name) { ret = -ENOMEM; goto err_remove_link; } ret = sysfs_create_link_nowarn(group->devices_kobj, &dev->kobj, device->name); if (ret) { if (ret == -EEXIST && i >= 0) { /* * Account for the slim chance of collision * and append an instance to the name. */ kfree(device->name); device->name = kasprintf(GFP_KERNEL, "%s.%d", kobject_name(&dev->kobj), i++); goto rename; } goto err_free_name; } trace_add_device_to_group(group->id, dev); dev_info(dev, "Adding to iommu group %d\n", group->id); return device; err_free_name: kfree(device->name); err_remove_link: sysfs_remove_link(&dev->kobj, "iommu_group"); err_free_device: kfree(device); dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); return ERR_PTR(ret); } /** * iommu_group_add_device - add a device to an iommu group * @group: the group into which to add the device (reference should be held) * @dev: the device * * This function is called by an iommu driver to add a device into a * group. Adding a device increments the group reference count. */ int iommu_group_add_device(struct iommu_group *group, struct device *dev) { struct group_device *gdev; gdev = iommu_group_alloc_device(group, dev); if (IS_ERR(gdev)) return PTR_ERR(gdev); iommu_group_ref_get(group); dev->iommu_group = group; mutex_lock(&group->mutex); list_add_tail(&gdev->list, &group->devices); mutex_unlock(&group->mutex); return 0; } EXPORT_SYMBOL_GPL(iommu_group_add_device); /** * iommu_group_remove_device - remove a device from its current group * @dev: device to be removed * * This function is called by an iommu driver to remove the device from * its current group. This decrements the iommu group reference count.
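 *
 * A sketch of the add/remove pairing from a driver's point of view
 * (illustrative only):
 *
 *	ret = iommu_group_add_device(group, dev);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_group_remove_device(dev);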
*/ void iommu_group_remove_device(struct device *dev) { struct iommu_group *group = dev->iommu_group; if (!group) return; dev_info(dev, "Removing from iommu group %d\n", group->id); __iommu_group_remove_device(dev); } EXPORT_SYMBOL_GPL(iommu_group_remove_device); #if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API) /** * iommu_group_mutex_assert - Check device group mutex lock * @dev: the device that has group param set * * This function is called by an iommu driver to check whether it holds * group mutex lock for the given device or not. * * Note that this function must be called after device group param is set. */ void iommu_group_mutex_assert(struct device *dev) { struct iommu_group *group = dev->iommu_group; lockdep_assert_held(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_group_mutex_assert); #endif static struct device *iommu_group_first_dev(struct iommu_group *group) { lockdep_assert_held(&group->mutex); return list_first_entry(&group->devices, struct group_device, list)->dev; } /** * iommu_group_for_each_dev - iterate over each device in the group * @group: the group * @data: caller opaque data to be passed to callback function * @fn: caller supplied callback function * * This function is called by group users to iterate over group devices. * Callers should hold a reference count to the group during callback. * The group->mutex is held across callbacks, which will block calls to * iommu_group_add/remove_device. */ int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) { struct group_device *device; int ret = 0; mutex_lock(&group->mutex); for_each_group_device(group, device) { ret = fn(device->dev, data); if (ret) break; } mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_group_for_each_dev); /** * iommu_group_get - Return the group for a device and increment reference * @dev: get the group that this device belongs to * * This function is called by iommu drivers and users to get the group * for the specified device. If found, the group is returned and the group * reference is incremented, else NULL. */ struct iommu_group *iommu_group_get(struct device *dev) { struct iommu_group *group = dev->iommu_group; if (group) kobject_get(group->devices_kobj); return group; } EXPORT_SYMBOL_GPL(iommu_group_get); /** * iommu_group_ref_get - Increment reference on a group * @group: the group to use, must not be NULL * * This function is called by iommu drivers to take additional references on an * existing group. Returns the given group for convenience. */ struct iommu_group *iommu_group_ref_get(struct iommu_group *group) { kobject_get(group->devices_kobj); return group; } EXPORT_SYMBOL_GPL(iommu_group_ref_get); /** * iommu_group_put - Decrement group reference * @group: the group to use * * This function is called by iommu drivers and users to release the * iommu group. Once the reference count is zero, the group is released. */ void iommu_group_put(struct iommu_group *group) { if (group) kobject_put(group->devices_kobj); } EXPORT_SYMBOL_GPL(iommu_group_put); /** * iommu_group_id - Return ID for a group * @group: the group to ID * * Return the unique ID for the group matching the sysfs group number.
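 *
 * For example, a group whose ID is 7 is the one userspace sees as
 * /sys/kernel/iommu_groups/7 (the value is illustrative).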
*/ int iommu_group_id(struct iommu_group *group) { return group->id; } EXPORT_SYMBOL_GPL(iommu_group_id); static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, unsigned long *devfns); /* * To consider a PCI device isolated, we require ACS to support Source * Validation, Request Redirection, Completer Redirection, and Upstream * Forwarding. This effectively means that devices cannot spoof their * requester ID, requests and completions cannot be redirected, and all * transactions are forwarded upstream, even as they pass through a * bridge where the target device is downstream. */ #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) /* * For multifunction devices which are not isolated from each other, find * all the other non-isolated functions and look for existing groups. For * each function, we also need to look for aliases to or from other devices * that may already have a group. */ static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, unsigned long *devfns) { struct pci_dev *tmp = NULL; struct iommu_group *group; if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) return NULL; for_each_pci_dev(tmp) { if (tmp == pdev || tmp->bus != pdev->bus || PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || pci_acs_enabled(tmp, REQ_ACS_FLAGS)) continue; group = get_pci_alias_group(tmp, devfns); if (group) { pci_dev_put(tmp); return group; } } return NULL; } /* * Look for aliases to or from the given device for existing groups. DMA * aliases are only supported on the same bus, therefore the search * space is quite small (especially since we're really only looking at PCIe * devices, and therefore only expect multiple slots on the root complex or * downstream switch ports). It's conceivable though that a pair of * multifunction devices could have aliases between them that would cause a * loop. To prevent this, we use a bitmap to track where we've been. */ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, unsigned long *devfns) { struct pci_dev *tmp = NULL; struct iommu_group *group; if (test_and_set_bit(pdev->devfn & 0xff, devfns)) return NULL; group = iommu_group_get(&pdev->dev); if (group) return group; for_each_pci_dev(tmp) { if (tmp == pdev || tmp->bus != pdev->bus) continue; /* We alias them or they alias us */ if (pci_devs_are_dma_aliases(pdev, tmp)) { group = get_pci_alias_group(tmp, devfns); if (group) { pci_dev_put(tmp); return group; } group = get_pci_function_alias_group(tmp, devfns); if (group) { pci_dev_put(tmp); return group; } } } return NULL; } struct group_for_pci_data { struct pci_dev *pdev; struct iommu_group *group; }; /* * DMA alias iterator callback, return the last seen device. Stop and return * the IOMMU group if we find one along the way. */ static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) { struct group_for_pci_data *data = opaque; data->pdev = pdev; data->group = iommu_group_get(&pdev->dev); return data->group != NULL; } /* * Generic device_group call-back function. It just allocates one * iommu-group per device. */ struct iommu_group *generic_device_group(struct device *dev) { return iommu_group_alloc(); } EXPORT_SYMBOL_GPL(generic_device_group); /* * Generic device_group call-back function. It just allocates one * iommu-group per iommu driver instance shared by every device * probed by that iommu driver.
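 *
 * A driver opts in by pointing its ops at this helper (sketch; my_iommu_ops
 * is a hypothetical driver's ops):
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.device_group = generic_single_device_group,
 *	};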
*/ struct iommu_group *generic_single_device_group(struct device *dev) { struct iommu_device *iommu = dev->iommu->iommu_dev; if (!iommu->singleton_group) { struct iommu_group *group; group = iommu_group_alloc(); if (IS_ERR(group)) return group; iommu->singleton_group = group; } return iommu_group_ref_get(iommu->singleton_group); } EXPORT_SYMBOL_GPL(generic_single_device_group); /* * Use standard PCI bus topology, isolation features, and DMA alias quirks * to find or create an IOMMU group for a device. */ struct iommu_group *pci_device_group(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct group_for_pci_data data; struct pci_bus *bus; struct iommu_group *group = NULL; u64 devfns[4] = { 0 }; if (WARN_ON(!dev_is_pci(dev))) return ERR_PTR(-EINVAL); /* * Find the upstream DMA alias for the device. A device must not * be aliased due to topology in order to have its own IOMMU group. * If we find an alias along the way that already belongs to a * group, use it. */ if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) return data.group; pdev = data.pdev; /* * Continue upstream from the point of minimum IOMMU granularity * due to aliases to the point where devices are protected from * peer-to-peer DMA by PCI ACS. Again, if we find an existing * group, use it. */ for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { if (!bus->self) continue; if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; pdev = bus->self; group = iommu_group_get(&pdev->dev); if (group) return group; } /* * Look for existing groups on device aliases. If we alias another * device or another device aliases us, use the same group. */ group = get_pci_alias_group(pdev, (unsigned long *)devfns); if (group) return group; /* * Look for existing groups on non-isolated functions on the same * slot and aliases of those functions, if any. No need to clear * the search bitmap, the tested devfns are still valid. */ group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); if (group) return group; /* No shared group found, allocate new */ return iommu_group_alloc(); } EXPORT_SYMBOL_GPL(pci_device_group); /* Get the IOMMU group for device on fsl-mc bus */ struct iommu_group *fsl_mc_device_group(struct device *dev) { struct device *cont_dev = fsl_mc_cont_dev(dev); struct iommu_group *group; group = iommu_group_get(cont_dev); if (!group) group = iommu_group_alloc(); return group; } EXPORT_SYMBOL_GPL(fsl_mc_device_group); static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev) { const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *domain; if (ops->identity_domain) return ops->identity_domain; if (ops->domain_alloc_identity) { domain = ops->domain_alloc_identity(dev); if (IS_ERR(domain)) return domain; } else { return ERR_PTR(-EOPNOTSUPP); } iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops); return domain; } static struct iommu_domain * __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) { struct device *dev = iommu_group_first_dev(group); struct iommu_domain *dom; if (group->default_domain && group->default_domain->type == req_type) return group->default_domain; /* * When allocating the DMA API domain assume that the driver is going to * use PASID and make sure the RID's domain is PASID compatible. */ if (req_type & __IOMMU_DOMAIN_PAGING) { dom = __iommu_paging_domain_alloc_flags(dev, req_type, dev->iommu->max_pasids ?
IOMMU_HWPT_ALLOC_PASID : 0); /* * If driver does not support PASID feature then * try to allocate non-PASID domain */ if (PTR_ERR(dom) == -EOPNOTSUPP) dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0); return dom; } if (req_type == IOMMU_DOMAIN_IDENTITY) return __iommu_alloc_identity_domain(dev); return ERR_PTR(-EINVAL); } /* * req_type of 0 means "auto" which means to select a domain based on * iommu_def_domain_type or what the driver actually supports. */ static struct iommu_domain * iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) { const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group)); struct iommu_domain *dom; lockdep_assert_held(&group->mutex); /* * Allow legacy drivers to specify the domain that will be the default * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM * domain. Do not use in new drivers. */ if (ops->default_domain) { if (req_type != ops->default_domain->type) return ERR_PTR(-EINVAL); return ops->default_domain; } if (req_type) return __iommu_group_alloc_default_domain(group, req_type); /* The driver gave no guidance on what type to use, try the default */ dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); if (!IS_ERR(dom)) return dom; /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) return ERR_PTR(-EINVAL); dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); if (IS_ERR(dom)) return dom; pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", iommu_def_domain_type, group->name); return dom; } struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) { return group->default_domain; } static int probe_iommu_group(struct device *dev, void *data) { struct list_head *group_list = data; int ret; mutex_lock(&iommu_probe_device_lock); ret = __iommu_probe_device(dev, group_list); mutex_unlock(&iommu_probe_device_lock); if (ret == -ENODEV) ret = 0; return ret; } static int iommu_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; if (action == BUS_NOTIFY_ADD_DEVICE) { int ret; ret = iommu_probe_device(dev); return (ret) ? NOTIFY_DONE : NOTIFY_OK; } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { iommu_release_device(dev); return NOTIFY_OK; } return 0; } /* * Combine the driver's chosen def_domain_type across all the devices in a * group. Drivers must give a consistent result. */ static int iommu_get_def_domain_type(struct iommu_group *group, struct device *dev, int cur_type) { const struct iommu_ops *ops = dev_iommu_ops(dev); int type; if (ops->default_domain) { /* * Drivers that declare a global static default_domain will * always choose that. */ type = ops->default_domain->type; } else { if (ops->def_domain_type) type = ops->def_domain_type(dev); else return cur_type; } if (!type || cur_type == type) return cur_type; if (!cur_type) return type; dev_err_ratelimited( dev, "IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n", iommu_domain_type_str(cur_type), iommu_domain_type_str(type), group->id); /* * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY * takes precedence. */ if (type == IOMMU_DOMAIN_IDENTITY) return type; return cur_type; } /* * A target_type of 0 will select the best domain type. 0 can be returned in * this case meaning the global default should be used. 
*/ static int iommu_get_default_domain_type(struct iommu_group *group, int target_type) { struct device *untrusted = NULL; struct group_device *gdev; int driver_type = 0; lockdep_assert_held(&group->mutex); /* * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an * identity_domain and it will automatically become their default * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain. * Override the selection to IDENTITY. */ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) && IS_ENABLED(CONFIG_IOMMU_DMA))); driver_type = IOMMU_DOMAIN_IDENTITY; } for_each_group_device(group, gdev) { driver_type = iommu_get_def_domain_type(group, gdev->dev, driver_type); if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) { /* * No ARM32 using systems will set untrusted, it cannot * work. */ if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))) return -1; untrusted = gdev->dev; } } /* * If the common dma ops are not selected in kconfig then we cannot use * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been * selected. */ if (!IS_ENABLED(CONFIG_IOMMU_DMA)) { if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA)) return -1; if (!driver_type) driver_type = IOMMU_DOMAIN_IDENTITY; } if (untrusted) { if (driver_type && driver_type != IOMMU_DOMAIN_DMA) { dev_err_ratelimited( untrusted, "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n", group->id, iommu_domain_type_str(driver_type)); return -1; } driver_type = IOMMU_DOMAIN_DMA; } if (target_type) { if (driver_type && target_type != driver_type) return -1; return target_type; } return driver_type; } static void iommu_group_do_probe_finalize(struct device *dev) { const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->probe_finalize) ops->probe_finalize(dev); } static int bus_iommu_probe(const struct bus_type *bus) { struct iommu_group *group, *next; LIST_HEAD(group_list); int ret; ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); if (ret) return ret; list_for_each_entry_safe(group, next, &group_list, entry) { struct group_device *gdev; mutex_lock(&group->mutex); /* Remove item from the list */ list_del_init(&group->entry); /* * We go to the trouble of deferred default domain creation so * that the cross-group default domain type and the setup of the * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios. */ ret = iommu_setup_default_domain(group, 0); if (ret) { mutex_unlock(&group->mutex); return ret; } for_each_group_device(group, gdev) iommu_setup_dma_ops(gdev->dev); mutex_unlock(&group->mutex); /* * FIXME: Mis-locked because the ops->probe_finalize() call-back * of some IOMMU drivers calls arm_iommu_attach_device() which * in-turn might call back into IOMMU core code, where it tries * to take group->mutex, resulting in a deadlock. */ for_each_group_device(group, gdev) iommu_group_do_probe_finalize(gdev->dev); } return 0; } /** * device_iommu_capable() - check for a general IOMMU capability * @dev: device to which the capability would be relevant, if available * @cap: IOMMU capability * * Return: true if an IOMMU is present and supports the given capability * for the given device, otherwise false.
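 *
 * A typical caller sketch (illustrative only):
 *
 *	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		return -EINVAL;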
*/ bool device_iommu_capable(struct device *dev, enum iommu_cap cap) { const struct iommu_ops *ops; if (!dev_has_iommu(dev)) return false; ops = dev_iommu_ops(dev); if (!ops->capable) return false; return ops->capable(dev, cap); } EXPORT_SYMBOL_GPL(device_iommu_capable); /** * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() * for a group * @group: Group to query * * IOMMU groups should not have differing values of * msi_device_has_isolated_msi() for devices in a group. However nothing * directly prevents this, so ensure mistakes don't result in isolation failures * by checking that all the devices are the same. */ bool iommu_group_has_isolated_msi(struct iommu_group *group) { struct group_device *group_dev; bool ret = true; mutex_lock(&group->mutex); for_each_group_device(group, group_dev) ret &= msi_device_has_isolated_msi(group_dev->dev); mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi); /** * iommu_set_fault_handler() - set a fault handler for an iommu domain * @domain: iommu domain * @handler: fault handler * @token: user data, will be passed back to the fault handler * * This function should be used by IOMMU users which want to be notified * whenever an IOMMU fault happens. * * The fault handler itself should return 0 on success, and an appropriate * error code otherwise. */ void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token) { if (WARN_ON(!domain || domain->cookie_type != IOMMU_COOKIE_NONE)) return; domain->cookie_type = IOMMU_COOKIE_FAULT_HANDLER; domain->handler = handler; domain->handler_token = token; } EXPORT_SYMBOL_GPL(iommu_set_fault_handler); static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, const struct iommu_ops *ops) { domain->type = type; domain->owner = ops; if (!domain->ops) domain->ops = ops->default_domain_ops; } static struct iommu_domain * __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, unsigned int flags) { const struct iommu_ops *ops; struct iommu_domain *domain; if (!dev_has_iommu(dev)) return ERR_PTR(-ENODEV); ops = dev_iommu_ops(dev); if (ops->domain_alloc_paging && !flags) domain = ops->domain_alloc_paging(dev); else if (ops->domain_alloc_paging_flags) domain = ops->domain_alloc_paging_flags(dev, flags, NULL); #if IS_ENABLED(CONFIG_FSL_PAMU) else if (ops->domain_alloc && !flags) domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); #endif else return ERR_PTR(-EOPNOTSUPP); if (IS_ERR(domain)) return domain; if (!domain) return ERR_PTR(-ENOMEM); iommu_domain_init(domain, type, ops); return domain; } /** * iommu_paging_domain_alloc_flags() - Allocate a paging domain * @dev: device for which the domain is allocated * @flags: Bitmap of iommufd_hwpt_alloc_flags * * Allocate a paging domain which will be managed by a kernel driver. Return * allocated domain if successful, or an ERR pointer for failure. 
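 *
 * A minimal caller sketch (illustrative only):
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_paging_domain_alloc_flags(dev, 0);
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *	...
 *	iommu_domain_free(domain);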
*/ struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags) { return __iommu_paging_domain_alloc_flags(dev, IOMMU_DOMAIN_UNMANAGED, flags); } EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags); void iommu_domain_free(struct iommu_domain *domain) { switch (domain->cookie_type) { case IOMMU_COOKIE_DMA_IOVA: iommu_put_dma_cookie(domain); break; case IOMMU_COOKIE_DMA_MSI: iommu_put_msi_cookie(domain); break; case IOMMU_COOKIE_SVA: mmdrop(domain->mm); break; default: break; } if (domain->ops->free) domain->ops->free(domain); } EXPORT_SYMBOL_GPL(iommu_domain_free); /* * Put the group's domain back to the appropriate core-owned domain - either the * standard kernel-mode DMA configuration or an all-DMA-blocked domain. */ static void __iommu_group_set_core_domain(struct iommu_group *group) { struct iommu_domain *new_domain; if (group->owner) new_domain = group->blocking_domain; else new_domain = group->default_domain; __iommu_group_set_domain_nofail(group, new_domain); } static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev, struct iommu_domain *old) { int ret; if (unlikely(domain->ops->attach_dev == NULL)) return -ENODEV; ret = domain->ops->attach_dev(domain, dev, old); if (ret) return ret; dev->iommu->attach_deferred = 0; trace_attach_device_to_domain(dev); return 0; } /** * iommu_attach_device - Attach an IOMMU domain to a device * @domain: IOMMU domain to attach * @dev: Device that will be attached * * Returns 0 on success and error code on failure * * Note that EINVAL can be treated as a soft failure, indicating * that certain configuration of the domain is incompatible with * the device. In this case attaching a different domain to the * device may succeed. */ int iommu_attach_device(struct iommu_domain *domain, struct device *dev) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; int ret; if (!group) return -ENODEV; /* * Lock the group to make sure the device-count doesn't * change while we are attaching */ mutex_lock(&group->mutex); ret = -EINVAL; if (list_count_nodes(&group->devices) != 1) goto out_unlock; ret = __iommu_attach_group(domain, group); out_unlock: mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_attach_device); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) { if (dev->iommu && dev->iommu->attach_deferred) return __iommu_attach_device(domain, dev, NULL); return 0; } void iommu_detach_device(struct iommu_domain *domain, struct device *dev) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; if (!group) return; mutex_lock(&group->mutex); if (WARN_ON(domain != group->domain) || WARN_ON(list_count_nodes(&group->devices) != 1)) goto out_unlock; __iommu_group_set_core_domain(group); out_unlock: mutex_unlock(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_detach_device); struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; if (!group) return NULL; return group->domain; } EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); /* * For IOMMU_DOMAIN_DMA implementations which already provide their own * guarantees that the group and its default domain are valid and correct. 
*/ struct iommu_domain *iommu_get_dma_domain(struct device *dev) { return dev->iommu_group->default_domain; } static void *iommu_make_pasid_array_entry(struct iommu_domain *domain, struct iommu_attach_handle *handle) { if (handle) { handle->domain = domain; return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE); } return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN); } static bool domain_iommu_ops_compatible(const struct iommu_ops *ops, struct iommu_domain *domain) { if (domain->owner == ops) return true; /* For static domains, owner isn't set. */ if (domain == ops->blocked_domain || domain == ops->identity_domain) return true; return false; } static int __iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { struct device *dev; if (group->domain && group->domain != group->default_domain && group->domain != group->blocking_domain) return -EBUSY; dev = iommu_group_first_dev(group); if (!dev_has_iommu(dev) || !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain)) return -EINVAL; return __iommu_group_set_domain(group, domain); } /** * iommu_attach_group - Attach an IOMMU domain to an IOMMU group * @domain: IOMMU domain to attach * @group: IOMMU group that will be attached * * Returns 0 on success and error code on failure * * Note that EINVAL can be treated as a soft failure, indicating * that certain configuration of the domain is incompatible with * the group. In this case attaching a different domain to the * group may succeed. */ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { int ret; mutex_lock(&group->mutex); ret = __iommu_attach_group(domain, group); mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_attach_group); static int __iommu_device_set_domain(struct iommu_group *group, struct device *dev, struct iommu_domain *new_domain, struct iommu_domain *old_domain, unsigned int flags) { int ret; /* * If the device requires IOMMU_RESV_DIRECT then we cannot allow * the blocking domain to be attached as it does not contain the * required 1:1 mapping. This test effectively excludes the device * being used with iommu_group_claim_dma_owner() which will block * vfio and iommufd as well. */ if (dev->iommu->require_direct && (new_domain->type == IOMMU_DOMAIN_BLOCKED || new_domain == group->blocking_domain)) { dev_warn(dev, "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n"); return -EINVAL; } if (dev->iommu->attach_deferred) { if (new_domain == group->default_domain) return 0; dev->iommu->attach_deferred = 0; } ret = __iommu_attach_device(new_domain, dev, old_domain); if (ret) { /* * If we have a blocking domain then try to attach that in hopes * of avoiding a UAF. Modern drivers should implement blocking * domains as global statics that cannot fail. */ if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) && group->blocking_domain && group->blocking_domain != new_domain) __iommu_attach_device(group->blocking_domain, dev, old_domain); return ret; } return 0; } /* * If 0 is returned the group's domain is new_domain. If an error is returned * then the group's domain will be set back to the existing domain unless * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's * domain is left inconsistent. It is a driver bug to fail attach with a * previously good domain. We try to avoid a kernel UAF because of this.
* * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU * API works on domains and devices. Bridge that gap by iterating over the * devices in a group. Ideally we'd have a single device which represents the * requestor ID of the group, but we also allow IOMMU drivers to create policy * defined minimum sets, where the physical hardware may be able to distinguish * members, but we wish to group them at a higher level (ex. untrusted * multi-function PCI devices). Thus we attach each device. */ static int __iommu_group_set_domain_internal(struct iommu_group *group, struct iommu_domain *new_domain, unsigned int flags) { struct group_device *last_gdev; struct group_device *gdev; int result; int ret; lockdep_assert_held(&group->mutex); if (group->domain == new_domain) return 0; if (WARN_ON(!new_domain)) return -EINVAL; /* * Changing the domain is done by calling attach_dev() on the new * domain. This switch does not have to be atomic and DMA can be * discarded during the transition. DMA must only be able to access * either new_domain or group->domain, never something else. */ result = 0; for_each_group_device(group, gdev) { ret = __iommu_device_set_domain(group, gdev->dev, new_domain, group->domain, flags); if (ret) { result = ret; /* * Keep trying the other devices in the group. If a * driver fails attach to an otherwise good domain, and * does not support blocking domains, it should at least * drop its reference on the current domain so we don't * UAF. */ if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) continue; goto err_revert; } } group->domain = new_domain; return result; err_revert: /* * This is called in error unwind paths. A well behaved driver should * always allow us to attach to a domain that was already attached. */ last_gdev = gdev; for_each_group_device(group, gdev) { /* No need to revert the last gdev that failed to set domain */ if (gdev == last_gdev) break; /* * A NULL domain can happen only for first probe, in which case * we leave group->domain as NULL and let release clean * everything up.
*/ if (group->domain) WARN_ON(__iommu_device_set_domain( group, gdev->dev, group->domain, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); } return ret; } void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) { mutex_lock(&group->mutex); __iommu_group_set_core_domain(group); mutex_unlock(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_detach_group); phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { if (domain->type == IOMMU_DOMAIN_IDENTITY) return iova; if (domain->type == IOMMU_DOMAIN_BLOCKED) return 0; return domain->ops->iova_to_phys(domain, iova); } EXPORT_SYMBOL_GPL(iommu_iova_to_phys); static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, size_t *count) { unsigned int pgsize_idx, pgsize_idx_next; unsigned long pgsizes; size_t offset, pgsize, pgsize_next; size_t offset_end; unsigned long addr_merge = paddr | iova; /* Page sizes supported by the hardware and small enough for @size */ pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); /* Constrain the page sizes further based on the maximum alignment */ if (likely(addr_merge)) pgsizes &= GENMASK(__ffs(addr_merge), 0); /* Make sure we have at least one suitable page size */ BUG_ON(!pgsizes); /* Pick the biggest page size remaining */ pgsize_idx = __fls(pgsizes); pgsize = BIT(pgsize_idx); if (!count) return pgsize; /* Find the next biggest supported page size, if it exists */ pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); if (!pgsizes) goto out_set_count; pgsize_idx_next = __ffs(pgsizes); pgsize_next = BIT(pgsize_idx_next); /* * There's no point trying a bigger page size unless the virtual * and physical addresses are similarly offset within the larger page. */ if ((iova ^ paddr) & (pgsize_next - 1)) goto out_set_count; /* Calculate the offset to the next page size alignment boundary */ offset = pgsize_next - (addr_merge & (pgsize_next - 1)); /* * If size is big enough to accommodate the larger page, reduce * the number of smaller pages.
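 *
 * For example (illustrative numbers, assuming 4K and 2M page sizes and
 * equally aligned iova and paddr): a map at iova 0x1ff000 for 0x400000
 * bytes first yields a single 4K page, so that the next iteration starts
 * at the 2M boundary 0x200000 where 2M pages can be used for the
 * remainder.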
*/ if (!check_add_overflow(offset, pgsize_next, &offset_end) && offset_end <= size) size = offset; out_set_count: *count = size >> pgsize_idx; return pgsize; } int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { const struct iommu_domain_ops *ops = domain->ops; unsigned long orig_iova = iova; unsigned int min_pagesz; size_t orig_size = size; phys_addr_t orig_paddr = paddr; int ret = 0; might_sleep_if(gfpflags_allow_blocking(gfp)); if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) return -EINVAL; if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) return -ENODEV; /* Discourage passing strange GFP flags */ if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM))) return -EINVAL; /* find out the minimum page size supported */ min_pagesz = 1 << __ffs(domain->pgsize_bitmap); /* * both the virtual address and the physical one, as well as * the size of the mapping, must be aligned (at least) to the * size of the smallest page supported by the hardware */ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", iova, &paddr, size, min_pagesz); return -EINVAL; } pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); while (size) { size_t pgsize, count, mapped = 0; pgsize = iommu_pgsize(domain, iova, paddr, size, &count); pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", iova, &paddr, pgsize, count); ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, gfp, &mapped); /* * Some pages may have been mapped, even if an error occurred, * so we should account for those so they can be unmapped. */ size -= mapped; if (ret) break; iova += mapped; paddr += mapped; } /* unroll mapping in case something went wrong */ if (ret) iommu_unmap(domain, orig_iova, orig_size - size); else trace_map(orig_iova, orig_paddr, orig_size); return ret; } int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size) { const struct iommu_domain_ops *ops = domain->ops; if (!ops->iotlb_sync_map) return 0; return ops->iotlb_sync_map(domain, iova, size); } int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { int ret; ret = iommu_map_nosync(domain, iova, paddr, size, prot, gfp); if (ret) return ret; ret = iommu_sync_map(domain, iova, size); if (ret) iommu_unmap(domain, iova, size); return ret; } EXPORT_SYMBOL_GPL(iommu_map); static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { const struct iommu_domain_ops *ops = domain->ops; size_t unmapped_page, unmapped = 0; unsigned long orig_iova = iova; unsigned int min_pagesz; if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) return 0; if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) return 0; /* find out the minimum page size supported */ min_pagesz = 1 << __ffs(domain->pgsize_bitmap); /* * The virtual address, as well as the size of the mapping, must be * aligned (at least) to the size of the smallest page supported * by the hardware */ if (!IS_ALIGNED(iova | size, min_pagesz)) { pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", iova, size, min_pagesz); return 0; } pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); /* * Keep iterating until we either unmap 'size' bytes (or more) * or we hit an area that isn't mapped. 
*/ while (unmapped < size) { size_t pgsize, count; pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); if (!unmapped_page) break; pr_debug("unmapped: iova 0x%lx size 0x%zx\n", iova, unmapped_page); iova += unmapped_page; unmapped += unmapped_page; } trace_unmap(orig_iova, size, unmapped); return unmapped; } /** * iommu_unmap() - Remove mappings from a range of IOVA * @domain: Domain to manipulate * @iova: IO virtual address to start * @size: Length of the range starting from @iova * * iommu_unmap() will remove a translation created by iommu_map(). It cannot * subdivide a mapping created by iommu_map(), so it should be called with IOVA * ranges that match what was passed to iommu_map(). The range can aggregate * contiguous iommu_map() calls so long as no individual range is split. * * Returns: Number of bytes of IOVA unmapped. iova + res will be the point * unmapping stopped. */ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { struct iommu_iotlb_gather iotlb_gather; size_t ret; iommu_iotlb_gather_init(&iotlb_gather); ret = __iommu_unmap(domain, iova, size, &iotlb_gather); iommu_iotlb_sync(domain, &iotlb_gather); return ret; } EXPORT_SYMBOL_GPL(iommu_unmap); /** * iommu_unmap_fast() - Remove mappings from a range of IOVA without IOTLB sync * @domain: Domain to manipulate * @iova: IO virtual address to start * @size: Length of the range starting from @iova * @iotlb_gather: range information for a pending IOTLB flush * * iommu_unmap_fast() will remove a translation created by iommu_map(). * It can't subdivide a mapping created by iommu_map(), so it should be * called with IOVA ranges that match what was passed to iommu_map(). The * range can aggregate contiguous iommu_map() calls so long as no individual * range is split. * * Basically iommu_unmap_fast() is the same as iommu_unmap() but for callers * which manage the IOTLB flushing externally to perform a batched sync. * * Returns: Number of bytes of IOVA unmapped. iova + res will be the point * unmapping stopped. */ size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { return __iommu_unmap(domain, iova, size, iotlb_gather); } EXPORT_SYMBOL_GPL(iommu_unmap_fast); ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp) { size_t len = 0, mapped = 0; phys_addr_t start; unsigned int i = 0; int ret; while (i <= nents) { phys_addr_t s_phys = sg_phys(sg); if (len && s_phys != start + len) { ret = iommu_map_nosync(domain, iova + mapped, start, len, prot, gfp); if (ret) goto out_err; mapped += len; len = 0; } if (sg_dma_is_bus_address(sg)) goto next; if (len) { len += sg->length; } else { len = sg->length; start = s_phys; } next: if (++i < nents) sg = sg_next(sg); } ret = iommu_sync_map(domain, iova, mapped); if (ret) goto out_err; return mapped; out_err: /* undo mappings already done */ iommu_unmap(domain, iova, mapped); return ret; } EXPORT_SYMBOL_GPL(iommu_map_sg); /** * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework * @domain: the iommu domain where the fault has happened * @dev: the device where the fault has happened * @iova: the faulting address * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 
* * This function should be called by the low-level IOMMU implementations * whenever IOMMU faults happen, to allow high-level users, that are * interested in such events, to know about them. * * This event may be useful for several possible use cases: * - mere logging of the event * - dynamic TLB/PTE loading * - if restarting of the faulting device is required * * Returns 0 on success and an appropriate error code otherwise (if dynamic * PTE/TLB loading will one day be supported, implementations will be able * to tell whether it succeeded or not according to this return value). * * Specifically, -ENOSYS is returned if a fault handler isn't installed * (though fault handlers can also return -ENOSYS, in case they want to * elicit the default behavior of the IOMMU drivers). */ int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags) { int ret = -ENOSYS; /* * if upper layers showed interest and installed a fault handler, * invoke it. */ if (domain->cookie_type == IOMMU_COOKIE_FAULT_HANDLER && domain->handler) ret = domain->handler(domain, dev, iova, flags, domain->handler_token); trace_io_page_fault(dev, iova, flags); return ret; } EXPORT_SYMBOL_GPL(report_iommu_fault); static int __init iommu_init(void) { iommu_group_kset = kset_create_and_add("iommu_groups", NULL, kernel_kobj); BUG_ON(!iommu_group_kset); iommu_debugfs_setup(); return 0; } core_initcall(iommu_init); int iommu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirk) { if (domain->type != IOMMU_DOMAIN_UNMANAGED) return -EINVAL; if (!domain->ops->set_pgtable_quirks) return -EINVAL; return domain->ops->set_pgtable_quirks(domain, quirk); } EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); /** * iommu_get_resv_regions - get reserved regions * @dev: device for which to get reserved regions * @list: reserved region list for device * * This returns a list of reserved IOVA regions specific to this device. * A domain user should not map IOVA in these ranges. */ void iommu_get_resv_regions(struct device *dev, struct list_head *list) { const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->get_resv_regions) ops->get_resv_regions(dev, list); } EXPORT_SYMBOL_GPL(iommu_get_resv_regions); /** * iommu_put_resv_regions - release reserved regions * @dev: device for which to free reserved regions * @list: reserved region list for device * * This releases a reserved region list acquired by iommu_get_resv_regions(). 
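 *
 * A minimal usage sketch (hypothetical caller; handle_range() is an
 * illustrative placeholder, not a real helper):
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		handle_range(region->start, region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);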
*/ void iommu_put_resv_regions(struct device *dev, struct list_head *list) { struct iommu_resv_region *entry, *next; list_for_each_entry_safe(entry, next, list, list) { if (entry->free) entry->free(dev, entry); else kfree(entry); } } EXPORT_SYMBOL(iommu_put_resv_regions); struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type, gfp_t gfp) { struct iommu_resv_region *region; region = kzalloc(sizeof(*region), gfp); if (!region) return NULL; INIT_LIST_HEAD(®ion->list); region->start = start; region->length = length; region->prot = prot; region->type = type; return region; } EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); void iommu_set_default_passthrough(bool cmd_line) { if (cmd_line) iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; } void iommu_set_default_translated(bool cmd_line) { if (cmd_line) iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; iommu_def_domain_type = IOMMU_DOMAIN_DMA; } bool iommu_default_passthrough(void) { return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; } EXPORT_SYMBOL_GPL(iommu_default_passthrough); static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode) { const struct iommu_device *iommu, *ret = NULL; spin_lock(&iommu_device_lock); list_for_each_entry(iommu, &iommu_device_list, list) if (iommu->fwnode == fwnode) { ret = iommu; break; } spin_unlock(&iommu_device_lock); return ret; } const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode) { const struct iommu_device *iommu = iommu_from_fwnode(fwnode); return iommu ? iommu->ops : NULL; } int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode) { const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode); struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); if (!iommu) return driver_deferred_probe_check_state(dev); if (!dev->iommu && !READ_ONCE(iommu->ready)) return -EPROBE_DEFER; if (fwspec) return iommu->ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL; if (!dev_iommu_get(dev)) return -ENOMEM; /* Preallocate for the overwhelmingly common case of 1 ID */ fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); if (!fwspec) return -ENOMEM; fwnode_handle_get(iommu_fwnode); fwspec->iommu_fwnode = iommu_fwnode; dev_iommu_fwspec_set(dev, fwspec); return 0; } EXPORT_SYMBOL_GPL(iommu_fwspec_init); void iommu_fwspec_free(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); if (fwspec) { fwnode_handle_put(fwspec->iommu_fwnode); kfree(fwspec); dev_iommu_fwspec_set(dev, NULL); } } int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); int i, new_num; if (!fwspec) return -EINVAL; new_num = fwspec->num_ids + num_ids; if (new_num > 1) { fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), GFP_KERNEL); if (!fwspec) return -ENOMEM; dev_iommu_fwspec_set(dev, fwspec); } for (i = 0; i < num_ids; i++) fwspec->ids[fwspec->num_ids + i] = ids[i]; fwspec->num_ids = new_num; return 0; } EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); /** * iommu_setup_default_domain - Set the default_domain for the group * @group: Group to change * @target_type: Domain type to set as the default_domain * * Allocate a default domain and set it as the current domain on the group. If * the group already has a default domain it will be changed to the target_type. * When target_type is 0 the default domain is selected based on driver and * system preferences. 
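 *
 * The caller must hold group->mutex; the function asserts this with
 * lockdep_assert_held().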
*/ static int iommu_setup_default_domain(struct iommu_group *group, int target_type) { struct iommu_domain *old_dom = group->default_domain; struct group_device *gdev; struct iommu_domain *dom; bool direct_failed; int req_type; int ret; lockdep_assert_held(&group->mutex); req_type = iommu_get_default_domain_type(group, target_type); if (req_type < 0) return -EINVAL; dom = iommu_group_alloc_default_domain(group, req_type); if (IS_ERR(dom)) return PTR_ERR(dom); if (group->default_domain == dom) return 0; if (iommu_is_dma_domain(dom)) { ret = iommu_get_dma_cookie(dom); if (ret) { iommu_domain_free(dom); return ret; } } /* * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be * mapped before their device is attached, in order to guarantee * continuity with any FW activity */ direct_failed = false; for_each_group_device(group, gdev) { if (iommu_create_device_direct_mappings(dom, gdev->dev)) { direct_failed = true; dev_warn_once( gdev->dev->iommu->iommu_dev->dev, "IOMMU driver was not able to establish FW requested direct mapping."); } } /* We must set default_domain early for __iommu_device_set_domain */ group->default_domain = dom; if (!group->domain) { /* * Drivers are not allowed to fail the first domain attach. * The only way to recover from this is to fail attaching the * iommu driver and call ops->release_device. Put the domain * in group->default_domain so it is freed after. */ ret = __iommu_group_set_domain_internal( group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); if (WARN_ON(ret)) goto out_free_old; } else { ret = __iommu_group_set_domain(group, dom); if (ret) goto err_restore_def_domain; } /* * Drivers are supposed to allow mappings to be installed in a domain * before device attachment, but some don't. Hack around this defect by * trying again after attaching. If this happens it means the device * will not continuously have the IOMMU_RESV_DIRECT map. */ if (direct_failed) { for_each_group_device(group, gdev) { ret = iommu_create_device_direct_mappings(dom, gdev->dev); if (ret) goto err_restore_domain; } } out_free_old: if (old_dom) iommu_domain_free(old_dom); return ret; err_restore_domain: if (old_dom) __iommu_group_set_domain_internal( group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); err_restore_def_domain: if (old_dom) { iommu_domain_free(dom); group->default_domain = old_dom; } return ret; } /* * Changing the default domain through sysfs requires the users to unbind the * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ * transition. Return failure if this isn't met. * * We need to consider the race between this and the device release path. * group->mutex is used here to guarantee that the device release path * will not be entered at the same time. */ static ssize_t iommu_group_store_type(struct iommu_group *group, const char *buf, size_t count) { struct group_device *gdev; int ret, req_type; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; if (WARN_ON(!group) || !group->default_domain) return -EINVAL; if (sysfs_streq(buf, "identity")) req_type = IOMMU_DOMAIN_IDENTITY; else if (sysfs_streq(buf, "DMA")) req_type = IOMMU_DOMAIN_DMA; else if (sysfs_streq(buf, "DMA-FQ")) req_type = IOMMU_DOMAIN_DMA_FQ; else if (sysfs_streq(buf, "auto")) req_type = 0; else return -EINVAL; mutex_lock(&group->mutex); /* We can bring up a flush queue without tearing down the domain. 
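 * A DMA -> DMA-FQ transition only initializes a flush queue on the existing
 * default domain and retags its type, so no device detach/reattach is needed.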
*/ if (req_type == IOMMU_DOMAIN_DMA_FQ && group->default_domain->type == IOMMU_DOMAIN_DMA) { ret = iommu_dma_init_fq(group->default_domain); if (ret) goto out_unlock; group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; ret = count; goto out_unlock; } /* Otherwise, ensure that device exists and no driver is bound. */ if (list_empty(&group->devices) || group->owner_cnt) { ret = -EPERM; goto out_unlock; } ret = iommu_setup_default_domain(group, req_type); if (ret) goto out_unlock; /* Make sure dma_ops is appropriately set */ for_each_group_device(group, gdev) iommu_setup_dma_ops(gdev->dev); out_unlock: mutex_unlock(&group->mutex); return ret ?: count; } /** * iommu_device_use_default_domain() - Device driver wants to handle device * DMA through the kernel DMA API. * @dev: The device. * * The device driver about to bind @dev wants to do DMA through the kernel * DMA API. Return 0 if it is allowed, otherwise an error. */ int iommu_device_use_default_domain(struct device *dev) { /* Caller is the driver core during the pre-probe path */ struct iommu_group *group = dev->iommu_group; int ret = 0; if (!group) return 0; mutex_lock(&group->mutex); /* We may race against bus_iommu_probe() finalising groups here */ if (!group->default_domain) { ret = -EPROBE_DEFER; goto unlock_out; } if (group->owner_cnt) { if (group->domain != group->default_domain || group->owner || !xa_empty(&group->pasid_array)) { ret = -EBUSY; goto unlock_out; } } group->owner_cnt++; unlock_out: mutex_unlock(&group->mutex); return ret; } /** * iommu_device_unuse_default_domain() - Device driver stops handling device * DMA through the kernel DMA API. * @dev: The device. * * The device driver doesn't want to do DMA through kernel DMA API anymore. * It must be called after iommu_device_use_default_domain(). */ void iommu_device_unuse_default_domain(struct device *dev) { /* Caller is the driver core during the post-probe path */ struct iommu_group *group = dev->iommu_group; if (!group) return; mutex_lock(&group->mutex); if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) group->owner_cnt--; mutex_unlock(&group->mutex); } static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) { struct device *dev = iommu_group_first_dev(group); const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *domain; if (group->blocking_domain) return 0; if (ops->blocked_domain) { group->blocking_domain = ops->blocked_domain; return 0; } /* * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an * empty PAGING domain instead. */ domain = iommu_paging_domain_alloc(dev); if (IS_ERR(domain)) return PTR_ERR(domain); group->blocking_domain = domain; return 0; } static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) { int ret; if ((group->domain && group->domain != group->default_domain) || !xa_empty(&group->pasid_array)) return -EBUSY; ret = __iommu_group_alloc_blocking_domain(group); if (ret) return ret; ret = __iommu_group_set_domain(group, group->blocking_domain); if (ret) return ret; group->owner = owner; group->owner_cnt++; return 0; } /** * iommu_group_claim_dma_owner() - Set DMA ownership of a group * @group: The group. * @owner: Caller specified pointer. Used for exclusive ownership. * * This is to support backward compatibility for vfio which manages the dma * ownership at the iommu_group level. New invocations on this interface should * be prohibited. Only a single owner may exist for a group.
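 *
 * A minimal claim/release sketch (illustrative only; my_cookie is any
 * unique, caller-owned pointer):
 *
 *	ret = iommu_group_claim_dma_owner(group, my_cookie);
 *	if (ret)
 *		return ret;
 *	(user-managed DMA happens here)
 *	iommu_group_release_dma_owner(group);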
*/ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) { int ret = 0; if (WARN_ON(!owner)) return -EINVAL; mutex_lock(&group->mutex); if (group->owner_cnt) { ret = -EPERM; goto unlock_out; } ret = __iommu_take_dma_ownership(group, owner); unlock_out: mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); /** * iommu_device_claim_dma_owner() - Set DMA ownership of a device * @dev: The device. * @owner: Caller specified pointer. Used for exclusive ownership. * * Claim the DMA ownership of a device. Multiple devices in the same group may * concurrently claim ownership if they present the same owner value. Returns 0 * on success and error code on failure */ int iommu_device_claim_dma_owner(struct device *dev, void *owner) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; int ret = 0; if (WARN_ON(!owner)) return -EINVAL; if (!group) return -ENODEV; mutex_lock(&group->mutex); if (group->owner_cnt) { if (group->owner != owner) { ret = -EPERM; goto unlock_out; } group->owner_cnt++; goto unlock_out; } ret = __iommu_take_dma_ownership(group, owner); unlock_out: mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); static void __iommu_release_dma_ownership(struct iommu_group *group) { if (WARN_ON(!group->owner_cnt || !group->owner || !xa_empty(&group->pasid_array))) return; group->owner_cnt = 0; group->owner = NULL; __iommu_group_set_domain_nofail(group, group->default_domain); } /** * iommu_group_release_dma_owner() - Release DMA ownership of a group * @group: The group * * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). */ void iommu_group_release_dma_owner(struct iommu_group *group) { mutex_lock(&group->mutex); __iommu_release_dma_ownership(group); mutex_unlock(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); /** * iommu_device_release_dma_owner() - Release DMA ownership of a device * @dev: The device. * * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). */ void iommu_device_release_dma_owner(struct device *dev) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; mutex_lock(&group->mutex); if (group->owner_cnt > 1) group->owner_cnt--; else __iommu_release_dma_ownership(group); mutex_unlock(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); /** * iommu_group_dma_owner_claimed() - Query group dma ownership status * @group: The group. * * This provides status query on a given group. It is racy and only for * non-binding status reporting. 
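 *
 * Return: true if any DMA ownership is currently claimed on @group,
 * false otherwise.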
*/ bool iommu_group_dma_owner_claimed(struct iommu_group *group) { unsigned int user; mutex_lock(&group->mutex); user = group->owner_cnt; mutex_unlock(&group->mutex); return user; } EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); static void iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, struct iommu_domain *domain) { const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *blocked_domain = ops->blocked_domain; WARN_ON(blocked_domain->ops->set_dev_pasid(blocked_domain, dev, pasid, domain)); } static int __iommu_set_group_pasid(struct iommu_domain *domain, struct iommu_group *group, ioasid_t pasid, struct iommu_domain *old) { struct group_device *device, *last_gdev; int ret; for_each_group_device(group, device) { if (device->dev->iommu->max_pasids > 0) { ret = domain->ops->set_dev_pasid(domain, device->dev, pasid, old); if (ret) goto err_revert; } } return 0; err_revert: last_gdev = device; for_each_group_device(group, device) { if (device == last_gdev) break; if (device->dev->iommu->max_pasids > 0) { /* * If no old domain, undo the succeeded devices/pasid. * Otherwise, rollback the succeeded devices/pasid to * the old domain. And it is a driver bug to fail * attaching with a previously good domain. */ if (!old || WARN_ON(old->ops->set_dev_pasid(old, device->dev, pasid, domain))) iommu_remove_dev_pasid(device->dev, pasid, domain); } } return ret; } static void __iommu_remove_group_pasid(struct iommu_group *group, ioasid_t pasid, struct iommu_domain *domain) { struct group_device *device; for_each_group_device(group, device) { if (device->dev->iommu->max_pasids > 0) iommu_remove_dev_pasid(device->dev, pasid, domain); } } /* * iommu_attach_device_pasid() - Attach a domain to pasid of device * @domain: the iommu domain. * @dev: the attached device. * @pasid: the pasid of the device. * @handle: the attach handle. * * Caller should always provide a new handle to avoid race with the paths * that have lockless reference to handle if it intends to pass a valid handle. * * Return: 0 on success, or an error. */ int iommu_attach_device_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid, struct iommu_attach_handle *handle) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; struct group_device *device; const struct iommu_ops *ops; void *entry; int ret; if (!group) return -ENODEV; ops = dev_iommu_ops(dev); if (!domain->ops->set_dev_pasid || !ops->blocked_domain || !ops->blocked_domain->ops->set_dev_pasid) return -EOPNOTSUPP; if (!domain_iommu_ops_compatible(ops, domain) || pasid == IOMMU_NO_PASID) return -EINVAL; mutex_lock(&group->mutex); for_each_group_device(group, device) { /* * Skip PASID validation for devices without PASID support * (max_pasids = 0). These devices cannot issue transactions * with PASID, so they don't affect group's PASID usage. */ if ((device->dev->iommu->max_pasids > 0) && (pasid >= device->dev->iommu->max_pasids)) { ret = -EINVAL; goto out_unlock; } } entry = iommu_make_pasid_array_entry(domain, handle); /* * Entry present is a failure case. Use xa_insert() instead of * xa_reserve(). */ ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL); if (ret) goto out_unlock; ret = __iommu_set_group_pasid(domain, group, pasid, NULL); if (ret) { xa_release(&group->pasid_array, pasid); goto out_unlock; } /* * The xa_insert() above reserved the memory, and the group->mutex is * held, this cannot fail. 
The new domain cannot be visible until the * operation succeeds as we cannot tolerate PRIs becoming concurrently * queued and then failing attach. */ WARN_ON(xa_is_err(xa_store(&group->pasid_array, pasid, entry, GFP_KERNEL))); out_unlock: mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); /** * iommu_replace_device_pasid - Replace the domain that a specific pasid * of the device is attached to * @domain: the new iommu domain * @dev: the attached device. * @pasid: the pasid of the device. * @handle: the attach handle. * * This API allows the pasid to switch domains. The @pasid should have been * attached. Otherwise, this fails. The pasid will keep the old configuration * if replacement fails. * * Caller should always provide a new handle to avoid race with the paths * that have lockless reference to handle if it intends to pass a valid handle. * * Return 0 on success, or an error. */ int iommu_replace_device_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid, struct iommu_attach_handle *handle) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; struct iommu_attach_handle *entry; struct iommu_domain *curr_domain; void *curr; int ret; if (!group) return -ENODEV; if (!domain->ops->set_dev_pasid) return -EOPNOTSUPP; if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) || pasid == IOMMU_NO_PASID || !handle) return -EINVAL; mutex_lock(&group->mutex); entry = iommu_make_pasid_array_entry(domain, handle); curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, XA_ZERO_ENTRY, GFP_KERNEL); if (xa_is_err(curr)) { ret = xa_err(curr); goto out_unlock; } /* * No domain (with or without handle) attached, hence not * a replace case. */ if (!curr) { xa_release(&group->pasid_array, pasid); ret = -EINVAL; goto out_unlock; } /* * Reusing the handle is problematic as there are paths that refer to * the handle without locking. To avoid races, reject callers that * attempt it. */ if (curr == entry) { WARN_ON(1); ret = -EINVAL; goto out_unlock; } curr_domain = pasid_array_entry_to_domain(curr); ret = 0; if (curr_domain != domain) { ret = __iommu_set_group_pasid(domain, group, pasid, curr_domain); if (ret) goto out_unlock; } /* * The above xa_cmpxchg() reserved the memory, and the * group->mutex is held, this cannot fail. */ WARN_ON(xa_is_err(xa_store(&group->pasid_array, pasid, entry, GFP_KERNEL))); out_unlock: mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_NS_GPL(iommu_replace_device_pasid, "IOMMUFD_INTERNAL"); /* * iommu_detach_device_pasid() - Detach the domain from pasid of device * @domain: the iommu domain. * @dev: the attached device. * @pasid: the pasid of the device. * * The @domain must have been attached to @pasid of the @dev with * iommu_attach_device_pasid(). */ void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; mutex_lock(&group->mutex); __iommu_remove_group_pasid(group, pasid, domain); xa_erase(&group->pasid_array, pasid); mutex_unlock(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); ioasid_t iommu_alloc_global_pasid(struct device *dev) { int ret; /* max_pasids == 0 means that the device does not support PASID */ if (!dev->iommu->max_pasids) return IOMMU_PASID_INVALID; /* * max_pasids is set up by vendor driver based on number of PASID bits * supported but the IDA allocation is inclusive.
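 * Hence the upper bound passed to ida_alloc_range() below is
 * max_pasids - 1.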
*/ ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID, dev->iommu->max_pasids - 1, GFP_KERNEL); return ret < 0 ? IOMMU_PASID_INVALID : ret; } EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); void iommu_free_global_pasid(ioasid_t pasid) { if (WARN_ON(pasid == IOMMU_PASID_INVALID)) return; ida_free(&iommu_global_pasid_ida, pasid); } EXPORT_SYMBOL_GPL(iommu_free_global_pasid); /** * iommu_attach_handle_get - Return the attach handle * @group: the iommu group that domain was attached to * @pasid: the pasid within the group * @type: matched domain type, 0 for any match * * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch. * * Return the attach handle to the caller. The life cycle of an iommu attach * handle is from the time when the domain is attached to the time when the * domain is detached. Callers are required to synchronize the call of * iommu_attach_handle_get() with domain attachment and detachment. The attach * handle can only be used during its life cycle. */ struct iommu_attach_handle * iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type) { struct iommu_attach_handle *handle; void *entry; xa_lock(&group->pasid_array); entry = xa_load(&group->pasid_array, pasid); if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) { handle = ERR_PTR(-ENOENT); } else { handle = xa_untag_pointer(entry); if (type && handle->domain->type != type) handle = ERR_PTR(-EBUSY); } xa_unlock(&group->pasid_array); return handle; } EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, "IOMMUFD_INTERNAL"); /** * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group * @domain: IOMMU domain to attach * @group: IOMMU group that will be attached * @handle: attach handle * * Returns 0 on success and error code on failure. * * This is a variant of iommu_attach_group(). It allows the caller to provide * an attach handle and use it when the domain is attached. This is currently * used by IOMMUFD to deliver the I/O page faults. * * Caller should always provide a new handle to avoid race with the paths * that have lockless reference to handle. */ int iommu_attach_group_handle(struct iommu_domain *domain, struct iommu_group *group, struct iommu_attach_handle *handle) { void *entry; int ret; if (!handle) return -EINVAL; mutex_lock(&group->mutex); entry = iommu_make_pasid_array_entry(domain, handle); ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL); if (ret) goto out_unlock; ret = __iommu_attach_group(domain, group); if (ret) { xa_release(&group->pasid_array, IOMMU_NO_PASID); goto out_unlock; } /* * The xa_insert() above reserved the memory, and the group->mutex is * held, this cannot fail. The new domain cannot be visible until the * operation succeeds as we cannot tolerate PRIs becoming concurrently * queued and then failing attach. */ WARN_ON(xa_is_err(xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL))); out_unlock: mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, "IOMMUFD_INTERNAL"); /** * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group * @domain: IOMMU domain to detach * @group: IOMMU group that the domain will be detached from * * Detach the specified IOMMU domain from the specified IOMMU group. * It must be used in conjunction with iommu_attach_group_handle().
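 *
 * A caller that attached a domain with iommu_attach_group_handle() undoes
 * it here; afterwards the group is put back on its core (default) domain.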
*/ void iommu_detach_group_handle(struct iommu_domain *domain, struct iommu_group *group) { mutex_lock(&group->mutex); __iommu_group_set_core_domain(group); xa_erase(&group->pasid_array, IOMMU_NO_PASID); mutex_unlock(&group->mutex); } EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL"); /** * iommu_replace_group_handle - replace the domain that a group is attached to * @group: IOMMU group that will be attached to the new domain * @new_domain: new IOMMU domain to replace with * @handle: attach handle * * This API allows the group to switch domains without being forced to go to * the blocking domain in-between. It allows the caller to provide an attach * handle for the new domain and use it when the domain is attached. * * If the currently attached domain is a core domain (e.g. a default_domain), * it will act just like iommu_attach_group_handle(). * * Caller should always provide a new handle to avoid race with the paths * that have lockless reference to handle. */ int iommu_replace_group_handle(struct iommu_group *group, struct iommu_domain *new_domain, struct iommu_attach_handle *handle) { void *curr, *entry; int ret; if (!new_domain || !handle) return -EINVAL; mutex_lock(&group->mutex); entry = iommu_make_pasid_array_entry(new_domain, handle); ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); if (ret) goto err_unlock; ret = __iommu_group_set_domain(group, new_domain); if (ret) goto err_release; curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL); WARN_ON(xa_is_err(curr)); mutex_unlock(&group->mutex); return 0; err_release: xa_release(&group->pasid_array, IOMMU_NO_PASID); err_unlock: mutex_unlock(&group->mutex); return ret; } EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL"); #if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU) /** * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain * @desc: MSI descriptor, will store the MSI page * @msi_addr: MSI target address to be mapped * * The implementation of sw_msi() should take msi_addr and map it to * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the * mapping information. * * Return: 0 on success or negative error code if the mapping failed. */ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) { struct device *dev = msi_desc_to_dev(desc); struct iommu_group *group = dev->iommu_group; int ret = 0; if (!group) return 0; mutex_lock(&group->mutex); /* An IDENTITY domain must pass through */ if (group->domain && group->domain->type != IOMMU_DOMAIN_IDENTITY) { switch (group->domain->cookie_type) { case IOMMU_COOKIE_DMA_MSI: case IOMMU_COOKIE_DMA_IOVA: ret = iommu_dma_sw_msi(group->domain, desc, msi_addr); break; case IOMMU_COOKIE_IOMMUFD: ret = iommufd_sw_msi(group->domain, desc, msi_addr); break; default: ret = -EOPNOTSUPP; break; } } mutex_unlock(&group->mutex); return ret; } #endif /* CONFIG_IRQ_MSI_IOMMU */
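/*
 * A minimal, hypothetical usage sketch of the mapping API above
 * (illustrative only, not part of iommu.c): allocate a paging domain,
 * attach it, map one page, and tear everything down. Assumes @dev is
 * already managed by an IOMMU driver; error handling is abbreviated.
 */
#include <linux/iommu.h>

static int example_map_one_page(struct device *dev, unsigned long iova,
				phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/*
	 * iommu_map() maps and then syncs the IOTLB; iova, paddr and size
	 * must be aligned to the domain's minimum supported page size.
	 */
	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret)
		goto out_detach;

	/* iommu_unmap() must cover the same range that was mapped. */
	WARN_ON(iommu_unmap(domain, iova, PAGE_SIZE) != PAGE_SIZE);

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}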
// SPDX-License-Identifier: GPL-2.0-or-later /* * dir.c * * Creates, reads, walks and deletes directory-nodes * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * Portions of this code from linux/fs/ext3/dir.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/quotaops.h> #include <linux/sort.h> #include <linux/iversion.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "blockcheck.h" #include "dir.h" #include "dlmglue.h" #include "extent_map.h" #include "file.h" #include "inode.h" #include "journal.h" #include "namei.h" #include "suballoc.h" #include "super.h" #include "sysfile.h" #include "uptodate.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" #define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) static int ocfs2_do_extend_dir(struct super_block *sb, handle_t *handle, struct inode *dir, struct buffer_head *parent_fe_bh, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct buffer_head **new_bh); static int ocfs2_dir_indexed(struct inode *inode); /* * These are distinct checks because future versions of the file system will * want to have a trailing dirent structure independent of indexing. */ static int ocfs2_supports_dir_trailer(struct inode *dir) { struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir); } /* * "new" here refers to the point at which we're creating a new * directory via "mkdir()", but also when we're expanding an inline * directory. In either case, we don't yet have the indexing bit set * on the directory, so the standard checks will fail when metaecc * is turned off. Only directory-initialization type functions should * use this. Everything else wants ocfs2_supports_dir_trailer() */ static int ocfs2_new_dir_wants_trailer(struct inode *dir) { struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); return ocfs2_meta_ecc(osb) || ocfs2_supports_indexed_dirs(osb); } static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb) { return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer); } #define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb)))) /* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make * them more consistent? */ struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize, void *data) { char *p = data; p += blocksize - sizeof(struct ocfs2_dir_block_trailer); return (struct ocfs2_dir_block_trailer *)p; } /* * XXX: This is executed once on every dirent. We should consider optimizing * it.
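 * For now the check is just a feature test plus a single offset comparison
 * per dirent.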
*/ static int ocfs2_skip_dir_trailer(struct inode *dir, struct ocfs2_dir_entry *de, unsigned long offset, unsigned long blklen) { unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer); if (!ocfs2_supports_dir_trailer(dir)) return 0; if (offset != toff) return 0; return 1; } static void ocfs2_init_dir_trailer(struct inode *inode, struct buffer_head *bh, u16 rec_len) { struct ocfs2_dir_block_trailer *trailer; trailer = ocfs2_trailer_from_bh(bh, inode->i_sb); strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE); trailer->db_compat_rec_len = cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer)); trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); trailer->db_blkno = cpu_to_le64(bh->b_blocknr); trailer->db_free_rec_len = cpu_to_le16(rec_len); } /* * Link an unindexed block with a dir trailer structure into the index free * list. This function will modify dirdata_bh, but assumes you've already * passed it to the journal. */ static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle, struct buffer_head *dx_root_bh, struct buffer_head *dirdata_bh) { int ret; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dir_block_trailer *trailer; ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb); dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; trailer->db_free_next = dx_root->dr_free_blk; dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr); ocfs2_journal_dirty(handle, dx_root_bh); out: return ret; } static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res) { return res->dl_prev_leaf_bh == NULL; } void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res) { brelse(res->dl_dx_root_bh); brelse(res->dl_leaf_bh); brelse(res->dl_dx_leaf_bh); brelse(res->dl_prev_leaf_bh); } static int ocfs2_dir_indexed(struct inode *inode) { if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL) return 1; return 0; } static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root) { return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE; } /* * Hashing code adapted from ext3 */ #define DELTA 0x9E3779B9 static void TEA_transform(__u32 buf[4], __u32 const in[]) { __u32 sum = 0; __u32 b0 = buf[0], b1 = buf[1]; __u32 a = in[0], b = in[1], c = in[2], d = in[3]; int n = 16; do { sum += DELTA; b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); } while (--n); buf[0] += b0; buf[1] += b1; } static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; pad = (__u32)len | ((__u32)len << 8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = msg[i] + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len, struct ocfs2_dx_hinfo *hinfo) { struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); const char *p; __u32 in[8], buf[4]; /* * XXX: Is this really necessary, if the index is never looked * at by readdir? Is a hash value of '0' a bad idea? */ if ((len == 1 && !strncmp(".", name, 1)) || (len == 2 && !strncmp("..", name, 2))) { buf[0] = buf[1] = 0; goto out; } #ifdef OCFS2_DEBUG_DX_DIRS /* * This makes it very easy to debug indexing problems. 
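 * (Every name of a given length hashes identically, which constantly
 * exercises the collision paths.)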
We * should never allow this to be selected without hand editing * this file though. */ buf[0] = buf[1] = len; goto out; #endif memcpy(buf, osb->osb_dx_seed, sizeof(buf)); p = name; while (len > 0) { str2hashbuf(p, len, in, 4); TEA_transform(buf, in); len -= 16; p += 16; } out: hinfo->major_hash = buf[0]; hinfo->minor_hash = buf[1]; } /* * bh passed here can be an inode block or a dir data block, depending * on the inode inline data flag. */ static int ocfs2_check_dir_entry(struct inode *dir, struct ocfs2_dir_entry *de, struct buffer_head *bh, char *buf, unsigned int size, unsigned long offset) { const char *error_msg = NULL; unsigned long next_offset; int rlen; if (offset > size - OCFS2_DIR_REC_LEN(1)) { /* Dirent is (maybe partially) beyond the buffer * boundaries so touching 'de' members is unsafe. */ mlog(ML_ERROR, "directory entry (#%llu: offset=%lu) " "too close to end or out-of-bounds", (unsigned long long)OCFS2_I(dir)->ip_blkno, offset); return 0; } rlen = le16_to_cpu(de->rec_len); next_offset = ((char *) de - buf) + rlen; if (unlikely(rlen < OCFS2_DIR_REC_LEN(1))) error_msg = "rec_len is smaller than minimal"; else if (unlikely(rlen % 4 != 0)) error_msg = "rec_len % 4 != 0"; else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; else if (unlikely(next_offset > size)) error_msg = "directory entry overrun"; else if (unlikely(next_offset > size - OCFS2_DIR_REC_LEN(1)) && next_offset != size) error_msg = "directory entry too close to end"; if (unlikely(error_msg != NULL)) mlog(ML_ERROR, "bad entry in directory #%llu: %s - " "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg, offset, (unsigned long long)le64_to_cpu(de->inode), rlen, de->name_len); return error_msg == NULL ? 
1 : 0; } static inline int ocfs2_match(int len, const char * const name, struct ocfs2_dir_entry *de) { if (len != de->name_len) return 0; if (!de->inode) return 0; return !memcmp(name, de->name, len); } /* * Returns 0 if not found, -1 on failure, and 1 on success */ static inline int ocfs2_search_dirblock(struct buffer_head *bh, struct inode *dir, const char *name, int namelen, unsigned long offset, char *first_de, unsigned int bytes, struct ocfs2_dir_entry **res_dir) { struct ocfs2_dir_entry *de; char *dlimit, *de_buf; int de_len; int ret = 0; de_buf = first_de; dlimit = de_buf + bytes; while (de_buf < dlimit - OCFS2_DIR_MEMBER_LEN) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ de = (struct ocfs2_dir_entry *) de_buf; if (de->name + namelen <= dlimit && ocfs2_match(namelen, name, de)) { /* found a match - just to be sure, do a full check */ if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, offset)) { ret = -1; goto bail; } *res_dir = de; ret = 1; goto bail; } /* prevent looping on a bad block */ de_len = le16_to_cpu(de->rec_len); if (de_len <= 0) { ret = -1; goto bail; } de_buf += de_len; offset += de_len; } bail: trace_ocfs2_search_dirblock(ret); return ret; } static struct buffer_head *ocfs2_find_entry_id(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_entry **res_dir) { int ret, found; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0, data->id_data, i_size_read(dir), res_dir); if (found == 1) return di_bh; brelse(di_bh); out: return NULL; } static int ocfs2_validate_dir_block(struct super_block *sb, struct buffer_head *bh) { int rc; struct ocfs2_dir_block_trailer *trailer = ocfs2_trailer_from_bh(bh, sb); /* * We don't validate dirents here, that's handled * in-place when the code walks them. */ trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr); BUG_ON(!buffer_uptodate(bh)); /* * If the ecc fails, we return the error but otherwise * leave the filesystem running. We know any error is * local to this block. * * Note that we are safe to call this even if the directory * doesn't have a trailer. Filesystems without metaecc will do * nothing, and filesystems with it will have one. */ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check); if (rc) mlog(ML_ERROR, "Checksum failed for dinode %llu\n", (unsigned long long)bh->b_blocknr); return rc; } /* * Validate a directory trailer. * * We check the trailer here rather than in ocfs2_validate_dir_block() * because that function doesn't have the inode to test. 
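 * Three things are verified: the trailer signature, that the trailer's
 * self-referential db_blkno matches the block we actually read, and
 * that db_parent_dinode points back at this directory's inode.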
 */
static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
{
	int rc = 0;
	struct ocfs2_dir_block_trailer *trailer;

	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
		rc = ocfs2_error(dir->i_sb,
				 "Invalid dirblock #%llu: signature = %.*s\n",
				 (unsigned long long)bh->b_blocknr, 7,
				 trailer->db_signature);
		goto out;
	}
	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
		rc = ocfs2_error(dir->i_sb,
				 "Directory block #%llu has an invalid db_blkno of %llu\n",
				 (unsigned long long)bh->b_blocknr,
				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
		goto out;
	}
	if (le64_to_cpu(trailer->db_parent_dinode) !=
	    OCFS2_I(dir)->ip_blkno) {
		rc = ocfs2_error(dir->i_sb,
				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
				 (unsigned long long)bh->b_blocknr,
				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
				 (unsigned long long)le64_to_cpu(trailer->db_parent_dinode));
		goto out;
	}
out:
	return rc;
}

/*
 * This function forces all errors to -EIO for consistency with its
 * predecessor, ocfs2_bread(). We haven't audited what returning the
 * real error codes would do to callers. We log the real codes with
 * mlog_errno() before we squash them.
 */
static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
				struct buffer_head **bh, int flags)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
				    ocfs2_validate_dir_block);
	if (rc) {
		mlog_errno(rc);
		goto out;
	}

	if (!(flags & OCFS2_BH_READAHEAD) &&
	    ocfs2_supports_dir_trailer(inode)) {
		rc = ocfs2_check_dir_trailer(inode, tmp);
		if (rc) {
			if (!*bh)
				brelse(tmp);
			mlog_errno(rc);
			goto out;
		}
	}

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!*bh)
		*bh = tmp;

out:
	return rc ? -EIO : 0;
}

/*
 * Read the block at 'phys' which belongs to this directory
 * inode. This function does no virtual->physical block translation -
 * what's passed in is assumed to be a valid directory block.
 */
static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
				       struct buffer_head **bh)
{
	int ret;
	struct buffer_head *tmp = *bh;

	ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
			       ocfs2_validate_dir_block);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_supports_dir_trailer(dir)) {
		ret = ocfs2_check_dir_trailer(dir, tmp);
		if (ret) {
			if (!*bh)
				brelse(tmp);
			mlog_errno(ret);
			goto out;
		}
	}

	if (!ret && !*bh)
		*bh = tmp;
out:
	return ret;
}

static int ocfs2_validate_dx_root(struct super_block *sb,
				  struct buffer_head *bh)
{
	int ret;
	struct ocfs2_dx_root_block *dx_root;

	BUG_ON(!buffer_uptodate(bh));

	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;

	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
	if (ret) {
		mlog(ML_ERROR,
		     "Checksum failed for dir index root block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return ret;
	}

	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
		ret = ocfs2_error(sb,
				  "Dir Index Root # %llu has bad signature %.*s\n",
				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
				  7, dx_root->dr_signature);
	}

	return ret;
}

static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
			      struct buffer_head **dx_root_bh)
{
	int ret;
	u64 blkno = le64_to_cpu(di->i_dx_root);
	struct buffer_head *tmp = *dx_root_bh;

	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
			       ocfs2_validate_dx_root);

	/* If ocfs2_read_block() got us a new bh, pass it up.
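	 * (A caller that already read the root passes its bh in; tmp
	 * then just aliases it and there is nothing new to hand back,
	 * nor anything extra to release on error.)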
*/ if (!ret && !*dx_root_bh) *dx_root_bh = tmp; return ret; } static int ocfs2_validate_dx_leaf(struct super_block *sb, struct buffer_head *bh) { int ret; struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data; BUG_ON(!buffer_uptodate(bh)); ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check); if (ret) { mlog(ML_ERROR, "Checksum failed for dir index leaf block %llu\n", (unsigned long long)bh->b_blocknr); return ret; } if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) { ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n", 7, dx_leaf->dl_signature); } return ret; } static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno, struct buffer_head **dx_leaf_bh) { int ret; struct buffer_head *tmp = *dx_leaf_bh; ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp, ocfs2_validate_dx_leaf); /* If ocfs2_read_block() got us a new bh, pass it up. */ if (!ret && !*dx_leaf_bh) *dx_leaf_bh = tmp; return ret; } /* * Read a series of dx_leaf blocks. This expects all buffer_head * pointers to be NULL on function entry. */ static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num, struct buffer_head **dx_leaf_bhs) { int ret; ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0, ocfs2_validate_dx_leaf); if (ret) mlog_errno(ret); return ret; } static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_entry **res_dir) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; unsigned long start, block, b; int ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ int ra_ptr = 0; /* Current index into readahead buffer */ int num = 0; int nblocks, i; sb = dir->i_sb; nblocks = i_size_read(dir) >> sb->s_blocksize_bits; start = OCFS2_I(dir)->ip_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; b = block; for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) { /* * Terminate if we reach the end of the * directory and must wrap, or if our * search has finished at this block. */ if (b >= nblocks || (num && block == start)) { bh_use[ra_max] = NULL; break; } num++; bh = NULL; ocfs2_read_dir_block(dir, b++, &bh, OCFS2_BH_READAHEAD); bh_use[ra_max] = bh; } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; if (ocfs2_read_dir_block(dir, block, &bh, 0)) { /* read error, skip block & hope for the best. * ocfs2_read_dir_block() has released the bh. */ mlog(ML_ERROR, "reading directory %llu, " "offset %lu\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, block); goto next; } i = ocfs2_search_dirblock(bh, dir, name, namelen, block << sb->s_blocksize_bits, bh->b_data, sb->s_blocksize, res_dir); if (i == 1) { OCFS2_I(dir)->ip_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) goto cleanup_and_exit; } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. 
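	 * nblocks was sampled before the scan began, so a racing
	 * create may have appended blocks since then; re-sample
	 * i_size and walk only the newly added tail.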
*/ block = nblocks; nblocks = i_size_read(dir) >> sb->s_blocksize_bits; if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); trace_ocfs2_find_entry_el(ret); return ret; } static int ocfs2_dx_dir_lookup_rec(struct inode *inode, struct ocfs2_extent_list *el, u32 major_hash, u32 *ret_cpos, u64 *ret_phys_blkno, unsigned int *ret_clen) { int ret = 0, i, found; struct buffer_head *eb_bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_rec *rec = NULL; if (le16_to_cpu(el->l_count) != ocfs2_extent_recs_per_dx_root(inode->i_sb)) { ret = ocfs2_error(inode->i_sb, "Inode %lu has invalid extent list length %u\n", inode->i_ino, le16_to_cpu(el->l_count)); goto out; } if (el->l_tree_depth) { ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash, &eb_bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; if (el->l_tree_depth) { ret = ocfs2_error(inode->i_sb, "Inode %lu has non zero tree depth in btree tree block %llu\n", inode->i_ino, (unsigned long long)eb_bh->b_blocknr); goto out; } } if (le16_to_cpu(el->l_next_free_rec) == 0) { ret = ocfs2_error(inode->i_sb, "Inode %lu has empty extent list at depth %u\n", inode->i_ino, le16_to_cpu(el->l_tree_depth)); goto out; } found = 0; for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { rec = &el->l_recs[i]; if (le32_to_cpu(rec->e_cpos) <= major_hash) { found = 1; break; } } if (!found) { ret = ocfs2_error(inode->i_sb, "Inode %lu has bad extent record (%u, %u, 0) in btree\n", inode->i_ino, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); goto out; } if (ret_phys_blkno) *ret_phys_blkno = le64_to_cpu(rec->e_blkno); if (ret_cpos) *ret_cpos = le32_to_cpu(rec->e_cpos); if (ret_clen) *ret_clen = le16_to_cpu(rec->e_leaf_clusters); out: brelse(eb_bh); return ret; } /* * Returns the block index, from the start of the cluster which this * hash belongs too. */ static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb, u32 minor_hash) { return minor_hash & osb->osb_dx_mask; } static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb, struct ocfs2_dx_hinfo *hinfo) { return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash); } static int ocfs2_dx_dir_lookup(struct inode *inode, struct ocfs2_extent_list *el, struct ocfs2_dx_hinfo *hinfo, u32 *ret_cpos, u64 *ret_phys_blkno) { int ret = 0; unsigned int cend, clen; u32 cpos; u64 blkno; u32 name_hash = hinfo->major_hash; ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno, &clen); if (ret) { mlog_errno(ret); goto out; } cend = cpos + clen; if (name_hash >= cend) { /* We want the last cluster */ blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1); cpos += clen - 1; } else { blkno += ocfs2_clusters_to_blocks(inode->i_sb, name_hash - cpos); cpos = name_hash; } /* * We now have the cluster which should hold our entry. To * find the exact block from the start of the cluster to * search, we take the lower bits of the hash. 
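	 * As a worked example (hypothetical 4k-block, 32k-cluster
	 * filesystem): 8 blocks per cluster gives an osb_dx_mask of 7,
	 * so minor_hash 0x1234 selects block 0x1234 & 7 == 4 of the
	 * cluster.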
*/ blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo); if (ret_phys_blkno) *ret_phys_blkno = blkno; if (ret_cpos) *ret_cpos = cpos; out: return ret; } static int ocfs2_dx_dir_search(const char *name, int namelen, struct inode *dir, struct ocfs2_dx_root_block *dx_root, struct ocfs2_dir_lookup_result *res) { int ret, i, found; u64 phys; struct buffer_head *dx_leaf_bh = NULL; struct ocfs2_dx_leaf *dx_leaf; struct ocfs2_dx_entry *dx_entry = NULL; struct buffer_head *dir_ent_bh = NULL; struct ocfs2_dir_entry *dir_ent = NULL; struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo; struct ocfs2_extent_list *dr_el; struct ocfs2_dx_entry_list *entry_list; ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo); if (ocfs2_dx_root_inline(dx_root)) { entry_list = &dx_root->dr_entries; goto search; } dr_el = &dx_root->dr_list; ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys); if (ret) { mlog_errno(ret); goto out; } trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name, hinfo->major_hash, hinfo->minor_hash, (unsigned long long)phys); ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh); if (ret) { mlog_errno(ret); goto out; } dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data; trace_ocfs2_dx_dir_search_leaf_info( le16_to_cpu(dx_leaf->dl_list.de_num_used), le16_to_cpu(dx_leaf->dl_list.de_count)); entry_list = &dx_leaf->dl_list; search: /* * Empty leaf is legal, so no need to check for that. */ found = 0; for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) { dx_entry = &entry_list->de_entries[i]; if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash) || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash)) continue; /* * Search unindexed leaf block now. We're not * guaranteed to find anything. */ ret = ocfs2_read_dir_block_direct(dir, le64_to_cpu(dx_entry->dx_dirent_blk), &dir_ent_bh); if (ret) { mlog_errno(ret); goto out; } /* * XXX: We should check the unindexed block here, * before using it. */ found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen, 0, dir_ent_bh->b_data, dir->i_sb->s_blocksize, &dir_ent); if (found == 1) break; if (found == -1) { /* This means we found a bad directory entry. */ ret = -EIO; mlog_errno(ret); goto out; } brelse(dir_ent_bh); dir_ent_bh = NULL; } if (found <= 0) { ret = -ENOENT; goto out; } res->dl_leaf_bh = dir_ent_bh; res->dl_entry = dir_ent; res->dl_dx_leaf_bh = dx_leaf_bh; res->dl_dx_entry = dx_entry; ret = 0; out: if (ret) { brelse(dx_leaf_bh); brelse(dir_ent_bh); } return ret; } static int ocfs2_find_entry_dx(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_lookup_result *lookup) { int ret; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_root_block *dx_root; ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; ret = ocfs2_read_dx_root(dir, di, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data; ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup); if (ret) { if (ret != -ENOENT) mlog_errno(ret); goto out; } lookup->dl_dx_root_bh = dx_root_bh; dx_root_bh = NULL; out: brelse(di_bh); brelse(dx_root_bh); return ret; } /* * Try to find an entry of the provided name within 'dir'. * * If nothing was found, -ENOENT is returned. Otherwise, zero is * returned and the struct 'res' will contain information useful to * other directory manipulation functions. 
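 * Whatever the outcome, the result should eventually be released with
 * ocfs2_free_dir_lookup_result().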
* * Caller can NOT assume anything about the contents of the * buffer_heads - they are passed back only so that it can be passed * into any one of the manipulation functions (add entry, delete * entry, etc). As an example, bh in the extent directory case is a * data block, in the inline-data case it actually points to an inode, * in the indexed directory case, multiple buffers are involved. */ int ocfs2_find_entry(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_lookup_result *lookup) { struct buffer_head *bh; struct ocfs2_dir_entry *res_dir = NULL; int ret = 0; if (ocfs2_dir_indexed(dir)) return ocfs2_find_entry_dx(name, namelen, dir, lookup); if (unlikely(i_size_read(dir) <= 0)) { ret = -EFSCORRUPTED; mlog_errno(ret); goto out; } /* * The unindexed dir code only uses part of the lookup * structure, so there's no reason to push it down further * than this. */ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) { ret = -EFSCORRUPTED; mlog_errno(ret); goto out; } bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir); } else { bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir); } if (bh == NULL) return -ENOENT; lookup->dl_leaf_bh = bh; lookup->dl_entry = res_dir; out: return ret; } /* * Update inode number and type of a previously found directory entry. */ int ocfs2_update_entry(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *res, struct inode *new_entry_inode) { int ret; ocfs2_journal_access_func access = ocfs2_journal_access_db; struct ocfs2_dir_entry *de = res->dl_entry; struct buffer_head *de_bh = res->dl_leaf_bh; /* * The same code works fine for both inline-data and extent * based directories, so no need to split this up. The only * difference is the journal_access function. 
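	 * (An inline-data directory's "leaf" is the inode block itself,
	 * which is why it has to be journaled with the dinode access
	 * function.)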
*/ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) access = ocfs2_journal_access_di; ret = access(handle, INODE_CACHE(dir), de_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno); ocfs2_set_de_type(de, new_entry_inode->i_mode); ocfs2_journal_dirty(handle, de_bh); out: return ret; } /* * __ocfs2_delete_entry deletes a directory entry by merging it with the * previous entry */ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh, char *first_de, unsigned int bytes) { struct ocfs2_dir_entry *de, *pde; int i, status = -ENOENT; ocfs2_journal_access_func access = ocfs2_journal_access_db; if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) access = ocfs2_journal_access_di; i = 0; pde = NULL; de = (struct ocfs2_dir_entry *) first_de; while (i < bytes) { if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, i)) { status = -EIO; mlog_errno(status); goto bail; } if (de == de_del) { status = access(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { status = -EIO; mlog_errno(status); goto bail; } if (pde) le16_add_cpu(&pde->rec_len, le16_to_cpu(de->rec_len)); de->inode = 0; inode_inc_iversion(dir); ocfs2_journal_dirty(handle, bh); goto bail; } i += le16_to_cpu(de->rec_len); pde = de; de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len)); } bail: return status; } static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de) { unsigned int hole; if (le64_to_cpu(de->inode) == 0) hole = le16_to_cpu(de->rec_len); else hole = le16_to_cpu(de->rec_len) - OCFS2_DIR_REC_LEN(de->name_len); return hole; } static int ocfs2_find_max_rec_len(struct super_block *sb, struct buffer_head *dirblock_bh) { int size, this_hole, largest_hole = 0; char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data; struct ocfs2_dir_entry *de; trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb); size = ocfs2_dir_trailer_blk_off(sb); limit = start + size; de_buf = start; de = (struct ocfs2_dir_entry *)de_buf; do { if (de_buf != trailer) { this_hole = ocfs2_figure_dirent_hole(de); if (this_hole > largest_hole) largest_hole = this_hole; } de_buf += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *)de_buf; } while (de_buf < limit); if (largest_hole >= OCFS2_DIR_MIN_REC_LEN) return largest_hole; return 0; } static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list, int index) { int num_used = le16_to_cpu(entry_list->de_num_used); if (num_used == 1 || index == (num_used - 1)) goto clear; memmove(&entry_list->de_entries[index], &entry_list->de_entries[index + 1], (num_used - index - 1)*sizeof(struct ocfs2_dx_entry)); clear: num_used--; memset(&entry_list->de_entries[num_used], 0, sizeof(struct ocfs2_dx_entry)); entry_list->de_num_used = cpu_to_le16(num_used); } static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir, struct ocfs2_dir_lookup_result *lookup) { int ret, index, max_rec_len, add_to_free_list = 0; struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; struct buffer_head *leaf_bh = lookup->dl_leaf_bh; struct ocfs2_dx_leaf *dx_leaf; struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry; struct ocfs2_dir_block_trailer *trailer; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dx_entry_list *entry_list; /* * This function gets a bit messy because we might have to * modify the root block, regardless of whether the indexed * entries are stored inline. 
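	 * Even when the entries live in an external dx leaf, the entry
	 * count (dr_num_entries) and the head of the free list
	 * (dr_free_blk) still sit in the root block.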
*/ /* * *Only* set 'entry_list' here, based on where we're looking * for the indexed entries. Later, we might still want to * journal both blocks, based on free list state. */ dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; if (ocfs2_dx_root_inline(dx_root)) { entry_list = &dx_root->dr_entries; } else { dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data; entry_list = &dx_leaf->dl_list; } /* Neither of these are a disk corruption - that should have * been caught by lookup, before we got here. */ BUG_ON(le16_to_cpu(entry_list->de_count) <= 0); BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0); index = (char *)dx_entry - (char *)entry_list->de_entries; index /= sizeof(*dx_entry); if (index >= le16_to_cpu(entry_list->de_num_used)) { mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, index, entry_list, dx_entry); return -EIO; } /* * We know that removal of this dirent will leave enough room * for a new one, so add this block to the free list if it * isn't already there. */ trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb); if (trailer->db_free_rec_len == 0) add_to_free_list = 1; /* * Add the block holding our index into the journal before * removing the unindexed entry. If we get an error return * from __ocfs2_delete_entry(), then it hasn't removed the * entry yet. Likewise, successful return means we *must* * remove the indexed entry. * * We're also careful to journal the root tree block here as * the entry count needs to be updated. Also, we might be * adding to the start of the free list. */ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } if (!ocfs2_dx_root_inline(dx_root)) { ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), lookup->dl_dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } } trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno, index); ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry, leaf_bh, leaf_bh->b_data, leaf_bh->b_size); if (ret) { mlog_errno(ret); goto out; } max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh); trailer->db_free_rec_len = cpu_to_le16(max_rec_len); if (add_to_free_list) { trailer->db_free_next = dx_root->dr_free_blk; dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr); ocfs2_journal_dirty(handle, dx_root_bh); } /* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */ ocfs2_journal_dirty(handle, leaf_bh); le32_add_cpu(&dx_root->dr_num_entries, -1); ocfs2_journal_dirty(handle, dx_root_bh); ocfs2_dx_list_remove_entry(entry_list, index); if (!ocfs2_dx_root_inline(dx_root)) ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh); out: return ret; } static inline int ocfs2_delete_entry_id(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh) { int ret; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data, i_size_read(dir)); brelse(di_bh); out: return ret; } static inline int ocfs2_delete_entry_el(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh) { return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data, bh->b_size); } /* * Delete a directory entry. 
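 * Note that deletion never compacts the block. Deleting B below, for
 * instance, merges its rec_len into A:
 *
 *	[A rec_len=16][B rec_len=24][C ...]  ->  [A rec_len=40][C ...]
 *
 * (A victim with no predecessor only has its inode field zeroed.)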
Hide the details of directory * implementation from the caller. */ int ocfs2_delete_entry(handle_t *handle, struct inode *dir, struct ocfs2_dir_lookup_result *res) { if (ocfs2_dir_indexed(dir)) return ocfs2_delete_entry_dx(handle, dir, res); if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return ocfs2_delete_entry_id(handle, dir, res->dl_entry, res->dl_leaf_bh); return ocfs2_delete_entry_el(handle, dir, res->dl_entry, res->dl_leaf_bh); } /* * Check whether 'de' has enough room to hold an entry of * 'new_rec_len' bytes. */ static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de, unsigned int new_rec_len) { unsigned int de_really_used; /* Check whether this is an empty record with enough space */ if (le64_to_cpu(de->inode) == 0 && le16_to_cpu(de->rec_len) >= new_rec_len) return 1; /* * Record might have free space at the end which we can * use. */ de_really_used = OCFS2_DIR_REC_LEN(de->name_len); if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len)) return 1; return 0; } static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf, struct ocfs2_dx_entry *dx_new_entry) { int i; i = le16_to_cpu(dx_leaf->dl_list.de_num_used); dx_leaf->dl_list.de_entries[i] = *dx_new_entry; le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1); } static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list, struct ocfs2_dx_hinfo *hinfo, u64 dirent_blk) { int i; struct ocfs2_dx_entry *dx_entry; i = le16_to_cpu(entry_list->de_num_used); dx_entry = &entry_list->de_entries[i]; memset(dx_entry, 0, sizeof(*dx_entry)); dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash); dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash); dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk); le16_add_cpu(&entry_list->de_num_used, 1); } static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle, struct ocfs2_dx_hinfo *hinfo, u64 dirent_blk, struct buffer_head *dx_leaf_bh) { int ret; struct ocfs2_dx_leaf *dx_leaf; ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data; ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk); ocfs2_journal_dirty(handle, dx_leaf_bh); out: return ret; } static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle, struct ocfs2_dx_hinfo *hinfo, u64 dirent_blk, struct ocfs2_dx_root_block *dx_root) { ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk); } static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *lookup) { int ret = 0; struct ocfs2_dx_root_block *dx_root; struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data; if (ocfs2_dx_root_inline(dx_root)) { ocfs2_dx_inline_root_insert(dir, handle, &lookup->dl_hinfo, lookup->dl_leaf_bh->b_blocknr, dx_root); } else { ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo, lookup->dl_leaf_bh->b_blocknr, lookup->dl_dx_leaf_bh); if (ret) goto out; } le32_add_cpu(&dx_root->dr_num_entries, 1); ocfs2_journal_dirty(handle, dx_root_bh); out: return ret; } static void ocfs2_remove_block_from_free_list(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *lookup) { struct ocfs2_dir_block_trailer *trailer, *prev; struct ocfs2_dx_root_block *dx_root; 
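	/*
	 * Unlink dl_leaf_bh from the singly linked free list: the
	 * previous hop is either the dx root (the list head) or the
	 * trailer of another unindexed block.
	 */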
struct buffer_head *bh; trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb); if (ocfs2_free_list_at_root(lookup)) { bh = lookup->dl_dx_root_bh; dx_root = (struct ocfs2_dx_root_block *)bh->b_data; dx_root->dr_free_blk = trailer->db_free_next; } else { bh = lookup->dl_prev_leaf_bh; prev = ocfs2_trailer_from_bh(bh, dir->i_sb); prev->db_free_next = trailer->db_free_next; } trailer->db_free_rec_len = cpu_to_le16(0); trailer->db_free_next = cpu_to_le64(0); ocfs2_journal_dirty(handle, bh); ocfs2_journal_dirty(handle, lookup->dl_leaf_bh); } /* * This expects that a journal write has been reserved on * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh */ static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *lookup) { int max_rec_len; struct ocfs2_dir_block_trailer *trailer; /* Walk dl_leaf_bh to figure out what the new free rec_len is. */ max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh); if (max_rec_len) { /* * There's still room in this block, so no need to remove it * from the free list. In this case, we just want to update * the rec len accounting. */ trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb); trailer->db_free_rec_len = cpu_to_le16(max_rec_len); ocfs2_journal_dirty(handle, lookup->dl_leaf_bh); } else { ocfs2_remove_block_from_free_list(dir, handle, lookup); } } /* we don't always have a dentry for what we want to add, so people * like orphan dir can call this instead. * * The lookup context must have been filled from * ocfs2_prepare_dir_for_insert. */ int __ocfs2_add_entry(handle_t *handle, struct inode *dir, const char *name, int namelen, struct inode *inode, u64 blkno, struct buffer_head *parent_fe_bh, struct ocfs2_dir_lookup_result *lookup) { unsigned long offset; unsigned short rec_len; struct ocfs2_dir_entry *de, *de1; struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data; struct super_block *sb = dir->i_sb; int retval; unsigned int size = sb->s_blocksize; struct buffer_head *insert_bh = lookup->dl_leaf_bh; char *data_start = insert_bh->b_data; if (ocfs2_dir_indexed(dir)) { struct buffer_head *bh; /* * An indexed dir may require that we update the free space * list. Reserve a write to the previous node in the list so * that we don't fail later. * * XXX: This can be either a dx_root_block, or an unindexed * directory tree leaf block. */ if (ocfs2_free_list_at_root(lookup)) { bh = lookup->dl_dx_root_bh; retval = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); } else { bh = lookup->dl_prev_leaf_bh; retval = ocfs2_journal_access_db(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); } if (retval) { mlog_errno(retval); return retval; } } else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { data_start = di->id2.i_data.id_data; size = i_size_read(dir); BUG_ON(insert_bh != parent_fe_bh); } rec_len = OCFS2_DIR_REC_LEN(namelen); offset = 0; de = (struct ocfs2_dir_entry *) data_start; while (1) { BUG_ON((char *)de >= (size + data_start)); /* These checks should've already been passed by the * prepare function, but I guess we can leave them * here anyway. */ if (!ocfs2_check_dir_entry(dir, de, insert_bh, data_start, size, offset)) { retval = -ENOENT; goto bail; } if (ocfs2_match(namelen, name, de)) { retval = -EEXIST; goto bail; } /* We're guaranteed that we should have space, so we * can't possibly have hit the trailer...right? 
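		 * (ocfs2_prepare_dir_for_insert() reserved the space
		 * for us, so the assert below documents that contract
		 * rather than trying to recover from its violation.)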
*/ mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size), "Hit dir trailer trying to insert %.*s " "(namelen %d) into directory %llu. " "offset is %lu, trailer offset is %d\n", namelen, name, namelen, (unsigned long long)parent_fe_bh->b_blocknr, offset, ocfs2_dir_trailer_blk_off(dir->i_sb)); if (ocfs2_dirent_would_fit(de, rec_len)) { inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh); if (retval < 0) { mlog_errno(retval); goto bail; } if (insert_bh == parent_fe_bh) retval = ocfs2_journal_access_di(handle, INODE_CACHE(dir), insert_bh, OCFS2_JOURNAL_ACCESS_WRITE); else { retval = ocfs2_journal_access_db(handle, INODE_CACHE(dir), insert_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (!retval && ocfs2_dir_indexed(dir)) retval = ocfs2_dx_dir_insert(dir, handle, lookup); } if (retval) { mlog_errno(retval); goto bail; } /* By now the buffer is marked for journaling */ offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { de1 = (struct ocfs2_dir_entry *)((char *) de + OCFS2_DIR_REC_LEN(de->name_len)); de1->rec_len = cpu_to_le16(le16_to_cpu(de->rec_len) - OCFS2_DIR_REC_LEN(de->name_len)); de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); de = de1; } de->file_type = FT_UNKNOWN; if (blkno) { de->inode = cpu_to_le64(blkno); ocfs2_set_de_type(de, inode->i_mode); } else de->inode = 0; de->name_len = namelen; memcpy(de->name, name, namelen); if (ocfs2_dir_indexed(dir)) ocfs2_recalc_free_list(dir, handle, lookup); inode_inc_iversion(dir); ocfs2_journal_dirty(handle, insert_bh); retval = 0; goto bail; } offset += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len)); } /* when you think about it, the assert above should prevent us * from ever getting here. */ retval = -ENOSPC; bail: if (retval) mlog_errno(retval); return retval; } static int ocfs2_dir_foreach_blk_id(struct inode *inode, u64 *f_version, struct dir_context *ctx) { int ret, i; unsigned long offset = ctx->pos; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; struct ocfs2_dir_entry *de; ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog(ML_ERROR, "Unable to read inode block for dir %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; while (ctx->pos < i_size_read(inode)) { /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. */ if (!inode_eq_iversion(inode, *f_version)) { for (i = 0; i < i_size_read(inode) && i < offset; ) { de = (struct ocfs2_dir_entry *) (data->id_data + i); /* It's too expensive to do a full * dirent test each time round this * loop, but we do have to test at * least that it is non-zero. A * failure will be detected in the * dirent test below. */ if (le16_to_cpu(de->rec_len) < OCFS2_DIR_REC_LEN(1)) break; i += le16_to_cpu(de->rec_len); } ctx->pos = offset = i; *f_version = inode_query_iversion(inode); } de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos); if (!ocfs2_check_dir_entry(inode, de, di_bh, (char *)data->id_data, i_size_read(inode), ctx->pos)) { /* On error, skip the f_pos to the end. 
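			 * An inline directory has only this one data
			 * "block", so there is nothing past it worth
			 * salvaging.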
*/ ctx->pos = i_size_read(inode); break; } offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { if (!dir_emit(ctx, de->name, de->name_len, le64_to_cpu(de->inode), fs_ftype_to_dtype(de->file_type))) goto out; } ctx->pos += le16_to_cpu(de->rec_len); } out: brelse(di_bh); return 0; } /* * NOTE: This function can be called against unindexed directories, * and indexed ones. */ static int ocfs2_dir_foreach_blk_el(struct inode *inode, u64 *f_version, struct dir_context *ctx, bool persist) { unsigned long offset, blk, last_ra_blk = 0; int i; struct buffer_head * bh, * tmp; struct ocfs2_dir_entry * de; struct super_block * sb = inode->i_sb; unsigned int ra_sectors = 16; int stored = 0; bh = NULL; offset = ctx->pos & (sb->s_blocksize - 1); while (ctx->pos < i_size_read(inode)) { blk = ctx->pos >> sb->s_blocksize_bits; if (ocfs2_read_dir_block(inode, blk, &bh, 0)) { /* Skip the corrupt dirblock and keep trying */ ctx->pos += sb->s_blocksize - offset; continue; } /* The idea here is to begin with 8k read-ahead and to stay * 4k ahead of our current position. * * TODO: Use the pagecache for this. We just need to * make sure it's cluster-safe... */ if (!last_ra_blk || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) { for (i = ra_sectors >> (sb->s_blocksize_bits - 9); i > 0; i--) { tmp = NULL; if (!ocfs2_read_dir_block(inode, ++blk, &tmp, OCFS2_BH_READAHEAD)) brelse(tmp); } last_ra_blk = blk; ra_sectors = 8; } /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. */ if (!inode_eq_iversion(inode, *f_version)) { for (i = 0; i < sb->s_blocksize && i < offset; ) { de = (struct ocfs2_dir_entry *) (bh->b_data + i); /* It's too expensive to do a full * dirent test each time round this * loop, but we do have to test at * least that it is non-zero. A * failure will be detected in the * dirent test below. */ if (le16_to_cpu(de->rec_len) < OCFS2_DIR_REC_LEN(1)) break; i += le16_to_cpu(de->rec_len); } offset = i; ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1)) | offset; *f_version = inode_query_iversion(inode); } while (ctx->pos < i_size_read(inode) && offset < sb->s_blocksize) { de = (struct ocfs2_dir_entry *) (bh->b_data + offset); if (!ocfs2_check_dir_entry(inode, de, bh, bh->b_data, sb->s_blocksize, offset)) { /* On error, skip the f_pos to the next block. */ ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; break; } if (le64_to_cpu(de->inode)) { if (!dir_emit(ctx, de->name, de->name_len, le64_to_cpu(de->inode), fs_ftype_to_dtype(de->file_type))) { brelse(bh); return 0; } stored++; } offset += le16_to_cpu(de->rec_len); ctx->pos += le16_to_cpu(de->rec_len); } offset = 0; brelse(bh); bh = NULL; if (!persist && stored) break; } return 0; } static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version, struct dir_context *ctx, bool persist) { if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return ocfs2_dir_foreach_blk_id(inode, f_version, ctx); return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist); } /* * This is intended to be called from inside other kernel functions, * so we fake some arguments. 
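 * In particular there is no struct file to carry a readdir cookie, so
 * a fresh iversion snapshot stands in for f_version.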
*/ int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx) { u64 version = inode_query_iversion(inode); ocfs2_dir_foreach_blk(inode, &version, ctx, true); return 0; } /* * ocfs2_readdir() * */ int ocfs2_readdir(struct file *file, struct dir_context *ctx) { int error = 0; struct inode *inode = file_inode(file); struct ocfs2_file_private *fp = file->private_data; int lock_level = 0; trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno); error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level, 1); if (lock_level && error >= 0) { /* We release EX lock which used to update atime * and get PR lock again to reduce contention * on commonly accessed directories. */ ocfs2_inode_unlock(inode, 1); lock_level = 0; error = ocfs2_inode_lock(inode, NULL, 0); } if (error < 0) { if (error != -ENOENT) mlog_errno(error); /* we haven't got any yet, so propagate the error. */ goto bail_nolock; } error = ocfs2_dir_foreach_blk(inode, &fp->cookie, ctx, false); ocfs2_inode_unlock(inode, lock_level); if (error) mlog_errno(error); bail_nolock: return error; } /* * NOTE: this should always be called with parent dir i_rwsem taken. */ int ocfs2_find_files_on_disk(const char *name, int namelen, u64 *blkno, struct inode *inode, struct ocfs2_dir_lookup_result *lookup) { int status = -ENOENT; trace_ocfs2_find_files_on_disk(namelen, name, blkno, (unsigned long long)OCFS2_I(inode)->ip_blkno); status = ocfs2_find_entry(name, namelen, inode, lookup); if (status) goto leave; *blkno = le64_to_cpu(lookup->dl_entry->inode); status = 0; leave: return status; } /* * Convenience function for callers which just want the block number * mapped to a name and don't require the full dirent info, etc. */ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, int namelen, u64 *blkno) { int ret; struct ocfs2_dir_lookup_result lookup = { NULL, }; ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup); ocfs2_free_dir_lookup_result(&lookup); return ret; } /* Check for a name within a directory. * * Return 0 if the name does not exist * Return -EEXIST if the directory contains the name * Return -EFSCORRUPTED if found corruption * * Callers should have i_rwsem + a cluster lock on dir */ int ocfs2_check_dir_for_entry(struct inode *dir, const char *name, int namelen) { int ret = 0; struct ocfs2_dir_lookup_result lookup = { NULL, }; trace_ocfs2_check_dir_for_entry( (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); ret = ocfs2_find_entry(name, namelen, dir, &lookup); if (ret == 0) { ret = -EEXIST; mlog_errno(ret); } else if (ret == -ENOENT) { ret = 0; } ocfs2_free_dir_lookup_result(&lookup); return ret; } struct ocfs2_empty_dir_priv { struct dir_context ctx; unsigned seen_dot; unsigned seen_dot_dot; unsigned seen_other; unsigned dx_dir; }; static bool ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name, int name_len, loff_t pos, u64 ino, unsigned type) { struct ocfs2_empty_dir_priv *p = container_of(ctx, struct ocfs2_empty_dir_priv, ctx); /* * Check the positions of "." and ".." records to be sure * they're in the correct place. * * Indexed directories don't need to proceed past the first * two entries, so we end the scan after seeing '..'. Despite * that, we allow the scan to proceed In the event that we * have a corrupted indexed directory (no dot or dot dot * entries). This allows us to double check for existing * entries which might not have been found in the index. 
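	 * The position checks below matter: '.' must be the very first
	 * dirent, and '..' must immediately follow it at offset
	 * OCFS2_DIR_REC_LEN(1).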
*/ if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) { p->seen_dot = 1; return true; } if (name_len == 2 && !strncmp("..", name, 2) && pos == OCFS2_DIR_REC_LEN(1)) { p->seen_dot_dot = 1; if (p->dx_dir && p->seen_dot) return false; return true; } p->seen_other = 1; return false; } static int ocfs2_empty_dir_dx(struct inode *inode, struct ocfs2_empty_dir_priv *priv) { int ret; struct buffer_head *di_bh = NULL; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_dx_root_block *dx_root; priv->dx_dir = 1; ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; ret = ocfs2_read_dx_root(inode, di, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; if (le32_to_cpu(dx_root->dr_num_entries) != 2) priv->seen_other = 1; out: brelse(di_bh); brelse(dx_root_bh); return ret; } /* * routine to check that the specified directory is empty (for rmdir) * * Returns 1 if dir is empty, zero otherwise. * * XXX: This is a performance problem for unindexed directories. */ int ocfs2_empty_dir(struct inode *inode) { int ret; struct ocfs2_empty_dir_priv priv = { .ctx.actor = ocfs2_empty_dir_filldir, }; if (ocfs2_dir_indexed(inode)) { ret = ocfs2_empty_dir_dx(inode, &priv); if (ret) mlog_errno(ret); /* * We still run ocfs2_dir_foreach to get the checks * for "." and "..". */ } ret = ocfs2_dir_foreach(inode, &priv.ctx); if (ret) mlog_errno(ret); if (!priv.seen_dot || !priv.seen_dot_dot) { mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); /* * XXX: Is it really safe to allow an unlink to continue? */ return 1; } return !priv.seen_other; } /* * Fills "." and ".." dirents in a new directory block. Returns dirent for * "..", which might be used during creation of a directory with a trailing * header. It is otherwise safe to ignore the return code. */ static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode, struct inode *parent, char *start, unsigned int size) { struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start; de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); de->name_len = 1; de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); strcpy(de->name, "."); ocfs2_set_de_type(de, S_IFDIR); de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len)); de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno); de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1)); de->name_len = 2; strcpy(de->name, ".."); ocfs2_set_de_type(de, S_IFDIR); return de; } /* * This works together with code in ocfs2_mknod_locked() which sets * the inline-data flag and initializes the inline-data section. 
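 * All this function has to add is the '.' and '..' dirents, plus
 * i_size and nlink values that match them.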
*/ static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *di_bh) { int ret; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inline_data *data = &di->id2.i_data; unsigned int size = le16_to_cpu(data->id_count); ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ocfs2_fill_initial_dirents(inode, parent, data->id_data, size); ocfs2_journal_dirty(handle, di_bh); i_size_write(inode, size); set_nlink(inode, 2); inode->i_blocks = ocfs2_inode_sector_count(inode); ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); if (ret < 0) mlog_errno(ret); out: return ret; } static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_alloc_context *data_ac, struct buffer_head **ret_new_bh) { int status; unsigned int size = osb->sb->s_blocksize; struct buffer_head *new_bh = NULL; struct ocfs2_dir_entry *de; if (ocfs2_new_dir_wants_trailer(inode)) size = ocfs2_dir_trailer_blk_off(parent->i_sb); status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh, data_ac, NULL, &new_bh); if (status < 0) { mlog_errno(status); goto bail; } ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; } memset(new_bh->b_data, 0, osb->sb->s_blocksize); de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size); if (ocfs2_new_dir_wants_trailer(inode)) { int size = le16_to_cpu(de->rec_len); /* * Figure out the size of the hole left over after * insertion of '.' and '..'. The trailer wants this * information. 
*/ size -= OCFS2_DIR_REC_LEN(2); size -= sizeof(struct ocfs2_dir_block_trailer); ocfs2_init_dir_trailer(inode, new_bh, size); } ocfs2_journal_dirty(handle, new_bh); i_size_write(inode, inode->i_sb->s_blocksize); set_nlink(inode, 2); inode->i_blocks = ocfs2_inode_sector_count(inode); status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); if (status < 0) { mlog_errno(status); goto bail; } status = 0; if (ret_new_bh) { *ret_new_bh = new_bh; new_bh = NULL; } bail: brelse(new_bh); return status; } static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb, handle_t *handle, struct inode *dir, struct buffer_head *di_bh, struct buffer_head *dirdata_bh, struct ocfs2_alloc_context *meta_ac, int dx_inline, u32 num_entries, struct buffer_head **ret_dx_root_bh) { int ret; struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; u16 dr_suballoc_bit; u64 suballoc_loc, dr_blkno; unsigned int num_bits; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dir_block_trailer *trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb); ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc, &dr_suballoc_bit, &num_bits, &dr_blkno); if (ret) { mlog_errno(ret); goto out; } trace_ocfs2_dx_dir_attach_index( (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)dr_blkno); dx_root_bh = sb_getblk(osb->sb, dr_blkno); if (dx_root_bh == NULL) { ret = -ENOMEM; goto out; } ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh); ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; memset(dx_root, 0, osb->sb->s_blocksize); strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE); dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc); dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit); dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation); dx_root->dr_blkno = cpu_to_le64(dr_blkno); dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno); dx_root->dr_num_entries = cpu_to_le32(num_entries); if (le16_to_cpu(trailer->db_free_rec_len)) dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr); else dx_root->dr_free_blk = cpu_to_le64(0); if (dx_inline) { dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE; dx_root->dr_entries.de_count = cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb)); } else { dx_root->dr_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb)); } ocfs2_journal_dirty(handle, dx_root_bh); ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out; } di->i_dx_root = cpu_to_le64(dr_blkno); spin_lock(&OCFS2_I(dir)->ip_lock); OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL; di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); spin_unlock(&OCFS2_I(dir)->ip_lock); ocfs2_journal_dirty(handle, di_bh); *ret_dx_root_bh = dx_root_bh; dx_root_bh = NULL; out: brelse(dx_root_bh); return ret; } static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb, handle_t *handle, struct inode *dir, struct buffer_head **dx_leaves, int num_dx_leaves, u64 start_blk) { int ret, i; struct ocfs2_dx_leaf *dx_leaf; struct buffer_head *bh; for (i = 0; i < num_dx_leaves; i++) { bh = sb_getblk(osb->sb, start_blk + i); if (bh == NULL) { ret = -ENOMEM; goto out; } dx_leaves[i] = bh; ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh); ret = ocfs2_journal_access_dl(handle, 
INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto out; } dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data; memset(dx_leaf, 0, osb->sb->s_blocksize); strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE); dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation); dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr); dx_leaf->dl_list.de_count = cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb)); trace_ocfs2_dx_dir_format_cluster( (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)bh->b_blocknr, le16_to_cpu(dx_leaf->dl_list.de_count)); ocfs2_journal_dirty(handle, bh); } ret = 0; out: return ret; } /* * Allocates and formats a new cluster for use in an indexed dir * leaf. This version will not do the extent insert, so that it can be * used by operations which need careful ordering. */ static int __ocfs2_dx_dir_new_cluster(struct inode *dir, u32 cpos, handle_t *handle, struct ocfs2_alloc_context *data_ac, struct buffer_head **dx_leaves, int num_dx_leaves, u64 *ret_phys_blkno) { int ret; u32 phys, num; u64 phys_blkno; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); /* * XXX: For create, this should claim cluster for the index * *before* the unindexed insert so that we have a better * chance of contiguousness as the directory grows in number * of entries. */ ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num); if (ret) { mlog_errno(ret); goto out; } /* * Format the new cluster first. That way, we're inserting * valid data. */ phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys); ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves, num_dx_leaves, phys_blkno); if (ret) { mlog_errno(ret); goto out; } *ret_phys_blkno = phys_blkno; out: return ret; } static int ocfs2_dx_dir_new_cluster(struct inode *dir, struct ocfs2_extent_tree *et, u32 cpos, handle_t *handle, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct buffer_head **dx_leaves, int num_dx_leaves) { int ret; u64 phys_blkno; ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves, num_dx_leaves, &phys_blkno); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0, meta_ac); if (ret) mlog_errno(ret); out: return ret; } static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb, int *ret_num_leaves) { int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1); struct buffer_head **dx_leaves; dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *), GFP_NOFS); if (dx_leaves && ret_num_leaves) *ret_num_leaves = num_dx_leaves; return dx_leaves; } static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *di_bh, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac) { int ret; struct buffer_head *leaf_bh = NULL; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_hinfo hinfo; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dx_entry_list *entry_list; /* * Our strategy is to create the directory as though it were * unindexed, then add the index block. This works with very * little complication since the state of a new directory is a * very well known quantity. * * Essentially, we have two dirents ("." and ".."), in the 1st * block which need indexing. These are easily inserted into * the index block. 
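	 * Two entries always fit in an inline dx_root, which is why
	 * the attach call below can hard-code dx_inline = 1 and
	 * num_entries = 2.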
*/ ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh, data_ac, &leaf_bh); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh, meta_ac, 1, 2, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; entry_list = &dx_root->dr_entries; /* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */ ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo); ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr); ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo); ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr); out: brelse(dx_root_bh); brelse(leaf_bh); return ret; } int ocfs2_fill_new_dir(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac) { BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL); if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh); if (ocfs2_supports_indexed_dirs(osb)) return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh, data_ac, meta_ac); return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh, data_ac, NULL); } static int ocfs2_dx_dir_index_block(struct inode *dir, handle_t *handle, struct buffer_head **dx_leaves, int num_dx_leaves, u32 *num_dx_entries, struct buffer_head *dirent_bh) { int ret = 0, namelen, i; char *de_buf, *limit; struct ocfs2_dir_entry *de; struct buffer_head *dx_leaf_bh; struct ocfs2_dx_hinfo hinfo; u64 dirent_blk = dirent_bh->b_blocknr; de_buf = dirent_bh->b_data; limit = de_buf + dir->i_sb->s_blocksize; while (de_buf < limit) { de = (struct ocfs2_dir_entry *)de_buf; namelen = de->name_len; if (!namelen || !de->inode) goto inc; ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo); i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo); dx_leaf_bh = dx_leaves[i]; ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo, dirent_blk, dx_leaf_bh); if (ret) { mlog_errno(ret); goto out; } *num_dx_entries = *num_dx_entries + 1; inc: de_buf += le16_to_cpu(de->rec_len); } out: return ret; } /* * XXX: This expects dx_root_bh to already be part of the transaction. */ static void ocfs2_dx_dir_index_root_block(struct inode *dir, struct buffer_head *dx_root_bh, struct buffer_head *dirent_bh) { char *de_buf, *limit; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dir_entry *de; struct ocfs2_dx_hinfo hinfo; u64 dirent_blk = dirent_bh->b_blocknr; dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; de_buf = dirent_bh->b_data; limit = de_buf + dir->i_sb->s_blocksize; while (de_buf < limit) { de = (struct ocfs2_dir_entry *)de_buf; if (!de->name_len || !de->inode) goto inc; ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo); trace_ocfs2_dx_dir_index_root_block( (unsigned long long)dir->i_ino, hinfo.major_hash, hinfo.minor_hash, de->name_len, de->name, le16_to_cpu(dx_root->dr_entries.de_num_used)); ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo, dirent_blk); le32_add_cpu(&dx_root->dr_num_entries, 1); inc: de_buf += le16_to_cpu(de->rec_len); } } /* * Count the number of inline directory entries in di_bh and compare * them against the number of entries we can hold in an inline dx root * block. 
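 * Unlinked (inode == 0) dirents still occupy inline space but will
 * never generate index entries, so they are not counted.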
*/ static int ocfs2_new_dx_should_be_inline(struct inode *dir, struct buffer_head *di_bh) { int dirent_count = 0; char *de_buf, *limit; struct ocfs2_dir_entry *de; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; de_buf = di->id2.i_data.id_data; limit = de_buf + i_size_read(dir); while (de_buf < limit) { de = (struct ocfs2_dir_entry *)de_buf; if (de->name_len && de->inode) dirent_count++; de_buf += le16_to_cpu(de->rec_len); } /* We are careful to leave room for one extra record. */ return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb); } /* * Expand rec_len of the rightmost dirent in a directory block so that it * contains the end of our valid space for dirents. We do this during * expansion from an inline directory to one with extents. The first dir block * in that case is taken from the inline data portion of the inode block. * * This will also return the largest amount of contiguous space for a dirent * in the block. That value is *not* necessarily the last dirent, even after * expansion. The directory indexing code wants this value for free space * accounting. We do this here since we're already walking the entire dir * block. * * We add the dir trailer if this filesystem wants it. */ static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size, struct inode *dir) { struct super_block *sb = dir->i_sb; struct ocfs2_dir_entry *de; struct ocfs2_dir_entry *prev_de; char *de_buf, *limit; unsigned int new_size = sb->s_blocksize; unsigned int bytes, this_hole; unsigned int largest_hole = 0; if (ocfs2_new_dir_wants_trailer(dir)) new_size = ocfs2_dir_trailer_blk_off(sb); bytes = new_size - old_size; limit = start + old_size; de_buf = start; de = (struct ocfs2_dir_entry *)de_buf; do { this_hole = ocfs2_figure_dirent_hole(de); if (this_hole > largest_hole) largest_hole = this_hole; prev_de = de; de_buf += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *)de_buf; } while (de_buf < limit); le16_add_cpu(&prev_de->rec_len, bytes); /* We need to double check this after modification of the final * dirent. */ this_hole = ocfs2_figure_dirent_hole(prev_de); if (this_hole > largest_hole) largest_hole = this_hole; if (largest_hole >= OCFS2_DIR_MIN_REC_LEN) return largest_hole; return 0; } /* * We allocate enough clusters to fulfill "blocks_wanted", but set * i_size to exactly one block. Ocfs2_extend_dir() will handle the * rest automatically for us. * * *first_block_bh is a pointer to the 1st data block allocated to the * directory. 
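 * On success the directory is no longer inline - it has been converted
 * to extents (and indexed, where the filesystem supports that). The
 * pending dirent insert is still the caller's job.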
/*
 * We allocate enough clusters to fulfill "blocks_wanted", but set
 * i_size to exactly one block. ocfs2_extend_dir() will handle the
 * rest automatically for us.
 *
 * *first_block_bh is a pointer to the 1st data block allocated to the
 * directory.
 */
static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
				   unsigned int blocks_wanted,
				   struct ocfs2_dir_lookup_result *lookup,
				   struct buffer_head **first_block_bh)
{
	u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
	struct super_block *sb = dir->i_sb;
	int ret, i, num_dx_leaves = 0, dx_inline = 0,
		credits = ocfs2_inline_to_extents_credits(sb);
	u64 dx_insert_blkno, blkno,
		bytes = blocks_wanted << sb->s_blocksize_bits;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(dir);
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct buffer_head *dirdata_bh = NULL;
	struct buffer_head *dx_root_bh = NULL;
	struct buffer_head **dx_leaves = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	handle_t *handle;
	struct ocfs2_extent_tree et;
	struct ocfs2_extent_tree dx_et;
	int did_quota = 0, bytes_allocated = 0;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);

	alloc = ocfs2_clusters_for_bytes(sb, bytes);
	dx_alloc = 0;

	down_write(&oi->ip_alloc_sem);

	if (ocfs2_supports_indexed_dirs(osb)) {
		credits += ocfs2_add_dir_index_credits(sb);

		dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
		if (!dx_inline) {
			/* Add one more cluster for an index leaf */
			dx_alloc++;
			dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
								&num_dx_leaves);
			if (!dx_leaves) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}

		/* This gets us the dx_root */
		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * We should never need more than 2 clusters for the unindexed
	 * tree - maximum dirent size is far less than one block. In
	 * fact, the only time we'd need more than one cluster is if
	 * blocksize == clustersize and the dirent won't fit in the
	 * extra space that the expansion to a single block gives. As
	 * of today, that only happens on 4k/4k file systems.
	 */
	BUG_ON(alloc > 2);

	ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Prepare for worst case allocation scenario of two separate
	 * extents in the unindexed tree.
	 */
	if (alloc == 2)
		credits += OCFS2_SUBALLOC_ALLOC;

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = dquot_alloc_space_nodirty(dir,
		ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
	if (ret)
		goto out_commit;
	did_quota = 1;

	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
		/*
		 * Allocate our index cluster first, to maximize the
		 * possibility that unindexed leaves grow
		 * contiguously.
		 */
		ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
						 dx_leaves, num_dx_leaves,
						 &dx_insert_blkno);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
	}

	/*
	 * Try to claim as many clusters as the bitmap can give, though
	 * if we only get one now, that's enough to continue. The rest
	 * will be claimed after the conversion to extents.
	 */
	if (ocfs2_dir_resv_allowed(osb))
		data_ac->ac_resv = &oi->ip_la_data_resv;
	ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}
	bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);

	/*
	 * Operations are carefully ordered so that we set up the new
	 * data block first. The conversion from inline data to
	 * extents follows.
	 */
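	/*
	 * The cluster claimed above is identified by its offset in the
	 * global bitmap; translate that into a physical block number so
	 * the first directory data block can be mapped into the buffer
	 * cache via sb_getblk() and populated from the inline dirents.
	 */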
	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
	dirdata_bh = sb_getblk(sb, blkno);
	if (!dirdata_bh) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);

	ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
	memset(dirdata_bh->b_data + i_size_read(dir), 0,
	       sb->s_blocksize - i_size_read(dir));
	i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
	if (ocfs2_new_dir_wants_trailer(dir)) {
		/*
		 * Prepare the dir trailer up front. It would otherwise look
		 * like a valid dirent. Even if inserting the index fails
		 * (unlikely), then all we'll have done is given the first dir
		 * block a small amount of fragmentation.
		 */
		ocfs2_init_dir_trailer(dir, dirdata_bh, i);
	}

	ocfs2_update_inode_fsync_trans(handle, dir, 1);
	ocfs2_journal_dirty(handle, dirdata_bh);

	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
		/*
		 * Dx dirs with an external cluster need to do this up
		 * front. Inline dx roots get handled later, after
		 * we've allocated our root block. We get passed back
		 * a total number of items so that dr_num_entries can
		 * be correctly set once the dx_root has been
		 * allocated.
		 */
		ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
					       num_dx_leaves, &num_dx_entries,
					       dirdata_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	/*
	 * Set extent, i_size, etc on the directory. After this, the
	 * inode should contain the same exact dirents as before and
	 * be fully accessible from system calls.
	 *
	 * We let the later dirent insert modify c/mtime - to the user
	 * the data hasn't changed.
	 */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	spin_unlock(&oi->ip_lock);

	ocfs2_dinode_new_extent_list(dir, di);

	i_size_write(dir, sb->s_blocksize);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	di->i_size = cpu_to_le64(sb->s_blocksize);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(dir));
	di->i_ctime_nsec = di->i_mtime_nsec =
		cpu_to_le32(inode_get_ctime_nsec(dir));
	ocfs2_update_inode_fsync_trans(handle, dir, 1);

	/*
	 * This should never fail as our extent list is empty and all
	 * related blocks have been journaled already.
	 */
	ret = ocfs2_insert_extent(handle, &et, 0, blkno, len, 0, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Set i_blocks after the extent insert for the most up to
	 * date ip_clusters value.
	 */
	dir->i_blocks = ocfs2_inode_sector_count(dir);

	ocfs2_journal_dirty(handle, di_bh);

	if (ocfs2_supports_indexed_dirs(osb)) {
		ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
						dirdata_bh, meta_ac, dx_inline,
						num_dx_entries, &dx_root_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}

		if (dx_inline) {
			ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
						      dirdata_bh);