// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/mutex.h>
#include <net/sock.h>

#include "nf_internals.h"

/* Sockopts only registered and called from user context, so
   net locking would be overkill. Also, [gs]etsockopt calls may
   sleep. */
static DEFINE_MUTEX(nf_sockopt_mutex);

static LIST_HEAD(nf_sockopts);

/* Do exclusive ranges overlap? */
static inline int overlap(int min1, int max1, int min2, int max2)
{
	return max1 > min2 && min1 < max2;
}

/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
	struct nf_sockopt_ops *ops;
	int ret = 0;

	mutex_lock(&nf_sockopt_mutex);
	list_for_each_entry(ops, &nf_sockopts, list) {
		if (ops->pf == reg->pf &&
		    (overlap(ops->set_optmin, ops->set_optmax,
			     reg->set_optmin, reg->set_optmax) ||
		     overlap(ops->get_optmin, ops->get_optmax,
			     reg->get_optmin, reg->get_optmax))) {
			pr_debug("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
				 ops->set_optmin, ops->set_optmax,
				 ops->get_optmin, ops->get_optmax,
				 reg->set_optmin, reg->set_optmax,
				 reg->get_optmin, reg->get_optmax);
			ret = -EBUSY;
			goto out;
		}
	}

	list_add(&reg->list, &nf_sockopts);
out:
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}
EXPORT_SYMBOL(nf_register_sockopt);

void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
	mutex_lock(&nf_sockopt_mutex);
	list_del(&reg->list);
	mutex_unlock(&nf_sockopt_mutex);
}
EXPORT_SYMBOL(nf_unregister_sockopt);

static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf,
					      int val, int get)
{
	struct nf_sockopt_ops *ops;

	mutex_lock(&nf_sockopt_mutex);
	list_for_each_entry(ops, &nf_sockopts, list) {
		if (ops->pf == pf) {
			if (!try_module_get(ops->owner))
				goto out_nosup;
			if (get) {
				if (val >= ops->get_optmin &&
				    val < ops->get_optmax)
					goto out;
			} else {
				if (val >= ops->set_optmin &&
				    val < ops->set_optmax)
					goto out;
			}
			module_put(ops->owner);
		}
	}
out_nosup:
	ops = ERR_PTR(-ENOPROTOOPT);
out:
	mutex_unlock(&nf_sockopt_mutex);
	return ops;
}

int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, sockptr_t opt,
		  unsigned int len)
{
	struct nf_sockopt_ops *ops;
	int ret;

	ops = nf_sockopt_find(sk, pf, val, 0);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	ret = ops->set(sk, val, opt, len);
	module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(nf_setsockopt);

int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
		  int *len)
{
	struct nf_sockopt_ops *ops;
	int ret;

	ops = nf_sockopt_find(sk, pf, val, 1);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	ret = ops->get(sk, val, opt, len);
	module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(nf_getsockopt);
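/*
 * Illustrative usage sketch (not part of the file above): how a protocol
 * module might register an exclusive sockopt range with nf_register_sockopt().
 * The "example_*" names, the option range and the empty callback bodies are
 * made up for illustration; only the nf_sockopt_ops members actually used by
 * the code above (pf, [gs]et_optmin/optmax, set, get, owner) are relied on.
 */
static int example_set(struct sock *sk, int optval, sockptr_t arg,
		       unsigned int len)
{
	/* A real handler would copy the option in, e.g. via copy_from_sockptr(). */
	return 0;
}

static int example_get(struct sock *sk, int optval, char __user *user, int *len)
{
	/* A real handler would copy the option out, e.g. via copy_to_user(). */
	return 0;
}

static struct nf_sockopt_ops example_sockopt_ops = {
	.pf		= PF_INET,
	.set_optmin	= 96,	/* arbitrary exclusive range [96, 100) */
	.set_optmax	= 100,
	.set		= example_set,
	.get_optmin	= 96,
	.get_optmax	= 100,
	.get		= example_get,
	.owner		= THIS_MODULE,
};

static int __init example_init(void)
{
	/* Fails with -EBUSY if the range overlaps an existing registration. */
	return nf_register_sockopt(&example_sockopt_ops);
}

static void __exit example_exit(void)
{
	nf_unregister_sockopt(&example_sockopt_ops);
}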
// SPDX-License-Identifier: GPL-2.0-or-later
/* In-software asymmetric public-key crypto subtype
 *
 * See Documentation/crypto/asymmetric-keys.rst
 *
 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "PKEY: "fmt
#include <crypto/akcipher.h>
#include <crypto/public_key.h>
#include <crypto/sig.h>
#include <keys/asymmetric-subtype.h>
#include <linux/asn1.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

/*
 * Provide a part of a description of the key for /proc/keys.
 */
static void public_key_describe(const struct key *asymmetric_key,
				struct seq_file *m)
{
	struct public_key *key = asymmetric_key->payload.data[asym_crypto];

	if (key)
		seq_printf(m, "%s.%s", key->id_type, key->pkey_algo);
}

/*
 * Destroy a public key algorithm key.
 */
void public_key_free(struct public_key *key)
{
	if (key) {
		kfree_sensitive(key->key);
		kfree(key->params);
		kfree(key);
	}
}
EXPORT_SYMBOL_GPL(public_key_free);

/*
 * Destroy a public key algorithm key.
 */
static void public_key_destroy(void *payload0, void *payload3)
{
	public_key_free(payload0);
	public_key_signature_free(payload3);
}

/*
 * Given a public_key, and an encoding and hash_algo to be used for signing
 * and/or verification with that key, determine the name of the corresponding
 * akcipher algorithm. Also check that encoding and hash_algo are allowed.
*/ static int software_key_determine_akcipher(const struct public_key *pkey, const char *encoding, const char *hash_algo, char alg_name[CRYPTO_MAX_ALG_NAME], bool *sig, enum kernel_pkey_operation op) { int n; *sig = true; if (!encoding) return -EINVAL; if (strcmp(pkey->pkey_algo, "rsa") == 0) { /* * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2]. */ if (strcmp(encoding, "pkcs1") == 0) { *sig = op == kernel_pkey_sign || op == kernel_pkey_verify; if (!*sig) { /* * For encrypt/decrypt, hash_algo is not used * but allowed to be set for historic reasons. */ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", pkey->pkey_algo); } else { if (!hash_algo) hash_algo = "none"; n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1(%s,%s)", pkey->pkey_algo, hash_algo); } return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; } if (strcmp(encoding, "raw") != 0) return -EINVAL; /* * Raw RSA cannot differentiate between different hash * algorithms. */ if (hash_algo) return -EINVAL; *sig = false; } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { if (strcmp(encoding, "x962") != 0 && strcmp(encoding, "p1363") != 0) return -EINVAL; /* * ECDSA signatures are taken over a raw hash, so they don't * differentiate between different hash algorithms. That means * that the verifier should hard-code a specific hash algorithm. * Unfortunately, in practice ECDSA is used with multiple SHAs, * so we have to allow all of them and not just one. */ if (!hash_algo) return -EINVAL; if (strcmp(hash_algo, "sha1") != 0 && strcmp(hash_algo, "sha224") != 0 && strcmp(hash_algo, "sha256") != 0 && strcmp(hash_algo, "sha384") != 0 && strcmp(hash_algo, "sha512") != 0 && strcmp(hash_algo, "sha3-256") != 0 && strcmp(hash_algo, "sha3-384") != 0 && strcmp(hash_algo, "sha3-512") != 0) return -EINVAL; n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", encoding, pkey->pkey_algo); return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; } else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) { if (strcmp(encoding, "raw") != 0) return -EINVAL; if (!hash_algo) return -EINVAL; if (strcmp(hash_algo, "streebog256") != 0 && strcmp(hash_algo, "streebog512") != 0) return -EINVAL; } else { /* Unknown public key algorithm */ return -ENOPKG; } if (strscpy(alg_name, pkey->pkey_algo, CRYPTO_MAX_ALG_NAME) < 0) return -EINVAL; return 0; } static u8 *pkey_pack_u32(u8 *dst, u32 val) { memcpy(dst, &val, sizeof(val)); return dst + sizeof(val); } /* * Query information about a key. 
*/ static int software_key_query(const struct kernel_pkey_params *params, struct kernel_pkey_query *info) { struct crypto_akcipher *tfm; struct public_key *pkey = params->key->payload.data[asym_crypto]; char alg_name[CRYPTO_MAX_ALG_NAME]; struct crypto_sig *sig; u8 *key, *ptr; int ret, len; bool issig; ret = software_key_determine_akcipher(pkey, params->encoding, params->hash_algo, alg_name, &issig, kernel_pkey_sign); if (ret < 0) return ret; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) return -ENOMEM; memcpy(key, pkey->key, pkey->keylen); ptr = key + pkey->keylen; ptr = pkey_pack_u32(ptr, pkey->algo); ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); if (issig) { sig = crypto_alloc_sig(alg_name, 0, 0); if (IS_ERR(sig)) { ret = PTR_ERR(sig); goto error_free_key; } if (pkey->key_is_private) ret = crypto_sig_set_privkey(sig, key, pkey->keylen); else ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); if (ret < 0) goto error_free_tfm; len = crypto_sig_keysize(sig); info->max_sig_size = crypto_sig_maxsize(sig); info->max_data_size = crypto_sig_digestsize(sig); info->supported_ops = KEYCTL_SUPPORTS_VERIFY; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_SIGN; if (strcmp(params->encoding, "pkcs1") == 0) { info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; } } else { tfm = crypto_alloc_akcipher(alg_name, 0, 0); if (IS_ERR(tfm)) { ret = PTR_ERR(tfm); goto error_free_key; } if (pkey->key_is_private) ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); else ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); if (ret < 0) goto error_free_tfm; len = crypto_akcipher_maxsize(tfm); info->max_sig_size = len; info->max_data_size = len; info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; } info->key_size = len * 8; info->max_enc_size = len; info->max_dec_size = len; ret = 0; error_free_tfm: if (issig) crypto_free_sig(sig); else crypto_free_akcipher(tfm); error_free_key: kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } /* * Do encryption, decryption and signing ops. 
*/ static int software_key_eds_op(struct kernel_pkey_params *params, const void *in, void *out) { const struct public_key *pkey = params->key->payload.data[asym_crypto]; char alg_name[CRYPTO_MAX_ALG_NAME]; struct crypto_akcipher *tfm; struct crypto_sig *sig; char *key, *ptr; bool issig; int ret; pr_devel("==>%s()\n", __func__); ret = software_key_determine_akcipher(pkey, params->encoding, params->hash_algo, alg_name, &issig, params->op); if (ret < 0) return ret; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) return -ENOMEM; memcpy(key, pkey->key, pkey->keylen); ptr = key + pkey->keylen; ptr = pkey_pack_u32(ptr, pkey->algo); ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); if (issig) { sig = crypto_alloc_sig(alg_name, 0, 0); if (IS_ERR(sig)) { ret = PTR_ERR(sig); goto error_free_key; } if (pkey->key_is_private) ret = crypto_sig_set_privkey(sig, key, pkey->keylen); else ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); if (ret) goto error_free_tfm; } else { tfm = crypto_alloc_akcipher(alg_name, 0, 0); if (IS_ERR(tfm)) { ret = PTR_ERR(tfm); goto error_free_key; } if (pkey->key_is_private) ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); else ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); if (ret) goto error_free_tfm; } ret = -EINVAL; /* Perform the encryption calculation. */ switch (params->op) { case kernel_pkey_encrypt: if (issig) break; ret = crypto_akcipher_sync_encrypt(tfm, in, params->in_len, out, params->out_len); break; case kernel_pkey_decrypt: if (issig) break; ret = crypto_akcipher_sync_decrypt(tfm, in, params->in_len, out, params->out_len); break; case kernel_pkey_sign: if (!issig) break; ret = crypto_sig_sign(sig, in, params->in_len, out, params->out_len); break; default: BUG(); } if (!issig && ret == 0) ret = crypto_akcipher_maxsize(tfm); error_free_tfm: if (issig) crypto_free_sig(sig); else crypto_free_akcipher(tfm); error_free_key: kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } /* * Verify a signature using a public key. */ int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { char alg_name[CRYPTO_MAX_ALG_NAME]; struct crypto_sig *tfm; char *key, *ptr; bool issig; int ret; pr_devel("==>%s()\n", __func__); BUG_ON(!pkey); BUG_ON(!sig); BUG_ON(!sig->s); /* * If the signature specifies a public key algorithm, it *must* match * the key's actual public key algorithm. * * Small exception: ECDSA signatures don't specify the curve, but ECDSA * keys do. So the strings can mismatch slightly in that case: * "ecdsa-nist-*" for the key, but "ecdsa" for the signature. 
	 */
	if (sig->pkey_algo) {
		if (strcmp(pkey->pkey_algo, sig->pkey_algo) != 0 &&
		    (strncmp(pkey->pkey_algo, "ecdsa-", 6) != 0 ||
		     strcmp(sig->pkey_algo, "ecdsa") != 0))
			return -EKEYREJECTED;
	}

	ret = software_key_determine_akcipher(pkey, sig->encoding,
					      sig->hash_algo, alg_name,
					      &issig, kernel_pkey_verify);
	if (ret < 0)
		return ret;

	tfm = crypto_alloc_sig(alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
		      GFP_KERNEL);
	if (!key) {
		ret = -ENOMEM;
		goto error_free_tfm;
	}

	memcpy(key, pkey->key, pkey->keylen);
	ptr = key + pkey->keylen;
	ptr = pkey_pack_u32(ptr, pkey->algo);
	ptr = pkey_pack_u32(ptr, pkey->paramlen);
	memcpy(ptr, pkey->params, pkey->paramlen);

	if (pkey->key_is_private)
		ret = crypto_sig_set_privkey(tfm, key, pkey->keylen);
	else
		ret = crypto_sig_set_pubkey(tfm, key, pkey->keylen);
	if (ret)
		goto error_free_key;

	ret = crypto_sig_verify(tfm, sig->s, sig->s_size,
				sig->digest, sig->digest_size);

error_free_key:
	kfree_sensitive(key);
error_free_tfm:
	crypto_free_sig(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	if (WARN_ON_ONCE(ret > 0))
		ret = -EINVAL;
	return ret;
}
EXPORT_SYMBOL_GPL(public_key_verify_signature);

static int public_key_verify_signature_2(const struct key *key,
					 const struct public_key_signature *sig)
{
	const struct public_key *pk = key->payload.data[asym_crypto];
	return public_key_verify_signature(pk, sig);
}

/*
 * Public key algorithm asymmetric key subtype
 */
struct asymmetric_key_subtype public_key_subtype = {
	.owner			= THIS_MODULE,
	.name			= "public_key",
	.name_len		= sizeof("public_key") - 1,
	.describe		= public_key_describe,
	.destroy		= public_key_destroy,
	.query			= software_key_query,
	.eds_op			= software_key_eds_op,
	.verify_signature	= public_key_verify_signature_2,
};
EXPORT_SYMBOL_GPL(public_key_subtype);
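/*
 * Illustrative usage sketch (not part of the file above): how a caller might
 * feed public_key_verify_signature() an RSA PKCS#1 signature over a SHA-256
 * digest. The buffers are assumed to come from elsewhere, and only fields
 * referenced by the code above are populated; everything here is an example,
 * not the subsystem's canonical calling convention.
 */
static int example_verify_rsa_pkcs1_sha256(const u8 *der_key, u32 der_key_len,
					   const u8 *digest, u32 digest_len,
					   const u8 *sig_blob, u32 sig_len)
{
	struct public_key pkey = {
		.key		= (void *)der_key,
		.keylen		= der_key_len,
		.id_type	= "X509",
		.pkey_algo	= "rsa",
	};
	struct public_key_signature sig = {
		.s		= (u8 *)sig_blob,
		.s_size		= sig_len,
		.digest		= (u8 *)digest,
		.digest_size	= digest_len,
		.pkey_algo	= "rsa",
		.hash_algo	= "sha256",
		.encoding	= "pkcs1",
	};

	/* 0 on success; -EKEYREJECTED etc. if the signature does not verify. */
	return public_key_verify_signature(&pkey, &sig);
}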
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4	57	/* available for programmer */
#define _PAGE_BIT_SOFTW5	58	/* available for programmer */
#define _PAGE_BIT_PKEY_BIT0	59	/* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1	60	/* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2	61	/* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3	62	/* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX		63	/* No execute: only
valid after cpuid check */ #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 #define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ #define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */ #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 #ifdef CONFIG_X86_64 #define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */ #define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */ #else /* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */ #define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */ #define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */ #endif /* If _PAGE_BIT_PRESENT is clear, we use these: */ /* - if the user mapped it with PROT_NONE; pte_present gives true */ #define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) #define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) #define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER) #define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT) #define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD) #define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED) #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) #define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1) #define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2) #define _PAGE_SOFTW3 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) #define _PAGE_KERNEL_4K (_AT(pteval_t, 1) << _PAGE_BIT_KERNEL_4K) #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS #define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0) #define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1) #define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2) #define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3) #else #define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0)) #define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0)) #define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0)) #define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0)) #endif #define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \ _PAGE_PKEY_BIT1 | \ _PAGE_PKEY_BIT2 | \ _PAGE_PKEY_BIT3) #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED) #else #define _PAGE_KNL_ERRATUM_MASK 0 #endif #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) #endif /* * Tracking soft dirty bit when a page goes to a swap is tricky. * We need a bit which can be stored in pte _and_ not conflict * with swap entry format. On x86 bits 1-4 are *not* involved * into swap entry computation, but bit 7 is used for thp migration, * so we borrow bit 1 for soft dirty tracking. * * Please note that this bit must be treated as swap dirty page * mark if and only if the PTE/PMD has present bit clear! 
*/ #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SWP_SOFT_DIRTY _PAGE_RW #else #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) #endif #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP #define _PAGE_UFFD_WP (_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP) #define _PAGE_SWP_UFFD_WP _PAGE_USER #else #define _PAGE_UFFD_WP (_AT(pteval_t, 0)) #define _PAGE_SWP_UFFD_WP (_AT(pteval_t, 0)) #endif #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) #define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) #define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4) #else #define _PAGE_NX (_AT(pteval_t, 0)) #define _PAGE_DEVMAP (_AT(pteval_t, 0)) #define _PAGE_SOFTW4 (_AT(pteval_t, 0)) #endif /* * The hardware requires shadow stack to be Write=0,Dirty=1. However, * there are valid cases where the kernel might create read-only PTEs that * are dirty (e.g., fork(), mprotect(), uffd-wp(), soft-dirty tracking). In * this case, the _PAGE_SAVED_DIRTY bit is used instead of the HW-dirty bit, * to avoid creating a wrong "shadow stack" PTEs. Such PTEs have * (Write=0,SavedDirty=1,Dirty=0) set. */ #define _PAGE_SAVED_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SAVED_DIRTY) #define _PAGE_DIRTY_BITS (_PAGE_DIRTY | _PAGE_SAVED_DIRTY) #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) #define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW) /* * Set of bits not changed in pte_modify. The pte's * protection key is treated like _PAGE_RW, for * instance, and is *not* included in this mask since * pte_modify() does modify it. */ #define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | \ _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \ _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP) #define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT) #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE) /* * The cache modes defined here are used to translate between pure SW usage * and the HW defined cache mode bits and/or PAT entries. * * The resulting bits for PWT, PCD and PAT should be chosen in a way * to have the WB mode at index 0 (all bits clear). This is the default * right now and likely would break too much if changed. 
*/ #ifndef __ASSEMBLER__ enum page_cache_mode { _PAGE_CACHE_MODE_WB = 0, _PAGE_CACHE_MODE_WC = 1, _PAGE_CACHE_MODE_UC_MINUS = 2, _PAGE_CACHE_MODE_UC = 3, _PAGE_CACHE_MODE_WT = 4, _PAGE_CACHE_MODE_WP = 5, _PAGE_CACHE_MODE_NUM = 8 }; #endif #define _PAGE_CC (_AT(pteval_t, cc_get_mask())) #define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) #define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT) #define _PAGE_LARGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE) #define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC)) #define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP)) #define __PP _PAGE_PRESENT #define __RW _PAGE_RW #define _USR _PAGE_USER #define ___A _PAGE_ACCESSED #define ___D _PAGE_DIRTY #define ___G _PAGE_GLOBAL #define __NX _PAGE_NX #define _ENC _PAGE_ENC #define __WP _PAGE_CACHE_WP #define __NC _PAGE_NOCACHE #define _PSE _PAGE_PSE #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) #define __pg(x) __pgprot(x) #define PAGE_NONE __pg( 0| 0| 0|___A| 0| 0| 0|___G) #define PAGE_SHARED __pg(__PP|__RW|_USR|___A|__NX| 0| 0| 0) #define PAGE_SHARED_EXEC __pg(__PP|__RW|_USR|___A| 0| 0| 0| 0) #define PAGE_COPY_NOEXEC __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) #define PAGE_COPY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0) #define PAGE_COPY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) #define PAGE_READONLY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) #define PAGE_READONLY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0) #define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G) #define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G) /* * Page tables needs to have Write=1 in order for any lower PTEs to be * writable. This includes shadow stack memory (Write=0, Dirty=1) */ #define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0) #define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC) #define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0) #define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC) #define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX| 0| 0|___G) #define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0| 0| 0|___G) #define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G) #define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G) #define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC) #define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX| 0| 0|___G) #define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G) #define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW| 0|___A| 0|___D|_PSE|___G) #define __PAGE_KERNEL_WP (__PP|__RW| 0|___A|__NX|___D| 0|___G| __WP) #define __PAGE_KERNEL_IO __PAGE_KERNEL #define __PAGE_KERNEL_IO_NOCACHE __PAGE_KERNEL_NOCACHE #ifndef __ASSEMBLER__ #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _ENC) #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _ENC) #define __PAGE_KERNEL_NOENC (__PAGE_KERNEL | 0) #define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP | 0) #define __pgprot_mask(x) __pgprot((x) & __default_kernel_pte_mask) #define PAGE_KERNEL __pgprot_mask(__PAGE_KERNEL | _ENC) #define PAGE_KERNEL_NOENC __pgprot_mask(__PAGE_KERNEL | 0) #define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC) #define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC) #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0) #define PAGE_KERNEL_ROX __pgprot_mask(__PAGE_KERNEL_ROX | _ENC) #define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC) #define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC) #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC) #define 
PAGE_KERNEL_VVAR __pgprot_mask(__PAGE_KERNEL_VVAR | _ENC) #define PAGE_KERNEL_IO __pgprot_mask(__PAGE_KERNEL_IO) #define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE) #endif /* __ASSEMBLER__ */ /* * early identity mapping pte attrib macros. */ #ifdef CONFIG_X86_64 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC #else #define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif #ifdef CONFIG_X86_32 # include <asm/pgtable_32_types.h> #else # include <asm/pgtable_64_types.h> #endif #ifndef __ASSEMBLER__ #include <linux/types.h> /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */ #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) /* * Extracts the flags from a (pte|pmd|pud|pgd)val_t * This includes the protection key value. */ #define PTE_FLAGS_MASK (~PTE_PFN_MASK) typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; typedef struct { pgdval_t pgd; } pgd_t; static inline pgprot_t pgprot_nx(pgprot_t prot) { return __pgprot(pgprot_val(prot) | _PAGE_NX); } #define pgprot_nx pgprot_nx #ifdef CONFIG_X86_PAE /* * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't * use it here. */ #define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK) #define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK) /* * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries. * All other bits are Reserved MBZ */ #define PGD_ALLOWED_BITS (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \ _PAGE_PWT | _PAGE_PCD | \ _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3) #else /* No need to mask any bits for !PAE */ #define PGD_ALLOWED_BITS (~0ULL) #endif static inline pgd_t native_make_pgd(pgdval_t val) { return (pgd_t) { val & PGD_ALLOWED_BITS }; } static inline pgdval_t native_pgd_val(pgd_t pgd) { return pgd.pgd & PGD_ALLOWED_BITS; } static inline pgdval_t pgd_flags(pgd_t pgd) { return native_pgd_val(pgd) & PTE_FLAGS_MASK; } #if CONFIG_PGTABLE_LEVELS > 4 typedef struct { p4dval_t p4d; } p4d_t; static inline p4d_t native_make_p4d(pudval_t val) { return (p4d_t) { val }; } static inline p4dval_t native_p4d_val(p4d_t p4d) { return p4d.p4d; } #else #include <asm-generic/pgtable-nop4d.h> static inline p4d_t native_make_p4d(pudval_t val) { return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) }; } static inline p4dval_t native_p4d_val(p4d_t p4d) { return native_pgd_val(p4d.pgd); } #endif #if CONFIG_PGTABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; static inline pud_t native_make_pud(pmdval_t val) { return (pud_t) { val }; } static inline pudval_t native_pud_val(pud_t pud) { return pud.pud; } #else #include <asm-generic/pgtable-nopud.h> static inline pud_t native_make_pud(pudval_t val) { return (pud_t) { .p4d.pgd = native_make_pgd(val) }; } static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.p4d.pgd); } #endif #if CONFIG_PGTABLE_LEVELS > 2 static inline pmd_t native_make_pmd(pmdval_t val) { return (pmd_t) { .pmd = val }; } static inline pmdval_t native_pmd_val(pmd_t pmd) { return pmd.pmd; } #else #include <asm-generic/pgtable-nopmd.h> static inline pmd_t native_make_pmd(pmdval_t val) { return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) }; } static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.p4d.pgd); } #endif static inline p4dval_t p4d_pfn_mask(p4d_t p4d) { /* No 512 GiB huge pages yet */ return PTE_PFN_MASK; } static inline p4dval_t p4d_flags_mask(p4d_t 
p4d) { return ~p4d_pfn_mask(p4d); } static inline p4dval_t p4d_flags(p4d_t p4d) { return native_p4d_val(p4d) & p4d_flags_mask(p4d); } static inline pudval_t pud_pfn_mask(pud_t pud) { if (native_pud_val(pud) & _PAGE_PSE) return PHYSICAL_PUD_PAGE_MASK; else return PTE_PFN_MASK; } static inline pudval_t pud_flags_mask(pud_t pud) { return ~pud_pfn_mask(pud); } static inline pudval_t pud_flags(pud_t pud) { return native_pud_val(pud) & pud_flags_mask(pud); } static inline pmdval_t pmd_pfn_mask(pmd_t pmd) { if (native_pmd_val(pmd) & _PAGE_PSE) return PHYSICAL_PMD_PAGE_MASK; else return PTE_PFN_MASK; } static inline pmdval_t pmd_flags_mask(pmd_t pmd) { return ~pmd_pfn_mask(pmd); } static inline pmdval_t pmd_flags(pmd_t pmd) { return native_pmd_val(pmd) & pmd_flags_mask(pmd); } static inline pte_t native_make_pte(pteval_t val) { return (pte_t) { .pte = val }; } static inline pteval_t native_pte_val(pte_t pte) { return pte.pte; } static inline pteval_t pte_flags(pte_t pte) { return native_pte_val(pte) & PTE_FLAGS_MASK; } #define __pte2cm_idx(cb) \ ((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \ (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \ (((cb) >> _PAGE_BIT_PWT) & 1)) #define __cm_idx2pte(i) \ ((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \ (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \ (((i) & 1) << _PAGE_BIT_PWT)) unsigned long cachemode2protval(enum page_cache_mode pcm); static inline pgprotval_t protval_4k_2_large(pgprotval_t val) { return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); } static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot) { return __pgprot(protval_4k_2_large(pgprot_val(pgprot))); } static inline pgprotval_t protval_large_2_4k(pgprotval_t val) { return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | ((val & _PAGE_PAT_LARGE) >> (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); } static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot) { return __pgprot(protval_large_2_4k(pgprot_val(pgprot))); } typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; extern pteval_t __default_kernel_pte_mask; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); #define pgprot_writethrough pgprot_writethrough extern pgprot_t pgprot_writethrough(pgprot_t prot); /* Indicate that x86 has its own track and untrack pfn vma functions */ #define __HAVE_PFNMAP_TRACKING #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); /* Install a pte for a particular vaddr in kernel space. */ void set_pte_vaddr(unsigned long vaddr, pte_t pte); #ifdef CONFIG_X86_32 extern void native_pagetable_init(void); #else #define native_pagetable_init paging_init #endif enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G, PG_LEVEL_512G, PG_LEVEL_256T, PG_LEVEL_NUM }; #ifdef CONFIG_PROC_FS extern void update_page_count(int level, unsigned long pages); #else static inline void update_page_count(int level, unsigned long pages) { } #endif /* * Helper function that returns the kernel pagetable entry controlling * the virtual address 'address'. NULL means no pagetable entry present. * NOTE: the return type is pte_t but if the pmd is PSE then we return it * as a pte too. 
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
				  unsigned int *level, bool *nx, bool *rw);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
					  unsigned long address,
					  unsigned numpages,
					  unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
					    unsigned long numpages);

#endif	/* !__ASSEMBLER__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */
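/*
 * Illustrative sketch (not part of the header above): combining
 * lookup_address() with pte_flags() and the _PAGE_* masks defined in this
 * header to inspect the protections of a kernel-mapped address. The function
 * name and the pr_info() reporting are made up for illustration.
 */
static void example_dump_mapping(unsigned long addr)
{
	unsigned int level;
	pte_t *ptep = lookup_address(addr, &level);

	if (!ptep || !(pte_flags(*ptep) & _PAGE_PRESENT)) {
		pr_info("%#lx: not mapped\n", addr);
		return;
	}

	/* level follows enum pg_level: PG_LEVEL_4K, PG_LEVEL_2M, ... */
	pr_info("%#lx: level=%u %s%s\n", addr, level,
		(pte_flags(*ptep) & _PAGE_RW) ? "RW " : "RO ",
		(pte_flags(*ptep) & _PAGE_NX) ? "NX" : "X");
}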
// SPDX-License-Identifier: GPL-2.0-or-later

#include <crypto/hash.h>
#include <linux/cpu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>
#include <net/tcp.h>

static size_t __scratch_size;
struct sigpool_scratch {
	local_lock_t bh_lock;
	void __rcu *pad;
};

static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

struct sigpool_entry {
	struct crypto_ahash	*hash;
	const char		*alg;
	struct kref		kref;
	uint16_t		needs_key:1,
				reserved:15;
};

#define CPOOL_SIZE (PAGE_SIZE / sizeof(struct sigpool_entry))
static struct sigpool_entry cpool[CPOOL_SIZE];
static unsigned int cpool_populated;
static DEFINE_MUTEX(cpool_mutex);

/* Slow-path */
struct scratches_to_free {
	struct rcu_head rcu;
	unsigned int cnt;
	void *scratches[];
};

static void free_old_scratches(struct rcu_head *head)
{
	struct scratches_to_free *stf;

	stf = container_of(head, struct scratches_to_free, rcu);
	while (stf->cnt--)
		kfree(stf->scratches[stf->cnt]);
	kfree(stf);
}

/**
 * sigpool_reserve_scratch - re-allocates scratch buffer, slow-path
 * @size: request size for the scratch/temp buffer
 */
static int sigpool_reserve_scratch(size_t size)
{
	struct scratches_to_free *stf;
	size_t stf_sz = struct_size(stf, scratches, num_possible_cpus());
	int cpu, err = 0;

	lockdep_assert_held(&cpool_mutex);
	if (__scratch_size >= size)
		return 0;

	stf = kmalloc(stf_sz, GFP_KERNEL);
	if (!stf)
		return -ENOMEM;
	stf->cnt = 0;

	size = max(size, __scratch_size);
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		void *scratch, *old_scratch;

		scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
		if (!scratch) {
			err = -ENOMEM;
			break;
		}

		old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
						  scratch, lockdep_is_held(&cpool_mutex));
		if (!cpu_online(cpu) || !old_scratch) {
			kfree(old_scratch);
			continue;
		}
		stf->scratches[stf->cnt++] = old_scratch;
	}
	cpus_read_unlock();
	if (!err)
		__scratch_size = size;

	call_rcu(&stf->rcu, free_old_scratches);
	return err;
}

static void sigpool_scratch_free(void)
{
	int cpu;
for_each_possible_cpu(cpu) kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu), NULL, lockdep_is_held(&cpool_mutex))); __scratch_size = 0; } static int __cpool_try_clone(struct crypto_ahash *hash) { struct crypto_ahash *tmp; tmp = crypto_clone_ahash(hash); if (IS_ERR(tmp)) return PTR_ERR(tmp); crypto_free_ahash(tmp); return 0; } static int __cpool_alloc_ahash(struct sigpool_entry *e, const char *alg) { struct crypto_ahash *cpu0_hash; int ret; e->alg = kstrdup(alg, GFP_KERNEL); if (!e->alg) return -ENOMEM; cpu0_hash = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(cpu0_hash)) { ret = PTR_ERR(cpu0_hash); goto out_free_alg; } e->needs_key = crypto_ahash_get_flags(cpu0_hash) & CRYPTO_TFM_NEED_KEY; ret = __cpool_try_clone(cpu0_hash); if (ret) goto out_free_cpu0_hash; e->hash = cpu0_hash; kref_init(&e->kref); return 0; out_free_cpu0_hash: crypto_free_ahash(cpu0_hash); out_free_alg: kfree(e->alg); e->alg = NULL; return ret; } /** * tcp_sigpool_alloc_ahash - allocates pool for ahash requests * @alg: name of async hash algorithm * @scratch_size: reserve a tcp_sigpool::scratch buffer of this size */ int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size) { int i, ret; /* slow-path */ mutex_lock(&cpool_mutex); ret = sigpool_reserve_scratch(scratch_size); if (ret) goto out; for (i = 0; i < cpool_populated; i++) { if (!cpool[i].alg) continue; if (strcmp(cpool[i].alg, alg)) continue; /* pairs with tcp_sigpool_release() */ if (!kref_get_unless_zero(&cpool[i].kref)) kref_init(&cpool[i].kref); ret = i; goto out; } for (i = 0; i < cpool_populated; i++) { if (!cpool[i].alg) break; } if (i >= CPOOL_SIZE) { ret = -ENOSPC; goto out; } ret = __cpool_alloc_ahash(&cpool[i], alg); if (!ret) { ret = i; if (i == cpool_populated) cpool_populated++; } out: mutex_unlock(&cpool_mutex); return ret; } EXPORT_SYMBOL_GPL(tcp_sigpool_alloc_ahash); static void __cpool_free_entry(struct sigpool_entry *e) { crypto_free_ahash(e->hash); kfree(e->alg); memset(e, 0, sizeof(*e)); } static void cpool_cleanup_work_cb(struct work_struct *work) { bool free_scratch = true; unsigned int i; mutex_lock(&cpool_mutex); for (i = 0; i < cpool_populated; i++) { if (kref_read(&cpool[i].kref) > 0) { free_scratch = false; continue; } if (!cpool[i].alg) continue; __cpool_free_entry(&cpool[i]); } if (free_scratch) sigpool_scratch_free(); mutex_unlock(&cpool_mutex); } static DECLARE_WORK(cpool_cleanup_work, cpool_cleanup_work_cb); static void cpool_schedule_cleanup(struct kref *kref) { schedule_work(&cpool_cleanup_work); } /** * tcp_sigpool_release - decreases number of users for a pool. If it was * the last user of the pool, releases any memory that was consumed. 
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 */
void tcp_sigpool_release(unsigned int id)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return;

	/* slow-path */
	kref_put(&cpool[id].kref, cpool_schedule_cleanup);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_release);

/**
 * tcp_sigpool_get - increases number of users (refcounter) for a pool
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 */
void tcp_sigpool_get(unsigned int id)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return;
	kref_get(&cpool[id].kref);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_get);

int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RCU_BH)
{
	struct crypto_ahash *hash;

	rcu_read_lock_bh();
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) {
		rcu_read_unlock_bh();
		return -EINVAL;
	}

	hash = crypto_clone_ahash(cpool[id].hash);
	if (IS_ERR(hash)) {
		rcu_read_unlock_bh();
		return PTR_ERR(hash);
	}

	c->req = ahash_request_alloc(hash, GFP_ATOMIC);
	if (!c->req) {
		crypto_free_ahash(hash);
		rcu_read_unlock_bh();
		return -ENOMEM;
	}

	ahash_request_set_callback(c->req, 0, NULL, NULL);

	/* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
	 * valid (allocated) until tcp_sigpool_end().
	 */
	local_lock_nested_bh(&sigpool_scratch.bh_lock);
	c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_sigpool_start);

void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH)
{
	struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);

	local_unlock_nested_bh(&sigpool_scratch.bh_lock);
	rcu_read_unlock_bh();
	ahash_request_free(c->req);
	crypto_free_ahash(hash);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_end);

/**
 * tcp_sigpool_algo - return algorithm of tcp_sigpool
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @buf: buffer to return name of algorithm
 * @buf_len: size of @buf
 */
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return -EINVAL;

	return strscpy(buf, cpool[id].alg, buf_len);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_algo);

/**
 * tcp_sigpool_hash_skb_data - hash data in skb with initialized tcp_sigpool
 * @hp: tcp_sigpool pointer
 * @skb: buffer to add sign for
 * @header_len: TCP header length for this segment
 */
int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
			      const struct sk_buff *skb,
			      unsigned int header_len)
{
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->req;
	struct sk_buff *frag_iter;
	struct scatterlist sg;
	unsigned int i;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *)tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const skb_frag_t *f = &shi->frags[i];
		unsigned int offset = skb_frag_off(f);
		struct page *page;

		page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
		sg_set_page(&sg, page, skb_frag_size(f), offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
			return 1;
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_sigpool_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_sigpool_hash_skb_data);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Per-CPU pool of crypto requests");
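/*
 * Illustrative usage sketch (not part of the file above): the intended
 * allocate/start/hash/end/release lifecycle of a signing pool, as a
 * hypothetical caller might use it. The "md5" algorithm name, the scratch
 * size and the digest handling are examples only.
 */
static int example_hash_skb(const struct sk_buff *skb, u8 *out_digest)
{
	struct tcp_sigpool hp;
	int id, err;

	id = tcp_sigpool_alloc_ahash("md5", 64 /* scratch size */);
	if (id < 0)
		return id;

	err = tcp_sigpool_start(id, &hp);	/* takes rcu_read_lock_bh() */
	if (err)
		goto out_release;

	err = crypto_ahash_init(hp.req);
	if (!err && tcp_sigpool_hash_skb_data(&hp, skb, tcp_hdrlen(skb)))
		err = -EINVAL;		/* hash_skb_data() returns 1 on failure */
	if (!err) {
		ahash_request_set_crypt(hp.req, NULL, out_digest, 0);
		err = crypto_ahash_final(hp.req);
	}

	tcp_sigpool_end(&hp);		/* unlocks and frees hp.req */
out_release:
	tcp_sigpool_release(id);
	return err;
}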
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures.
Detailed information is available in * Documentation/core-api/genericirq.rst */ #include <linux/irq.h> #include <linux/msi.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/irqdomain.h> #include <trace/events/irq.h> #include "internals.h" static irqreturn_t bad_chained_irq(int irq, void *dev_id) { WARN_ONCE(1, "Chained irq %d should not call an action\n", irq); return IRQ_NONE; } /* * Chained handlers should never call action on their IRQ. This default * action will emit warning if such thing happens. */ struct irqaction chained_action = { .handler = bad_chained_irq, }; /** * irq_set_chip - set the irq chip for an irq * @irq: irq number * @chip: pointer to irq chip description structure */ int irq_set_chip(unsigned int irq, const struct irq_chip *chip) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return -EINVAL; desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); irq_put_desc_unlock(desc, flags); /* * For !CONFIG_SPARSE_IRQ make the irq show up in * allocated_irqs. */ irq_mark_irq(irq); return 0; } EXPORT_SYMBOL(irq_set_chip); /** * irq_set_irq_type - set the irq trigger type for an irq * @irq: irq number * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h */ int irq_set_irq_type(unsigned int irq, unsigned int type) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); int ret = 0; if (!desc) return -EINVAL; ret = __irq_set_trigger(desc, type); irq_put_desc_busunlock(desc, flags); return ret; } EXPORT_SYMBOL(irq_set_irq_type); /** * irq_set_handler_data - set irq handler data for an irq * @irq: Interrupt number * @data: Pointer to interrupt specific data * * Set the hardware irq controller data for an irq */ int irq_set_handler_data(unsigned int irq, void *data) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return -EINVAL; desc->irq_common_data.handler_data = data; irq_put_desc_unlock(desc, flags); return 0; } EXPORT_SYMBOL(irq_set_handler_data); /** * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset * @irq_base: Interrupt number base * @irq_offset: Interrupt number offset * @entry: Pointer to MSI descriptor data * * Set the MSI descriptor entry for an irq at offset */ int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return -EINVAL; desc->irq_common_data.msi_desc = entry; if (entry && !irq_offset) entry->irq = irq_base; irq_put_desc_unlock(desc, flags); return 0; } /** * irq_set_msi_desc - set MSI descriptor data for an irq * @irq: Interrupt number * @entry: Pointer to MSI descriptor data * * Set the MSI descriptor entry for an irq */ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) { return irq_set_msi_desc_off(irq, 0, entry); } /** * irq_set_chip_data - set irq chip data for an irq * @irq: Interrupt number * @data: Pointer to chip specific data * * Set the hardware irq chip data for an irq */ int irq_set_chip_data(unsigned int irq, void *data) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return -EINVAL; desc->irq_data.chip_data = data; irq_put_desc_unlock(desc, flags); return 0; } EXPORT_SYMBOL(irq_set_chip_data); struct irq_data *irq_get_irq_data(unsigned int irq) { struct irq_desc *desc = 
irq_to_desc(irq); return desc ? &desc->irq_data : NULL; } EXPORT_SYMBOL_GPL(irq_get_irq_data); static void irq_state_clr_disabled(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); } static void irq_state_clr_masked(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); } static void irq_state_clr_started(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED); } static void irq_state_set_started(struct irq_desc *desc) { irqd_set(&desc->irq_data, IRQD_IRQ_STARTED); } enum { IRQ_STARTUP_NORMAL, IRQ_STARTUP_MANAGED, IRQ_STARTUP_ABORT, }; #ifdef CONFIG_SMP static int __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, bool force) { struct irq_data *d = irq_desc_get_irq_data(desc); if (!irqd_affinity_is_managed(d)) return IRQ_STARTUP_NORMAL; irqd_clr_managed_shutdown(d); if (!cpumask_intersects(aff, cpu_online_mask)) { /* * Catch code which fiddles with enable_irq() on a managed * and potentially shutdown IRQ. Chained interrupt * installment or irq auto probing should not happen on * managed irqs either. */ if (WARN_ON_ONCE(force)) return IRQ_STARTUP_ABORT; /* * The interrupt was requested, but there is no online CPU * in it's affinity mask. Put it into managed shutdown * state and let the cpu hotplug mechanism start it up once * a CPU in the mask becomes available. */ return IRQ_STARTUP_ABORT; } /* * Managed interrupts have reserved resources, so this should not * happen. */ if (WARN_ON(irq_domain_activate_irq(d, false))) return IRQ_STARTUP_ABORT; return IRQ_STARTUP_MANAGED; } #else static __always_inline int __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, bool force) { return IRQ_STARTUP_NORMAL; } #endif static void irq_enable(struct irq_desc *desc) { if (!irqd_irq_disabled(&desc->irq_data)) { unmask_irq(desc); } else { irq_state_clr_disabled(desc); if (desc->irq_data.chip->irq_enable) { desc->irq_data.chip->irq_enable(&desc->irq_data); irq_state_clr_masked(desc); } else { unmask_irq(desc); } } } static int __irq_startup(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); int ret = 0; /* Warn if this interrupt is not activated but try nevertheless */ WARN_ON_ONCE(!irqd_is_activated(d)); if (d->chip->irq_startup) { ret = d->chip->irq_startup(d); irq_state_clr_disabled(desc); irq_state_clr_masked(desc); } else { irq_enable(desc); } irq_state_set_started(desc); return ret; } int irq_startup(struct irq_desc *desc, bool resend, bool force) { struct irq_data *d = irq_desc_get_irq_data(desc); const struct cpumask *aff = irq_data_get_affinity_mask(d); int ret = 0; desc->depth = 0; if (irqd_is_started(d)) { irq_enable(desc); } else { switch (__irq_startup_managed(desc, aff, force)) { case IRQ_STARTUP_NORMAL: if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) irq_setup_affinity(desc); ret = __irq_startup(desc); if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) irq_setup_affinity(desc); break; case IRQ_STARTUP_MANAGED: irq_do_set_affinity(d, aff, false); ret = __irq_startup(desc); break; case IRQ_STARTUP_ABORT: irqd_set_managed_shutdown(d); return 0; } } if (resend) check_irq_resend(desc, false); return ret; } int irq_activate(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); if (!irqd_affinity_is_managed(d)) return irq_domain_activate_irq(d, false); return 0; } int irq_activate_and_startup(struct irq_desc *desc, bool resend) { if (WARN_ON(irq_activate(desc))) return 0; return irq_startup(desc, resend, IRQ_START_FORCE); } static void __irq_disable(struct 
irq_desc *desc, bool mask); void irq_shutdown(struct irq_desc *desc) { if (irqd_is_started(&desc->irq_data)) { clear_irq_resend(desc); desc->depth = 1; if (desc->irq_data.chip->irq_shutdown) { desc->irq_data.chip->irq_shutdown(&desc->irq_data); irq_state_set_disabled(desc); irq_state_set_masked(desc); } else { __irq_disable(desc, true); } irq_state_clr_started(desc); } } void irq_shutdown_and_deactivate(struct irq_desc *desc) { irq_shutdown(desc); /* * This must be called even if the interrupt was never started up, * because the activation can happen before the interrupt is * available for request/startup. It has it's own state tracking so * it's safe to call it unconditionally. */ irq_domain_deactivate_irq(&desc->irq_data); } static void __irq_disable(struct irq_desc *desc, bool mask) { if (irqd_irq_disabled(&desc->irq_data)) { if (mask) mask_irq(desc); } else { irq_state_set_disabled(desc); if (desc->irq_data.chip->irq_disable) { desc->irq_data.chip->irq_disable(&desc->irq_data); irq_state_set_masked(desc); } else if (mask) { mask_irq(desc); } } } /** * irq_disable - Mark interrupt disabled * @desc: irq descriptor which should be disabled * * If the chip does not implement the irq_disable callback, we * use a lazy disable approach. That means we mark the interrupt * disabled, but leave the hardware unmasked. That's an * optimization because we avoid the hardware access for the * common case where no interrupt happens after we marked it * disabled. If an interrupt happens, then the interrupt flow * handler masks the line at the hardware level and marks it * pending. * * If the interrupt chip does not implement the irq_disable callback, * a driver can disable the lazy approach for a particular irq line by * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can * be used for devices which cannot disable the interrupt at the * device level under certain circumstances and have to use * disable_irq[_nosync] instead. 
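 *
 * A hedged usage sketch (hypothetical driver code, not taken from this
 * file): a device that cannot gate its interrupt at the device level and
 * therefore relies on disable_irq_nosync() would opt out of the lazy
 * approach once, typically at probe time, so that a later
 * disable_irq_nosync() masks the line at the irq chip immediately:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq_nosync(irq);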
*/ void irq_disable(struct irq_desc *desc) { __irq_disable(desc, irq_settings_disable_unlazy(desc)); } void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) { if (desc->irq_data.chip->irq_enable) desc->irq_data.chip->irq_enable(&desc->irq_data); else desc->irq_data.chip->irq_unmask(&desc->irq_data); cpumask_set_cpu(cpu, desc->percpu_enabled); } void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) { if (desc->irq_data.chip->irq_disable) desc->irq_data.chip->irq_disable(&desc->irq_data); else desc->irq_data.chip->irq_mask(&desc->irq_data); cpumask_clear_cpu(cpu, desc->percpu_enabled); } static inline void mask_ack_irq(struct irq_desc *desc) { if (desc->irq_data.chip->irq_mask_ack) { desc->irq_data.chip->irq_mask_ack(&desc->irq_data); irq_state_set_masked(desc); } else { mask_irq(desc); if (desc->irq_data.chip->irq_ack) desc->irq_data.chip->irq_ack(&desc->irq_data); } } void mask_irq(struct irq_desc *desc) { if (irqd_irq_masked(&desc->irq_data)) return; if (desc->irq_data.chip->irq_mask) { desc->irq_data.chip->irq_mask(&desc->irq_data); irq_state_set_masked(desc); } } void unmask_irq(struct irq_desc *desc) { if (!irqd_irq_masked(&desc->irq_data)) return; if (desc->irq_data.chip->irq_unmask) { desc->irq_data.chip->irq_unmask(&desc->irq_data); irq_state_clr_masked(desc); } } void unmask_threaded_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; if (chip->flags & IRQCHIP_EOI_THREADED) chip->irq_eoi(&desc->irq_data); unmask_irq(desc); } /* * handle_nested_irq - Handle a nested irq from a irq thread * @irq: the interrupt number * * Handle interrupts which are nested into a threaded interrupt * handler. The handler function is called inside the calling * threads context. */ void handle_nested_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; irqreturn_t action_ret; might_sleep(); raw_spin_lock_irq(&desc->lock); desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); action = desc->action; if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; raw_spin_unlock_irq(&desc->lock); return; } kstat_incr_irqs_this_cpu(desc); atomic_inc(&desc->threads_active); raw_spin_unlock_irq(&desc->lock); action_ret = IRQ_NONE; for_each_action_of_desc(desc, action) action_ret |= action->thread_fn(action->irq, action->dev_id); if (!irq_settings_no_debug(desc)) note_interrupt(desc, action_ret); wake_threads_waitq(desc); } EXPORT_SYMBOL_GPL(handle_nested_irq); static bool irq_check_poll(struct irq_desc *desc) { if (!(desc->istate & IRQS_POLL_INPROGRESS)) return false; return irq_wait_for_poll(desc); } static bool irq_may_run(struct irq_desc *desc) { unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; /* * If the interrupt is not in progress and is not an armed * wakeup interrupt, proceed. */ if (!irqd_has_set(&desc->irq_data, mask)) return true; /* * If the interrupt is an armed wakeup source, mark it pending * and suspended, disable it and notify the pm core about the * event. */ if (irq_pm_check_wakeup(desc)) return false; /* * Handle a potential concurrent poll on a different core. */ return irq_check_poll(desc); } /** * handle_simple_irq - Simple and software-decoded IRQs. * @desc: the interrupt description structure for this irq * * Simple interrupts are either sent from a demultiplexing interrupt * handler or come from hardware, where no interrupt hardware control * is necessary. * * Note: The caller is expected to handle the ack, clear, mask and * unmask issues if necessary. 
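 *
 * Hedged example (hypothetical demultiplexing driver, not from this
 * file): each decoded child line is mapped with this flow handler and a
 * dummy chip, because there is no per-child hardware control, and the
 * parent handler simply forwards events to it:
 *
 *	irq_set_chip_and_handler(child_virq, &dummy_irq_chip,
 *				 handle_simple_irq);
 *
 * and, from the parent's interrupt handler, per decoded source:
 *
 *	generic_handle_irq(child_virq);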
*/ void handle_simple_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); if (!irq_may_run(desc)) goto out_unlock; desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; goto out_unlock; } kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); out_unlock: raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_simple_irq); /** * handle_untracked_irq - Simple and software-decoded IRQs. * @desc: the interrupt description structure for this irq * * Untracked interrupts are sent from a demultiplexing interrupt * handler when the demultiplexer does not know which device it its * multiplexed irq domain generated the interrupt. IRQ's handled * through here are not subjected to stats tracking, randomness, or * spurious interrupt detection. * * Note: Like handle_simple_irq, the caller is expected to handle * the ack, clear, mask and unmask issues if necessary. */ void handle_untracked_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); if (!irq_may_run(desc)) goto out_unlock; desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; goto out_unlock; } desc->istate &= ~IRQS_PENDING; irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); raw_spin_unlock(&desc->lock); __handle_irq_event_percpu(desc); raw_spin_lock(&desc->lock); irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); out_unlock: raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_untracked_irq); /* * Called unconditionally from handle_level_irq() and only for oneshot * interrupts from handle_fasteoi_irq() */ static void cond_unmask_irq(struct irq_desc *desc) { /* * We need to unmask in the following cases: * - Standard level irq (IRQF_ONESHOT is not set) * - Oneshot irq which did not wake the thread (caused by a * spurious interrupt or a primary handler handling it * completely). */ if (!irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) unmask_irq(desc); } /** * handle_level_irq - Level type irq handler * @desc: the interrupt description structure for this irq * * Level type interrupts are active as long as the hardware line has * the active level. This may require to mask the interrupt and unmask * it after the associated handler has acknowledged the device, so the * interrupt line is back to inactive. */ void handle_level_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); mask_ack_irq(desc); if (!irq_may_run(desc)) goto out_unlock; desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); /* * If its disabled or no action available * keep it masked and get out of here */ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; goto out_unlock; } kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); cond_unmask_irq(desc); out_unlock: raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_level_irq); static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) { if (!(desc->istate & IRQS_ONESHOT)) { chip->irq_eoi(&desc->irq_data); return; } /* * We need to unmask in the following cases: * - Oneshot irq which did not wake the thread (caused by a * spurious interrupt or a primary handler handling it * completely). 
*/ if (!irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { chip->irq_eoi(&desc->irq_data); unmask_irq(desc); } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { chip->irq_eoi(&desc->irq_data); } } /** * handle_fasteoi_irq - irq handler for transparent controllers * @desc: the interrupt description structure for this irq * * Only a single callback will be issued to the chip: an ->eoi() * call when the interrupt has been serviced. This enables support * for modern forms of interrupt handlers, which handle the flow * details in hardware, transparently. */ void handle_fasteoi_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; raw_spin_lock(&desc->lock); /* * When an affinity change races with IRQ handling, the next interrupt * can arrive on the new CPU before the original CPU has completed * handling the previous one - it may need to be resent. */ if (!irq_may_run(desc)) { if (irqd_needs_resend_when_in_progress(&desc->irq_data)) desc->istate |= IRQS_PENDING; goto out; } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); /* * If its disabled or no action available * then mask it and get out of here: */ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; mask_irq(desc); goto out; } kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT) mask_irq(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); /* * When the race described above happens this will resend the interrupt. */ if (unlikely(desc->istate & IRQS_PENDING)) check_irq_resend(desc, false); raw_spin_unlock(&desc->lock); return; out: if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) chip->irq_eoi(&desc->irq_data); raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_irq); /** * handle_fasteoi_nmi - irq handler for NMI interrupt lines * @desc: the interrupt description structure for this irq * * A simple NMI-safe handler, considering the restrictions * from request_nmi. * * Only a single callback will be issued to the chip: an ->eoi() * call when the interrupt has been serviced. This enables support * for modern forms of interrupt handlers, which handle the flow * details in hardware, transparently. */ void handle_fasteoi_nmi(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; __kstat_incr_irqs_this_cpu(desc); trace_irq_handler_entry(irq, action); /* * NMIs cannot be shared, there is only one action. */ res = action->handler(irq, action->dev_id); trace_irq_handler_exit(irq, action, res); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } EXPORT_SYMBOL_GPL(handle_fasteoi_nmi); /** * handle_edge_irq - edge type IRQ handler * @desc: the interrupt description structure for this irq * * Interrupt occurs on the falling and/or rising edge of a hardware * signal. The occurrence is latched into the irq controller hardware * and must be acked in order to be reenabled. After the ack another * interrupt can happen on the same source even before the first one * is handled by the associated event handler. If this happens it * might be necessary to disable (mask) the interrupt depending on the * controller hardware. This requires to reenable the interrupt inside * of the loop which handles the interrupts which have arrived while * the handler was running. If all pending interrupts are handled, the * loop is left. 
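 *
 * Hedged example (hypothetical irqchip driver, not from this file): a
 * driver normally selects this flow handler when the line is configured
 * for edge triggering, e.g. from its ->irq_set_type() callback:
 *
 *	irq_set_handler_locked(data, handle_edge_irq);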
*/ void handle_edge_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); if (!irq_may_run(desc)) { desc->istate |= IRQS_PENDING; mask_ack_irq(desc); goto out_unlock; } /* * If its disabled or no action available then mask it and get * out of here. */ if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { desc->istate |= IRQS_PENDING; mask_ack_irq(desc); goto out_unlock; } kstat_incr_irqs_this_cpu(desc); /* Start handling the irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); do { if (unlikely(!desc->action)) { mask_irq(desc); goto out_unlock; } /* * When another irq arrived while we were handling * one, we could have masked the irq. * Reenable it, if it was not disabled in meantime. */ if (unlikely(desc->istate & IRQS_PENDING)) { if (!irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data)) unmask_irq(desc); } handle_irq_event(desc); } while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data)); out_unlock: raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL(handle_edge_irq); /** * handle_percpu_irq - Per CPU local irq handler * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements */ void handle_percpu_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); /* * PER CPU interrupts are not serialized. Do not touch * desc->tot_count. */ __kstat_incr_irqs_this_cpu(desc); if (chip->irq_ack) chip->irq_ack(&desc->irq_data); handle_irq_event_percpu(desc); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } /** * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements. Same as * handle_percpu_irq() above but with the following extras: * * action->percpu_dev_id is a pointer to percpu variables which * contain the real device id for the cpu on which this handler is * called */ void handle_percpu_devid_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; /* * PER CPU interrupts are not serialized. Do not touch * desc->tot_count. */ __kstat_incr_irqs_this_cpu(desc); if (chip->irq_ack) chip->irq_ack(&desc->irq_data); if (likely(action)) { trace_irq_handler_entry(irq, action); res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); trace_irq_handler_exit(irq, action, res); } else { unsigned int cpu = smp_processor_id(); bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); if (enabled) irq_percpu_disable(desc, cpu); pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", enabled ? " and unmasked" : "", irq, cpu); } if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } /** * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu * dev ids * @desc: the interrupt description structure for this irq * * Similar to handle_fasteoi_nmi, but handling the dev_id cookie * as a percpu pointer. 
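 *
 * Hedged sketch of the assumed caller side (hypothetical names, not from
 * this file): such lines are requested with request_percpu_nmi() using a
 * percpu dev_id cookie and then enabled on each CPU:
 *
 *	err = request_percpu_nmi(irq, my_nmi_handler, "my-nmi", my_percpu_dev);
 *	...
 *	enable_percpu_nmi(irq, IRQ_TYPE_NONE);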
*/ void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; __kstat_incr_irqs_this_cpu(desc); trace_irq_handler_entry(irq, action); res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); trace_irq_handler_exit(irq, action, res); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } static void __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained, const char *name) { if (!handle) { handle = handle_bad_irq; } else { struct irq_data *irq_data = &desc->irq_data; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* * With hierarchical domains we might run into a * situation where the outermost chip is not yet set * up, but the inner chips are there. Instead of * bailing we install the handler, but obviously we * cannot enable/startup the interrupt at this point. */ while (irq_data) { if (irq_data->chip != &no_irq_chip) break; /* * Bail out if the outer chip is not set up * and the interrupt supposed to be started * right away. */ if (WARN_ON(is_chained)) return; /* Try the parent */ irq_data = irq_data->parent_data; } #endif if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) return; } /* Uninstall? */ if (handle == handle_bad_irq) { if (desc->irq_data.chip != &no_irq_chip) mask_ack_irq(desc); irq_state_set_disabled(desc); if (is_chained) { desc->action = NULL; WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc))); } desc->depth = 1; } desc->handle_irq = handle; desc->name = name; if (handle != handle_bad_irq && is_chained) { unsigned int type = irqd_get_trigger_type(&desc->irq_data); /* * We're about to start this interrupt immediately, * hence the need to set the trigger configuration. * But the .set_type callback may have overridden the * flow handler, ignoring that we're dealing with a * chained interrupt. Reset it immediately because we * do know better. */ if (type != IRQ_TYPE_NONE) { __irq_set_trigger(desc, type); desc->handle_irq = handle; } irq_settings_set_noprobe(desc); irq_settings_set_norequest(desc); irq_settings_set_nothread(desc); desc->action = &chained_action; WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc))); irq_activate_and_startup(desc, IRQ_RESEND); } } void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); if (!desc) return; __irq_do_set_handler(desc, handle, is_chained, name); irq_put_desc_busunlock(desc, flags); } EXPORT_SYMBOL_GPL(__irq_set_handler); void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, void *data) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); if (!desc) return; desc->irq_common_data.handler_data = data; __irq_do_set_handler(desc, handle, 1, NULL); irq_put_desc_busunlock(desc, flags); } EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); void irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle, const char *name) { irq_set_chip(irq, chip); __irq_set_handler(irq, handle, 0, name); } EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { unsigned long flags, trigger, tmp; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return; /* * Warn when a driver sets the no autoenable flag on an already * active interrupt. 
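	 *
	 * (Hypothetical example of the correct ordering, not from this
	 * function: a driver that wants to enable the line manually later
	 * should call irq_set_status_flags(irq, IRQ_NOAUTOEN) before
	 * request_irq(), and only then use enable_irq() when ready.)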
*/ WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); irq_settings_clr_and_set(desc, clr, set); trigger = irqd_get_trigger_type(&desc->irq_data); irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | IRQD_TRIGGER_MASK | IRQD_LEVEL); if (irq_settings_has_no_balance_set(desc)) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); if (irq_settings_is_per_cpu(desc)) irqd_set(&desc->irq_data, IRQD_PER_CPU); if (irq_settings_is_level(desc)) irqd_set(&desc->irq_data, IRQD_LEVEL); tmp = irq_settings_get_trigger_mask(desc); if (tmp != IRQ_TYPE_NONE) trigger = tmp; irqd_set(&desc->irq_data, trigger); irq_put_desc_unlock(desc, flags); } EXPORT_SYMBOL_GPL(irq_modify_status); #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE /** * irq_cpu_online - Invoke all irq_cpu_online functions. * * Iterate through all irqs and invoke the chip.irq_cpu_online() * for each. */ void irq_cpu_online(void) { struct irq_desc *desc; struct irq_chip *chip; unsigned long flags; unsigned int irq; for_each_active_irq(irq) { desc = irq_to_desc(irq); if (!desc) continue; raw_spin_lock_irqsave(&desc->lock, flags); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_online && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_online(&desc->irq_data); raw_spin_unlock_irqrestore(&desc->lock, flags); } } /** * irq_cpu_offline - Invoke all irq_cpu_offline functions. * * Iterate through all irqs and invoke the chip.irq_cpu_offline() * for each. */ void irq_cpu_offline(void) { struct irq_desc *desc; struct irq_chip *chip; unsigned long flags; unsigned int irq; for_each_active_irq(irq) { desc = irq_to_desc(irq); if (!desc) continue; raw_spin_lock_irqsave(&desc->lock, flags); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_offline && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_offline(&desc->irq_data); raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS /** * handle_fasteoi_ack_irq - irq handler for edge hierarchy * stacked on transparent controllers * * @desc: the interrupt description structure for this irq * * Like handle_fasteoi_irq(), but for use with hierarchy where * the irq_chip also needs to have its ->irq_ack() function * called. */ void handle_fasteoi_ack_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; raw_spin_lock(&desc->lock); if (!irq_may_run(desc)) goto out; desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); /* * If its disabled or no action available * then mask it and get out of here: */ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; mask_irq(desc); goto out; } kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT) mask_irq(desc); /* Start handling the irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); raw_spin_unlock(&desc->lock); return; out: if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) chip->irq_eoi(&desc->irq_data); raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); /** * handle_fasteoi_mask_irq - irq handler for level hierarchy * stacked on transparent controllers * * @desc: the interrupt description structure for this irq * * Like handle_fasteoi_irq(), but for use with hierarchy where * the irq_chip also needs to have its ->irq_mask_ack() function * called. 
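 *
 * Hedged example (hypothetical hierarchical irqchip driver, not from
 * this file): the child domain's ->alloc() callback would install this
 * handler for its level-type lines, e.g.:
 *
 *	irq_domain_set_info(domain, virq, hwirq, &my_child_chip, NULL,
 *			    handle_fasteoi_mask_irq, NULL, NULL);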
*/ void handle_fasteoi_mask_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; raw_spin_lock(&desc->lock); mask_ack_irq(desc); if (!irq_may_run(desc)) goto out; desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); /* * If its disabled or no action available * then mask it and get out of here: */ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; mask_irq(desc); goto out; } kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT) mask_irq(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); raw_spin_unlock(&desc->lock); return; out: if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) chip->irq_eoi(&desc->irq_data); raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */ /** * irq_chip_set_parent_state - set the state of a parent interrupt. * * @data: Pointer to interrupt specific data * @which: State to be restored (one of IRQCHIP_STATE_*) * @val: Value corresponding to @which * * Conditional success, if the underlying irqchip does not implement it. */ int irq_chip_set_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool val) { data = data->parent_data; if (!data || !data->chip->irq_set_irqchip_state) return 0; return data->chip->irq_set_irqchip_state(data, which, val); } EXPORT_SYMBOL_GPL(irq_chip_set_parent_state); /** * irq_chip_get_parent_state - get the state of a parent interrupt. * * @data: Pointer to interrupt specific data * @which: one of IRQCHIP_STATE_* the caller wants to know * @state: a pointer to a boolean where the state is to be stored * * Conditional success, if the underlying irqchip does not implement it. */ int irq_chip_get_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) { data = data->parent_data; if (!data || !data->chip->irq_get_irqchip_state) return 0; return data->chip->irq_get_irqchip_state(data, which, state); } EXPORT_SYMBOL_GPL(irq_chip_get_parent_state); /** * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if * NULL) * @data: Pointer to interrupt specific data */ void irq_chip_enable_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_enable) data->chip->irq_enable(data); else data->chip->irq_unmask(data); } EXPORT_SYMBOL_GPL(irq_chip_enable_parent); /** * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if * NULL) * @data: Pointer to interrupt specific data */ void irq_chip_disable_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_disable) data->chip->irq_disable(data); else data->chip->irq_mask(data); } EXPORT_SYMBOL_GPL(irq_chip_disable_parent); /** * irq_chip_ack_parent - Acknowledge the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_ack_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_ack(data); } EXPORT_SYMBOL_GPL(irq_chip_ack_parent); /** * irq_chip_mask_parent - Mask the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_mask_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_mask(data); } EXPORT_SYMBOL_GPL(irq_chip_mask_parent); /** * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_mask_ack_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_mask_ack(data); } EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent); /** * irq_chip_unmask_parent - Unmask the 
parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_unmask_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_unmask(data); } EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); /** * irq_chip_eoi_parent - Invoke EOI on the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_eoi_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_eoi(data); } EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); /** * irq_chip_set_affinity_parent - Set affinity on the parent interrupt * @data: Pointer to interrupt specific data * @dest: The affinity mask to set * @force: Flag to enforce setting (disable online checks) * * Conditional, as the underlying parent chip might not implement it. */ int irq_chip_set_affinity_parent(struct irq_data *data, const struct cpumask *dest, bool force) { data = data->parent_data; if (data->chip->irq_set_affinity) return data->chip->irq_set_affinity(data, dest, force); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); /** * irq_chip_set_type_parent - Set IRQ type on the parent interrupt * @data: Pointer to interrupt specific data * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h * * Conditional, as the underlying parent chip might not implement it. */ int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) { data = data->parent_data; if (data->chip->irq_set_type) return data->chip->irq_set_type(data, type); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); /** * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware * @data: Pointer to interrupt specific data * * Iterate through the domain hierarchy of the interrupt and check * whether a hw retrigger function exists. If yes, invoke it. */ int irq_chip_retrigger_hierarchy(struct irq_data *data) { for (data = data->parent_data; data; data = data->parent_data) if (data->chip && data->chip->irq_retrigger) return data->chip->irq_retrigger(data); return 0; } EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy); /** * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt * @data: Pointer to interrupt specific data * @vcpu_info: The vcpu affinity information */ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) { data = data->parent_data; if (data->chip->irq_set_vcpu_affinity) return data->chip->irq_set_vcpu_affinity(data, vcpu_info); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent); /** * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt * @data: Pointer to interrupt specific data * @on: Whether to set or reset the wake-up capability of this irq * * Conditional, as the underlying parent chip might not implement it. 
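 *
 * (Hypothetical usage, not from this file: a stacked child chip usually
 * just delegates with ".irq_set_wake = irq_chip_set_wake_parent" in its
 * struct irq_chip, next to the other parent-delegation helpers.)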
*/ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) { data = data->parent_data; if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) return 0; if (data->chip->irq_set_wake) return data->chip->irq_set_wake(data, on); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); /** * irq_chip_request_resources_parent - Request resources on the parent interrupt * @data: Pointer to interrupt specific data */ int irq_chip_request_resources_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_request_resources) return data->chip->irq_request_resources(data); /* no error on missing optional irq_chip::irq_request_resources */ return 0; } EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); /** * irq_chip_release_resources_parent - Release resources on the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_release_resources_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_release_resources) data->chip->irq_release_resources(data); } EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); #endif /** * irq_chip_compose_msi_msg - Compose msi message for a irq chip * @data: Pointer to interrupt specific data * @msg: Pointer to the MSI message * * For hierarchical domains we find the first chip in the hierarchy * which implements the irq_compose_msi_msg callback. For non * hierarchical we use the top level chip. */ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct irq_data *pos; for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { if (data->chip && data->chip->irq_compose_msi_msg) pos = data; } if (!pos) return -ENOSYS; pos->chip->irq_compose_msi_msg(pos, msg); return 0; } static struct device *irq_get_pm_device(struct irq_data *data) { if (data->domain) return data->domain->pm_dev; return NULL; } /** * irq_chip_pm_get - Enable power for an IRQ chip * @data: Pointer to interrupt specific data * * Enable the power to the IRQ chip referenced by the interrupt data * structure. */ int irq_chip_pm_get(struct irq_data *data) { struct device *dev = irq_get_pm_device(data); int retval = 0; if (IS_ENABLED(CONFIG_PM) && dev) retval = pm_runtime_resume_and_get(dev); return retval; } /** * irq_chip_pm_put - Disable power for an IRQ chip * @data: Pointer to interrupt specific data * * Disable the power to the IRQ chip referenced by the interrupt data * structure, belongs. Note that power will only be disabled, once this * function has been called for all IRQs that have called irq_chip_pm_get(). */ int irq_chip_pm_put(struct irq_data *data) { struct device *dev = irq_get_pm_device(data); int retval = 0; if (IS_ENABLED(CONFIG_PM) && dev) retval = pm_runtime_put(dev); return (retval < 0) ? retval : 0; } |
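/*
 * Hedged illustration, not part of the kernel sources reproduced here: a
 * minimal sketch of how a hierarchical irqchip driver is assumed to wire
 * the parent-delegation helpers above into its own struct irq_chip.
 * Every name prefixed with "example_" is hypothetical.
 */
static struct irq_chip example_stacked_chip = {
	.name			= "example-stacked",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_wake		= irq_chip_set_wake_parent,
};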
2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Core registration and callback routines for MTD * drivers and users. * * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> * Copyright © 2006 Red Hat UK Limited */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/err.h> #include <linux/ioctl.h> #include <linux/init.h> #include <linux/of.h> #include <linux/proc_fs.h> #include <linux/idr.h> #include <linux/backing-dev.h> #include <linux/gfp.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/reboot.h> #include <linux/leds.h> #include <linux/debugfs.h> #include <linux/nvmem-provider.h> #include <linux/root_dev.h> #include <linux/error-injection.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include "mtdcore.h" struct backing_dev_info *mtd_bdi; #ifdef CONFIG_PM_SLEEP static int mtd_cls_suspend(struct device *dev) { struct mtd_info *mtd = dev_get_drvdata(dev); return mtd ? mtd_suspend(mtd) : 0; } static int mtd_cls_resume(struct device *dev) { struct mtd_info *mtd = dev_get_drvdata(dev); if (mtd) mtd_resume(mtd); return 0; } static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume); #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops) #else #define MTD_CLS_PM_OPS NULL #endif static struct class mtd_class = { .name = "mtd", .pm = MTD_CLS_PM_OPS, }; static DEFINE_IDR(mtd_idr); /* These are exported solely for the purpose of mtd_blkdevs.c. You should not use them for _anything_ else */ DEFINE_MUTEX(mtd_table_mutex); EXPORT_SYMBOL_GPL(mtd_table_mutex); struct mtd_info *__mtd_next_device(int i) { return idr_get_next(&mtd_idr, &i); } EXPORT_SYMBOL_GPL(__mtd_next_device); static LIST_HEAD(mtd_notifiers); #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2) /* REVISIT once MTD uses the driver model better, whoever allocates * the mtd_info will probably want to use the release() hook... 
*/ static void mtd_release(struct device *dev) { struct mtd_info *mtd = dev_get_drvdata(dev); dev_t index = MTD_DEVT(mtd->index); idr_remove(&mtd_idr, mtd->index); of_node_put(mtd_get_of_node(mtd)); if (mtd_is_partition(mtd)) release_mtd_partition(mtd); /* remove /dev/mtdXro node */ device_destroy(&mtd_class, index + 1); } static void mtd_device_release(struct kref *kref) { struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt); bool is_partition = mtd_is_partition(mtd); debugfs_remove_recursive(mtd->dbg.dfs_dir); /* Try to remove the NVMEM provider */ nvmem_unregister(mtd->nvmem); device_unregister(&mtd->dev); /* * Clear dev so mtd can be safely re-registered later if desired. * Should not be done for partition, * as it was already destroyed in device_unregister(). */ if (!is_partition) memset(&mtd->dev, 0, sizeof(mtd->dev)); module_put(THIS_MODULE); } #define MTD_DEVICE_ATTR_RO(name) \ static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL) #define MTD_DEVICE_ATTR_RW(name) \ static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store) static ssize_t mtd_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); char *type; switch (mtd->type) { case MTD_ABSENT: type = "absent"; break; case MTD_RAM: type = "ram"; break; case MTD_ROM: type = "rom"; break; case MTD_NORFLASH: type = "nor"; break; case MTD_NANDFLASH: type = "nand"; break; case MTD_DATAFLASH: type = "dataflash"; break; case MTD_UBIVOLUME: type = "ubi"; break; case MTD_MLCNANDFLASH: type = "mlc-nand"; break; default: type = "unknown"; } return sysfs_emit(buf, "%s\n", type); } MTD_DEVICE_ATTR_RO(type); static ssize_t mtd_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags); } MTD_DEVICE_ATTR_RO(flags); static ssize_t mtd_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size); } MTD_DEVICE_ATTR_RO(size); static ssize_t mtd_erasesize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize); } MTD_DEVICE_ATTR_RO(erasesize); static ssize_t mtd_writesize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize); } MTD_DEVICE_ATTR_RO(writesize); static ssize_t mtd_subpagesize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; return sysfs_emit(buf, "%u\n", subpagesize); } MTD_DEVICE_ATTR_RO(subpagesize); static ssize_t mtd_oobsize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize); } MTD_DEVICE_ATTR_RO(oobsize); static ssize_t mtd_oobavail_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%u\n", mtd->oobavail); } MTD_DEVICE_ATTR_RO(oobavail); static ssize_t mtd_numeraseregions_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%u\n", 
mtd->numeraseregions); } MTD_DEVICE_ATTR_RO(numeraseregions); static ssize_t mtd_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%s\n", mtd->name); } MTD_DEVICE_ATTR_RO(name); static ssize_t mtd_ecc_strength_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%u\n", mtd->ecc_strength); } MTD_DEVICE_ATTR_RO(ecc_strength); static ssize_t mtd_bitflip_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold); } static ssize_t mtd_bitflip_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mtd_info *mtd = dev_get_drvdata(dev); unsigned int bitflip_threshold; int retval; retval = kstrtouint(buf, 0, &bitflip_threshold); if (retval) return retval; mtd->bitflip_threshold = bitflip_threshold; return count; } MTD_DEVICE_ATTR_RW(bitflip_threshold); static ssize_t mtd_ecc_step_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); return sysfs_emit(buf, "%u\n", mtd->ecc_step_size); } MTD_DEVICE_ATTR_RO(ecc_step_size); static ssize_t mtd_corrected_bits_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; return sysfs_emit(buf, "%u\n", ecc_stats->corrected); } MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */ static ssize_t mtd_ecc_failures_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; return sysfs_emit(buf, "%u\n", ecc_stats->failed); } MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */ static ssize_t mtd_bad_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; return sysfs_emit(buf, "%u\n", ecc_stats->badblocks); } MTD_DEVICE_ATTR_RO(bad_blocks); static ssize_t mtd_bbt_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks); } MTD_DEVICE_ATTR_RO(bbt_blocks); static struct attribute *mtd_attrs[] = { &dev_attr_type.attr, &dev_attr_flags.attr, &dev_attr_size.attr, &dev_attr_erasesize.attr, &dev_attr_writesize.attr, &dev_attr_subpagesize.attr, &dev_attr_oobsize.attr, &dev_attr_oobavail.attr, &dev_attr_numeraseregions.attr, &dev_attr_name.attr, &dev_attr_ecc_strength.attr, &dev_attr_ecc_step_size.attr, &dev_attr_corrected_bits.attr, &dev_attr_ecc_failures.attr, &dev_attr_bad_blocks.attr, &dev_attr_bbt_blocks.attr, &dev_attr_bitflip_threshold.attr, NULL, }; ATTRIBUTE_GROUPS(mtd); static const struct device_type mtd_devtype = { .name = "mtd", .groups = mtd_groups, .release = mtd_release, }; static bool mtd_expert_analysis_mode; #ifdef CONFIG_DEBUG_FS bool mtd_check_expert_analysis_mode(void) { const char *mtd_expert_analysis_warning = "Bad block checks have been entirely disabled.\n" "This is only reserved for post-mortem forensics and debug purposes.\n" "Never enable this mode if you do not know what you are doing!\n"; return WARN_ONCE(mtd_expert_analysis_mode, 
mtd_expert_analysis_warning); } EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode); #endif static struct dentry *dfs_dir_mtd; static void mtd_debugfs_populate(struct mtd_info *mtd) { struct device *dev = &mtd->dev; if (IS_ERR_OR_NULL(dfs_dir_mtd)) return; mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd); } #ifndef CONFIG_MMU unsigned mtd_mmap_capabilities(struct mtd_info *mtd) { switch (mtd->type) { case MTD_RAM: return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | NOMMU_MAP_READ | NOMMU_MAP_WRITE; case MTD_ROM: return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | NOMMU_MAP_READ; default: return NOMMU_MAP_COPY; } } EXPORT_SYMBOL_GPL(mtd_mmap_capabilities); #endif static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state, void *cmd) { struct mtd_info *mtd; mtd = container_of(n, struct mtd_info, reboot_notifier); mtd->_reboot(mtd); return NOTIFY_DONE; } /** * mtd_wunit_to_pairing_info - get pairing information of a wunit * @mtd: pointer to new MTD device info structure * @wunit: write unit we are interested in * @info: returned pairing information * * Retrieve pairing information associated to the wunit. * This is mainly useful when dealing with MLC/TLC NANDs where pages can be * paired together, and where programming a page may influence the page it is * paired with. * The notion of page is replaced by the term wunit (write-unit) to stay * consistent with the ->writesize field. * * The @wunit argument can be extracted from an absolute offset using * mtd_offset_to_wunit(). @info is filled with the pairing information attached * to @wunit. * * From the pairing info the MTD user can find all the wunits paired with * @wunit using the following loop: * * for (i = 0; i < mtd_pairing_groups(mtd); i++) { * info.pair = i; * mtd_pairing_info_to_wunit(mtd, &info); * ... * } */ int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, struct mtd_pairing_info *info) { struct mtd_info *master = mtd_get_master(mtd); int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master); if (wunit < 0 || wunit >= npairs) return -EINVAL; if (master->pairing && master->pairing->get_info) return master->pairing->get_info(master, wunit, info); info->group = 0; info->pair = wunit; return 0; } EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info); /** * mtd_pairing_info_to_wunit - get wunit from pairing information * @mtd: pointer to new MTD device info structure * @info: pairing information struct * * Returns a positive number representing the wunit associated to the info * struct, or a negative error code. * * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info() * doc). * * It can also be used to only program the first page of each pair (i.e. 
* page attached to group 0), which allows one to use an MLC NAND in * software-emulated SLC mode: * * info.group = 0; * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd); * for (info.pair = 0; info.pair < npairs; info.pair++) { * wunit = mtd_pairing_info_to_wunit(mtd, &info); * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit), * mtd->writesize, &retlen, buf + (i * mtd->writesize)); * } */ int mtd_pairing_info_to_wunit(struct mtd_info *mtd, const struct mtd_pairing_info *info) { struct mtd_info *master = mtd_get_master(mtd); int ngroups = mtd_pairing_groups(master); int npairs = mtd_wunit_per_eb(master) / ngroups; if (!info || info->pair < 0 || info->pair >= npairs || info->group < 0 || info->group >= ngroups) return -EINVAL; if (master->pairing && master->pairing->get_wunit) return mtd->pairing->get_wunit(master, info); return info->pair; } EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit); /** * mtd_pairing_groups - get the number of pairing groups * @mtd: pointer to new MTD device info structure * * Returns the number of pairing groups. * * This number is usually equal to the number of bits exposed by a single * cell, and can be used in conjunction with mtd_pairing_info_to_wunit() * to iterate over all pages of a given pair. */ int mtd_pairing_groups(struct mtd_info *mtd) { struct mtd_info *master = mtd_get_master(mtd); if (!master->pairing || !master->pairing->ngroups) return 1; return master->pairing->ngroups; } EXPORT_SYMBOL_GPL(mtd_pairing_groups); static int mtd_nvmem_reg_read(void *priv, unsigned int offset, void *val, size_t bytes) { struct mtd_info *mtd = priv; size_t retlen; int err; err = mtd_read(mtd, offset, bytes, &retlen, val); if (err && err != -EUCLEAN) return err; return retlen == bytes ? 0 : -EIO; } static int mtd_nvmem_add(struct mtd_info *mtd) { struct device_node *node = mtd_get_of_node(mtd); struct nvmem_config config = {}; config.id = NVMEM_DEVID_NONE; config.dev = &mtd->dev; config.name = dev_name(&mtd->dev); config.owner = THIS_MODULE; config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells"); config.reg_read = mtd_nvmem_reg_read; config.size = mtd->size; config.word_size = 1; config.stride = 1; config.read_only = true; config.root_only = true; config.ignore_wp = true; config.priv = mtd; mtd->nvmem = nvmem_register(&config); if (IS_ERR(mtd->nvmem)) { /* Just ignore if there is no NVMEM support in the kernel */ if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) mtd->nvmem = NULL; else return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem), "Failed to register NVMEM device\n"); } return 0; } static void mtd_check_of_node(struct mtd_info *mtd) { struct device_node *partitions, *parent_dn, *mtd_dn = NULL; const char *pname, *prefix = "partition-"; int plen, mtd_name_len, offset, prefix_len; /* Check if MTD already has a device node */ if (mtd_get_of_node(mtd)) return; if (!mtd_is_partition(mtd)) return; parent_dn = of_node_get(mtd_get_of_node(mtd->parent)); if (!parent_dn) return; if (mtd_is_partition(mtd->parent)) partitions = of_node_get(parent_dn); else partitions = of_get_child_by_name(parent_dn, "partitions"); if (!partitions) goto exit_parent; prefix_len = strlen(prefix); mtd_name_len = strlen(mtd->name); /* Search if a partition is defined with the same name */ for_each_child_of_node(partitions, mtd_dn) { /* Skip partition with no/wrong prefix */ if (!of_node_name_prefix(mtd_dn, prefix)) continue; /* Label have priority. 
Check that first */ if (!of_property_read_string(mtd_dn, "label", &pname)) { offset = 0; } else { pname = mtd_dn->name; offset = prefix_len; } plen = strlen(pname) - offset; if (plen == mtd_name_len && !strncmp(mtd->name, pname + offset, plen)) { mtd_set_of_node(mtd, mtd_dn); of_node_put(mtd_dn); break; } } of_node_put(partitions); exit_parent: of_node_put(parent_dn); } /** * add_mtd_device - register an MTD device * @mtd: pointer to new MTD device info structure * * Add a device to the list of MTD devices present in the system, and * notify each currently active MTD 'user' of its arrival. Returns * zero on success or non-zero on failure. */ int add_mtd_device(struct mtd_info *mtd) { struct device_node *np = mtd_get_of_node(mtd); struct mtd_info *master = mtd_get_master(mtd); struct mtd_notifier *not; int i, error, ofidx; /* * May occur, for instance, on buggy drivers which call * mtd_device_parse_register() multiple times on the same master MTD, * especially with CONFIG_MTD_PARTITIONED_MASTER=y. */ if (WARN_ONCE(mtd->dev.type, "MTD already registered\n")) return -EEXIST; BUG_ON(mtd->writesize == 0); /* * MTD drivers should implement ->_{write,read}() or * ->_{write,read}_oob(), but not both. */ if (WARN_ON((mtd->_write && mtd->_write_oob) || (mtd->_read && mtd->_read_oob))) return -EINVAL; if (WARN_ON((!mtd->erasesize || !master->_erase) && !(mtd->flags & MTD_NO_ERASE))) return -EINVAL; /* * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the * master is an MLC NAND and has a proper pairing scheme defined. * We also reject masters that implement ->_writev() for now, because * NAND controller drivers don't implement this hook, and adding the * SLC -> MLC address/length conversion to this path is useless if we * don't have a user. */ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION && (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH || !master->pairing || master->_writev)) return -EINVAL; mutex_lock(&mtd_table_mutex); ofidx = -1; if (np) ofidx = of_alias_get_id(np, "mtd"); if (ofidx >= 0) i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); else i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); if (i < 0) { error = i; goto fail_locked; } mtd->index = i; kref_init(&mtd->refcnt); /* default value if not set by driver */ if (mtd->bitflip_threshold == 0) mtd->bitflip_threshold = mtd->ecc_strength; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { int ngroups = mtd_pairing_groups(master); mtd->erasesize /= ngroups; mtd->size = (u64)mtd_div_by_eb(mtd->size, master) * mtd->erasesize; } if (is_power_of_2(mtd->erasesize)) mtd->erasesize_shift = ffs(mtd->erasesize) - 1; else mtd->erasesize_shift = 0; if (is_power_of_2(mtd->writesize)) mtd->writesize_shift = ffs(mtd->writesize) - 1; else mtd->writesize_shift = 0; mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; /* Some chips always power up locked. Unlock them now */ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) { error = mtd_unlock(mtd, 0, mtd->size); if (error && error != -EOPNOTSUPP) printk(KERN_WARNING "%s: unlock failed, writes may not work\n", mtd->name); /* Ignore unlock failures? */ error = 0; } /* Caller should have set dev.parent to match the * physical device, if appropriate. 
*/ mtd->dev.type = &mtd_devtype; mtd->dev.class = &mtd_class; mtd->dev.devt = MTD_DEVT(i); error = dev_set_name(&mtd->dev, "mtd%d", i); if (error) goto fail_devname; dev_set_drvdata(&mtd->dev, mtd); mtd_check_of_node(mtd); of_node_get(mtd_get_of_node(mtd)); error = device_register(&mtd->dev); if (error) { put_device(&mtd->dev); goto fail_added; } /* Add the nvmem provider */ error = mtd_nvmem_add(mtd); if (error) goto fail_nvmem_add; mtd_debugfs_populate(mtd); device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, "mtd%dro", i); pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); /* No need to get a refcount on the module containing the notifier, since we hold the mtd_table_mutex */ list_for_each_entry(not, &mtd_notifiers, list) not->add(mtd); mutex_unlock(&mtd_table_mutex); if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { if (IS_BUILTIN(CONFIG_MTD)) { pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name); ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); } else { pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", mtd->index, mtd->name); } } /* We _know_ we aren't being removed, because our caller is still holding us here. So none of this try_ nonsense, and no bitching about it either. :) */ __module_get(THIS_MODULE); return 0; fail_nvmem_add: device_unregister(&mtd->dev); fail_added: of_node_put(mtd_get_of_node(mtd)); fail_devname: idr_remove(&mtd_idr, i); fail_locked: mutex_unlock(&mtd_table_mutex); return error; } /** * del_mtd_device - unregister an MTD device * @mtd: pointer to MTD device info structure * * Remove a device from the list of MTD devices present in the system, * and notify each currently active MTD 'user' of its departure. * Returns zero on success or 1 on failure, which currently will happen * if the requested device does not appear to be present in the list. */ int del_mtd_device(struct mtd_info *mtd) { int ret; struct mtd_notifier *not; mutex_lock(&mtd_table_mutex); if (idr_find(&mtd_idr, mtd->index) != mtd) { ret = -ENODEV; goto out_error; } /* No need to get a refcount on the module containing the notifier, since we hold the mtd_table_mutex */ list_for_each_entry(not, &mtd_notifiers, list) not->remove(mtd); kref_put(&mtd->refcnt, mtd_device_release); ret = 0; out_error: mutex_unlock(&mtd_table_mutex); return ret; } /* * Set a few defaults based on the parent devices, if not provided by the * driver */ static void mtd_set_dev_defaults(struct mtd_info *mtd) { if (mtd->dev.parent) { if (!mtd->owner && mtd->dev.parent->driver) mtd->owner = mtd->dev.parent->driver->owner; if (!mtd->name) mtd->name = dev_name(mtd->dev.parent); } else { pr_debug("mtd device won't show a device symlink in sysfs\n"); } INIT_LIST_HEAD(&mtd->partitions); mutex_init(&mtd->master.partitions_lock); mutex_init(&mtd->master.chrdev_lock); } static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user) { struct otp_info *info; ssize_t size = 0; unsigned int i; size_t retlen; int ret; info = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!info) return -ENOMEM; if (is_user) ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info); else ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info); if (ret) goto err; for (i = 0; i < retlen / sizeof(*info); i++) size += info[i].length; kfree(info); return size; err: kfree(info); /* ENODATA means there is no OTP region. */ return ret == -ENODATA ? 
0 : ret; } static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, const char *compatible, int size, nvmem_reg_read_t reg_read) { struct nvmem_device *nvmem = NULL; struct nvmem_config config = {}; struct device_node *np; /* DT binding is optional */ np = of_get_compatible_child(mtd->dev.of_node, compatible); /* OTP nvmem will be registered on the physical device */ config.dev = mtd->dev.parent; config.name = compatible; config.id = NVMEM_DEVID_AUTO; config.owner = THIS_MODULE; config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd); config.type = NVMEM_TYPE_OTP; config.root_only = true; config.ignore_wp = true; config.reg_read = reg_read; config.size = size; config.of_node = np; config.priv = mtd; nvmem = nvmem_register(&config); /* Just ignore if there is no NVMEM support in the kernel */ if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP) nvmem = NULL; of_node_put(np); return nvmem; } static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset, void *val, size_t bytes) { struct mtd_info *mtd = priv; size_t retlen; int ret; ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val); if (ret) return ret; return retlen == bytes ? 0 : -EIO; } static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset, void *val, size_t bytes) { struct mtd_info *mtd = priv; size_t retlen; int ret; ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val); if (ret) return ret; return retlen == bytes ? 0 : -EIO; } static int mtd_otp_nvmem_add(struct mtd_info *mtd) { struct device *dev = mtd->dev.parent; struct nvmem_device *nvmem; ssize_t size; int err; if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) { size = mtd_otp_size(mtd, true); if (size < 0) { err = size; goto err; } if (size > 0) { nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size, mtd_nvmem_user_otp_reg_read); if (IS_ERR(nvmem)) { err = PTR_ERR(nvmem); goto err; } mtd->otp_user_nvmem = nvmem; } } if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) { size = mtd_otp_size(mtd, false); if (size < 0) { err = size; goto err; } if (size > 0) { /* * The factory OTP contains thing such as a unique serial * number and is small, so let's read it out and put it * into the entropy pool. */ void *otp; otp = kmalloc(size, GFP_KERNEL); if (!otp) { err = -ENOMEM; goto err; } err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size); if (err < 0) { kfree(otp); goto err; } add_device_randomness(otp, err); kfree(otp); nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size, mtd_nvmem_fact_otp_reg_read); if (IS_ERR(nvmem)) { err = PTR_ERR(nvmem); goto err; } mtd->otp_factory_nvmem = nvmem; } } return 0; err: nvmem_unregister(mtd->otp_user_nvmem); /* Don't report error if OTP is not supported. */ if (err == -EOPNOTSUPP) return 0; return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n"); } /** * mtd_device_parse_register - parse partitions and register an MTD device. * * @mtd: the MTD device to register * @types: the list of MTD partition probes to try, see * 'parse_mtd_partitions()' for more information * @parser_data: MTD partition parser-specific data * @parts: fallback partition information to register, if parsing fails; * only valid if %nr_parts > %0 * @nr_parts: the number of partitions in parts, if zero then the full * MTD device is registered if no partition info is found * * This function aggregates MTD partitions parsing (done by * 'parse_mtd_partitions()') and MTD device and partitions registering. 
It * basically follows the most common pattern found in many MTD drivers: * * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is * registered first. * * Then It tries to probe partitions on MTD device @mtd using parsers * specified in @types (if @types is %NULL, then the default list of parsers * is used, see 'parse_mtd_partitions()' for more information). If none are * found this functions tries to fallback to information specified in * @parts/@nr_parts. * * If no partitions were found this function just registers the MTD device * @mtd and exits. * * Returns zero in case of success and a negative error code in case of failure. */ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, struct mtd_part_parser_data *parser_data, const struct mtd_partition *parts, int nr_parts) { int ret, err; mtd_set_dev_defaults(mtd); ret = mtd_otp_nvmem_add(mtd); if (ret) goto out; if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) { ret = add_mtd_device(mtd); if (ret) goto out; } /* Prefer parsed partitions over driver-provided fallback */ ret = parse_mtd_partitions(mtd, types, parser_data); if (ret == -EPROBE_DEFER) goto out; if (ret > 0) ret = 0; else if (nr_parts) ret = add_mtd_partitions(mtd, parts, nr_parts); else if (!device_is_registered(&mtd->dev)) ret = add_mtd_device(mtd); else ret = 0; if (ret) goto out; /* * FIXME: some drivers unfortunately call this function more than once. * So we have to check if we've already assigned the reboot notifier. * * Generally, we can make multiple calls work for most cases, but it * does cause problems with parse_mtd_partitions() above (e.g., * cmdlineparts will register partitions more than once). */ WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call, "MTD already registered\n"); if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) { mtd->reboot_notifier.notifier_call = mtd_reboot_notifier; register_reboot_notifier(&mtd->reboot_notifier); } out: if (ret) { nvmem_unregister(mtd->otp_user_nvmem); nvmem_unregister(mtd->otp_factory_nvmem); } if (ret && device_is_registered(&mtd->dev)) { err = del_mtd_device(mtd); if (err) pr_err("Error when deleting MTD device (%d)\n", err); } return ret; } EXPORT_SYMBOL_GPL(mtd_device_parse_register); /** * mtd_device_unregister - unregister an existing MTD device. * * @master: the MTD device to unregister. This will unregister both the master * and any partitions if registered. */ int mtd_device_unregister(struct mtd_info *master) { int err; if (master->_reboot) { unregister_reboot_notifier(&master->reboot_notifier); memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier)); } nvmem_unregister(master->otp_user_nvmem); nvmem_unregister(master->otp_factory_nvmem); err = del_mtd_partitions(master); if (err) return err; if (!device_is_registered(&master->dev)) return 0; return del_mtd_device(master); } EXPORT_SYMBOL_GPL(mtd_device_unregister); /** * register_mtd_user - register a 'user' of MTD devices. * @new: pointer to notifier info structure * * Registers a pair of callbacks function to be called upon addition * or removal of MTD devices. Causes the 'add' callback to be immediately * invoked for each MTD device currently present in the system. 
*/ void register_mtd_user (struct mtd_notifier *new) { struct mtd_info *mtd; mutex_lock(&mtd_table_mutex); list_add(&new->list, &mtd_notifiers); __module_get(THIS_MODULE); mtd_for_each_device(mtd) new->add(mtd); mutex_unlock(&mtd_table_mutex); } EXPORT_SYMBOL_GPL(register_mtd_user); /** * unregister_mtd_user - unregister a 'user' of MTD devices. * @old: pointer to notifier info structure * * Removes a callback function pair from the list of 'users' to be * notified upon addition or removal of MTD devices. Causes the * 'remove' callback to be immediately invoked for each MTD device * currently present in the system. */ int unregister_mtd_user (struct mtd_notifier *old) { struct mtd_info *mtd; mutex_lock(&mtd_table_mutex); module_put(THIS_MODULE); mtd_for_each_device(mtd) old->remove(mtd); list_del(&old->list); mutex_unlock(&mtd_table_mutex); return 0; } EXPORT_SYMBOL_GPL(unregister_mtd_user); /** * get_mtd_device - obtain a validated handle for an MTD device * @mtd: last known address of the required MTD device * @num: internal device number of the required MTD device * * Given a number and NULL address, return the num'th entry in the device * table, if any. Given an address and num == -1, search the device table * for a device with that address and return if it's still present. Given * both, return the num'th driver only if its address matches. Return * error code if not. */ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) { struct mtd_info *ret = NULL, *other; int err = -ENODEV; mutex_lock(&mtd_table_mutex); if (num == -1) { mtd_for_each_device(other) { if (other == mtd) { ret = mtd; break; } } } else if (num >= 0) { ret = idr_find(&mtd_idr, num); if (mtd && mtd != ret) ret = NULL; } if (!ret) { ret = ERR_PTR(err); goto out; } err = __get_mtd_device(ret); if (err) ret = ERR_PTR(err); out: mutex_unlock(&mtd_table_mutex); return ret; } EXPORT_SYMBOL_GPL(get_mtd_device); int __get_mtd_device(struct mtd_info *mtd) { struct mtd_info *master = mtd_get_master(mtd); int err; if (master->_get_device) { err = master->_get_device(mtd); if (err) return err; } if (!try_module_get(master->owner)) { if (master->_put_device) master->_put_device(master); return -ENODEV; } while (mtd) { if (mtd != master) kref_get(&mtd->refcnt); mtd = mtd->parent; } if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) kref_get(&master->refcnt); return 0; } EXPORT_SYMBOL_GPL(__get_mtd_device); /** * of_get_mtd_device_by_node - obtain an MTD device associated with a given node * * @np: device tree node */ struct mtd_info *of_get_mtd_device_by_node(struct device_node *np) { struct mtd_info *mtd = NULL; struct mtd_info *tmp; int err; mutex_lock(&mtd_table_mutex); err = -EPROBE_DEFER; mtd_for_each_device(tmp) { if (mtd_get_of_node(tmp) == np) { mtd = tmp; err = __get_mtd_device(mtd); break; } } mutex_unlock(&mtd_table_mutex); return err ? ERR_PTR(err) : mtd; } EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node); /** * get_mtd_device_nm - obtain a validated handle for an MTD device by * device name * @name: MTD device name to open * * This function returns MTD device description structure in case of * success and an error code in case of failure. 
*/ struct mtd_info *get_mtd_device_nm(const char *name) { int err = -ENODEV; struct mtd_info *mtd = NULL, *other; mutex_lock(&mtd_table_mutex); mtd_for_each_device(other) { if (!strcmp(name, other->name)) { mtd = other; break; } } if (!mtd) goto out_unlock; err = __get_mtd_device(mtd); if (err) goto out_unlock; mutex_unlock(&mtd_table_mutex); return mtd; out_unlock: mutex_unlock(&mtd_table_mutex); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(get_mtd_device_nm); void put_mtd_device(struct mtd_info *mtd) { mutex_lock(&mtd_table_mutex); __put_mtd_device(mtd); mutex_unlock(&mtd_table_mutex); } EXPORT_SYMBOL_GPL(put_mtd_device); void __put_mtd_device(struct mtd_info *mtd) { struct mtd_info *master = mtd_get_master(mtd); while (mtd) { /* kref_put() can relese mtd, so keep a reference mtd->parent */ struct mtd_info *parent = mtd->parent; if (mtd != master) kref_put(&mtd->refcnt, mtd_device_release); mtd = parent; } if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) kref_put(&master->refcnt, mtd_device_release); module_put(master->owner); /* must be the last as master can be freed in the _put_device */ if (master->_put_device) master->_put_device(master); } EXPORT_SYMBOL_GPL(__put_mtd_device); /* * Erase is an synchronous operation. Device drivers are epected to return a * negative error code if the operation failed and update instr->fail_addr * to point the portion that was not properly erased. */ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) { struct mtd_info *master = mtd_get_master(mtd); u64 mst_ofs = mtd_get_master_ofs(mtd, 0); struct erase_info adjinstr; int ret; instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; adjinstr = *instr; if (!mtd->erasesize || !master->_erase) return -ENOTSUPP; if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr) return -EINVAL; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; if (!instr->len) return 0; ledtrig_mtd_activity(); if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) * master->erasesize; adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) * master->erasesize) - adjinstr.addr; } adjinstr.addr += mst_ofs; ret = master->_erase(master, &adjinstr); if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) { instr->fail_addr = adjinstr.fail_addr - mst_ofs; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { instr->fail_addr = mtd_div_by_eb(instr->fail_addr, master); instr->fail_addr *= mtd->erasesize; } } return ret; } EXPORT_SYMBOL_GPL(mtd_erase); ALLOW_ERROR_INJECTION(mtd_erase, ERRNO); /* * This stuff for eXecute-In-Place. phys is optional and may be set to NULL. 
*/ int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys) { struct mtd_info *master = mtd_get_master(mtd); *retlen = 0; *virt = NULL; if (phys) *phys = 0; if (!master->_point) return -EOPNOTSUPP; if (from < 0 || from >= mtd->size || len > mtd->size - from) return -EINVAL; if (!len) return 0; from = mtd_get_master_ofs(mtd, from); return master->_point(master, from, len, retlen, virt, phys); } EXPORT_SYMBOL_GPL(mtd_point); /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_unpoint) return -EOPNOTSUPP; if (from < 0 || from >= mtd->size || len > mtd->size - from) return -EINVAL; if (!len) return 0; return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len); } EXPORT_SYMBOL_GPL(mtd_unpoint); /* * Allow NOMMU mmap() to directly map the device (if not NULL) * - return the address to which the offset maps * - return -ENOSYS to indicate refusal to do the mapping */ unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, unsigned long offset, unsigned long flags) { size_t retlen; void *virt; int ret; ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL); if (ret) return ret; if (retlen != len) { mtd_unpoint(mtd, offset, retlen); return -ENOSYS; } return (unsigned long)virt; } EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master, const struct mtd_ecc_stats *old_stats) { struct mtd_ecc_stats diff; if (master == mtd) return; diff = master->ecc_stats; diff.failed -= old_stats->failed; diff.corrected -= old_stats->corrected; while (mtd->parent) { mtd->ecc_stats.failed += diff.failed; mtd->ecc_stats.corrected += diff.corrected; mtd = mtd->parent; } } int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_oob_ops ops = { .len = len, .datbuf = buf, }; int ret; ret = mtd_read_oob(mtd, from, &ops); *retlen = ops.retlen; WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret)); return ret; } EXPORT_SYMBOL_GPL(mtd_read); ALLOW_ERROR_INJECTION(mtd_read, ERRNO); int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct mtd_oob_ops ops = { .len = len, .datbuf = (u8 *)buf, }; int ret; ret = mtd_write_oob(mtd, to, &ops); *retlen = ops.retlen; return ret; } EXPORT_SYMBOL_GPL(mtd_write); ALLOW_ERROR_INJECTION(mtd_write, ERRNO); /* * In blackbox flight recorder like scenarios we want to make successful writes * in interrupt context. panic_write() is only intended to be called when its * known the kernel is about to panic and we need the write to succeed. Since * the kernel is not going to be running for much longer, this function can * break locks and delay to ensure the write succeeds (but not sleep). 
*/ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct mtd_info *master = mtd_get_master(mtd); *retlen = 0; if (!master->_panic_write) return -EOPNOTSUPP; if (to < 0 || to >= mtd->size || len > mtd->size - to) return -EINVAL; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; if (!len) return 0; if (!master->oops_panic_write) master->oops_panic_write = true; return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_panic_write); static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, struct mtd_oob_ops *ops) { /* * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in * this case. */ if (!ops->datbuf) ops->len = 0; if (!ops->oobbuf) ops->ooblen = 0; if (offs < 0 || offs + ops->len > mtd->size) return -EINVAL; if (ops->ooblen) { size_t maxooblen; if (ops->ooboffs >= mtd_oobavail(mtd, ops)) return -EINVAL; maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) - mtd_div_by_ws(offs, mtd)) * mtd_oobavail(mtd, ops)) - ops->ooboffs; if (ops->ooblen > maxooblen) return -EINVAL; } return 0; } static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { struct mtd_info *master = mtd_get_master(mtd); int ret; from = mtd_get_master_ofs(mtd, from); if (master->_read_oob) ret = master->_read_oob(master, from, ops); else ret = master->_read(master, from, ops->len, &ops->retlen, ops->datbuf); return ret; } static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct mtd_info *master = mtd_get_master(mtd); int ret; to = mtd_get_master_ofs(mtd, to); if (master->_write_oob) ret = master->_write_oob(master, to, ops); else ret = master->_write(master, to, ops->len, &ops->retlen, ops->datbuf); return ret; } static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read, struct mtd_oob_ops *ops) { struct mtd_info *master = mtd_get_master(mtd); int ngroups = mtd_pairing_groups(master); int npairs = mtd_wunit_per_eb(master) / ngroups; struct mtd_oob_ops adjops = *ops; unsigned int wunit, oobavail; struct mtd_pairing_info info; int max_bitflips = 0; u32 ebofs, pageofs; loff_t base, pos; ebofs = mtd_mod_by_eb(start, mtd); base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize; info.group = 0; info.pair = mtd_div_by_ws(ebofs, mtd); pageofs = mtd_mod_by_ws(ebofs, mtd); oobavail = mtd_oobavail(mtd, ops); while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { int ret; if (info.pair >= npairs) { info.pair = 0; base += master->erasesize; } wunit = mtd_pairing_info_to_wunit(master, &info); pos = mtd_wunit_to_offset(mtd, base, wunit); adjops.len = ops->len - ops->retlen; if (adjops.len > mtd->writesize - pageofs) adjops.len = mtd->writesize - pageofs; adjops.ooblen = ops->ooblen - ops->oobretlen; if (adjops.ooblen > oobavail - adjops.ooboffs) adjops.ooblen = oobavail - adjops.ooboffs; if (read) { ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops); if (ret > 0) max_bitflips = max(max_bitflips, ret); } else { ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops); } if (ret < 0) return ret; max_bitflips = max(max_bitflips, ret); ops->retlen += adjops.retlen; ops->oobretlen += adjops.oobretlen; adjops.datbuf += adjops.retlen; adjops.oobbuf += adjops.oobretlen; adjops.ooboffs = 0; pageofs = 0; info.pair++; } return max_bitflips; } int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { struct mtd_info *master = 
mtd_get_master(mtd); struct mtd_ecc_stats old_stats = master->ecc_stats; int ret_code; ops->retlen = ops->oobretlen = 0; ret_code = mtd_check_oob_ops(mtd, from, ops); if (ret_code) return ret_code; ledtrig_mtd_activity(); /* Check the validity of a potential fallback on mtd->_read */ if (!master->_read_oob && (!master->_read || ops->oobbuf)) return -EOPNOTSUPP; if (ops->stats) memset(ops->stats, 0, sizeof(*ops->stats)); if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) ret_code = mtd_io_emulated_slc(mtd, from, true, ops); else ret_code = mtd_read_oob_std(mtd, from, ops); mtd_update_ecc_stats(mtd, master, &old_stats); /* * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics * similar to mtd->_read(), returning a non-negative integer * representing max bitflips. In other cases, mtd->_read_oob() may * return -EUCLEAN. In all cases, perform similar logic to mtd_read(). */ if (unlikely(ret_code < 0)) return ret_code; if (mtd->ecc_strength == 0) return 0; /* device lacks ecc */ if (ops->stats) ops->stats->max_bitflips = ret_code; return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; } EXPORT_SYMBOL_GPL(mtd_read_oob); int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct mtd_info *master = mtd_get_master(mtd); int ret; ops->retlen = ops->oobretlen = 0; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; ret = mtd_check_oob_ops(mtd, to, ops); if (ret) return ret; ledtrig_mtd_activity(); /* Check the validity of a potential fallback on mtd->_write */ if (!master->_write_oob && (!master->_write || ops->oobbuf)) return -EOPNOTSUPP; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) return mtd_io_emulated_slc(mtd, to, false, ops); return mtd_write_oob_std(mtd, to, ops); } EXPORT_SYMBOL_GPL(mtd_write_oob); /** * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section * @mtd: MTD device structure * @section: ECC section. Depending on the layout you may have all the ECC * bytes stored in a single contiguous section, or one section * per ECC chunk (and sometime several sections for a single ECC * ECC chunk) * @oobecc: OOB region struct filled with the appropriate ECC position * information * * This function returns ECC section information in the OOB area. If you want * to get all the ECC bytes information, then you should call * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE. * * Returns zero on success, a negative error code otherwise. */ int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobecc) { struct mtd_info *master = mtd_get_master(mtd); memset(oobecc, 0, sizeof(*oobecc)); if (!master || section < 0) return -EINVAL; if (!master->ooblayout || !master->ooblayout->ecc) return -ENOTSUPP; return master->ooblayout->ecc(master, section, oobecc); } EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); /** * mtd_ooblayout_free - Get the OOB region definition of a specific free * section * @mtd: MTD device structure * @section: Free section you are interested in. Depending on the layout * you may have all the free bytes stored in a single contiguous * section, or one section per ECC chunk plus an extra section * for the remaining bytes (or other funky layout). * @oobfree: OOB region struct filled with the appropriate free position * information * * This function returns free bytes position in the OOB area. If you want * to get all the free bytes information, then you should call * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE. * * Returns zero on success, a negative error code otherwise. 
*/ int mtd_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobfree) { struct mtd_info *master = mtd_get_master(mtd); memset(oobfree, 0, sizeof(*oobfree)); if (!master || section < 0) return -EINVAL; if (!master->ooblayout || !master->ooblayout->free) return -ENOTSUPP; return master->ooblayout->free(master, section, oobfree); } EXPORT_SYMBOL_GPL(mtd_ooblayout_free); /** * mtd_ooblayout_find_region - Find the region attached to a specific byte * @mtd: mtd info structure * @byte: the byte we are searching for * @sectionp: pointer where the section id will be stored * @oobregion: used to retrieve the ECC position * @iter: iterator function. Should be either mtd_ooblayout_free or * mtd_ooblayout_ecc depending on the region type you're searching for * * This function returns the section id and oobregion information of a * specific byte. For example, say you want to know where the 4th ECC byte is * stored, you'll use: * * mtd_ooblayout_find_region(mtd, 3, §ion, &oobregion, mtd_ooblayout_ecc); * * Returns zero on success, a negative error code otherwise. */ static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte, int *sectionp, struct mtd_oob_region *oobregion, int (*iter)(struct mtd_info *, int section, struct mtd_oob_region *oobregion)) { int pos = 0, ret, section = 0; memset(oobregion, 0, sizeof(*oobregion)); while (1) { ret = iter(mtd, section, oobregion); if (ret) return ret; if (pos + oobregion->length > byte) break; pos += oobregion->length; section++; } /* * Adjust region info to make it start at the beginning at the * 'start' ECC byte. */ oobregion->offset += byte - pos; oobregion->length -= byte - pos; *sectionp = section; return 0; } /** * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific * ECC byte * @mtd: mtd info structure * @eccbyte: the byte we are searching for * @section: pointer where the section id will be stored * @oobregion: OOB region information * * Works like mtd_ooblayout_find_region() except it searches for a specific ECC * byte. * * Returns zero on success, a negative error code otherwise. */ int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, int *section, struct mtd_oob_region *oobregion) { return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion, mtd_ooblayout_ecc); } EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion); /** * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer * @mtd: mtd info structure * @buf: destination buffer to store OOB bytes * @oobbuf: OOB buffer * @start: first byte to retrieve * @nbytes: number of bytes to retrieve * @iter: section iterator * * Extract bytes attached to a specific category (ECC or free) * from the OOB buffer and copy them into buf. * * Returns zero on success, a negative error code otherwise. 
*/ static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf, const u8 *oobbuf, int start, int nbytes, int (*iter)(struct mtd_info *, int section, struct mtd_oob_region *oobregion)) { struct mtd_oob_region oobregion; int section, ret; ret = mtd_ooblayout_find_region(mtd, start, §ion, &oobregion, iter); while (!ret) { int cnt; cnt = min_t(int, nbytes, oobregion.length); memcpy(buf, oobbuf + oobregion.offset, cnt); buf += cnt; nbytes -= cnt; if (!nbytes) break; ret = iter(mtd, ++section, &oobregion); } return ret; } /** * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer * @mtd: mtd info structure * @buf: source buffer to get OOB bytes from * @oobbuf: OOB buffer * @start: first OOB byte to set * @nbytes: number of OOB bytes to set * @iter: section iterator * * Fill the OOB buffer with data provided in buf. The category (ECC or free) * is selected by passing the appropriate iterator. * * Returns zero on success, a negative error code otherwise. */ static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf, u8 *oobbuf, int start, int nbytes, int (*iter)(struct mtd_info *, int section, struct mtd_oob_region *oobregion)) { struct mtd_oob_region oobregion; int section, ret; ret = mtd_ooblayout_find_region(mtd, start, §ion, &oobregion, iter); while (!ret) { int cnt; cnt = min_t(int, nbytes, oobregion.length); memcpy(oobbuf + oobregion.offset, buf, cnt); buf += cnt; nbytes -= cnt; if (!nbytes) break; ret = iter(mtd, ++section, &oobregion); } return ret; } /** * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category * @mtd: mtd info structure * @iter: category iterator * * Count the number of bytes in a given category. * * Returns a positive value on success, a negative error code otherwise. */ static int mtd_ooblayout_count_bytes(struct mtd_info *mtd, int (*iter)(struct mtd_info *, int section, struct mtd_oob_region *oobregion)) { struct mtd_oob_region oobregion; int section = 0, ret, nbytes = 0; while (1) { ret = iter(mtd, section++, &oobregion); if (ret) { if (ret == -ERANGE) ret = nbytes; break; } nbytes += oobregion.length; } return ret; } /** * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer * @mtd: mtd info structure * @eccbuf: destination buffer to store ECC bytes * @oobbuf: OOB buffer * @start: first ECC byte to retrieve * @nbytes: number of ECC bytes to retrieve * * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes. * * Returns zero on success, a negative error code otherwise. */ int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, const u8 *oobbuf, int start, int nbytes) { return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes, mtd_ooblayout_ecc); } EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes); /** * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer * @mtd: mtd info structure * @eccbuf: source buffer to get ECC bytes from * @oobbuf: OOB buffer * @start: first ECC byte to set * @nbytes: number of ECC bytes to set * * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes. * * Returns zero on success, a negative error code otherwise. 
*/ int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, u8 *oobbuf, int start, int nbytes) { return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes, mtd_ooblayout_ecc); } EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes); /** * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer * @mtd: mtd info structure * @databuf: destination buffer to store ECC bytes * @oobbuf: OOB buffer * @start: first ECC byte to retrieve * @nbytes: number of ECC bytes to retrieve * * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. * * Returns zero on success, a negative error code otherwise. */ int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, const u8 *oobbuf, int start, int nbytes) { return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes, mtd_ooblayout_free); } EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes); /** * mtd_ooblayout_set_databytes - set data bytes into the oob buffer * @mtd: mtd info structure * @databuf: source buffer to get data bytes from * @oobbuf: OOB buffer * @start: first ECC byte to set * @nbytes: number of ECC bytes to set * * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes. * * Returns zero on success, a negative error code otherwise. */ int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, u8 *oobbuf, int start, int nbytes) { return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes, mtd_ooblayout_free); } EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes); /** * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB * @mtd: mtd info structure * * Works like mtd_ooblayout_count_bytes(), except it count free bytes. * * Returns zero on success, a negative error code otherwise. */ int mtd_ooblayout_count_freebytes(struct mtd_info *mtd) { return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free); } EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes); /** * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB * @mtd: mtd info structure * * Works like mtd_ooblayout_count_bytes(), except it count ECC bytes. * * Returns zero on success, a negative error code otherwise. */ int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd) { return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc); } EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes); /* * Method to access the protection register area, present in some flash * devices. The user data is one time programmable but the factory data is read * only. 
*/ int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_get_fact_prot_info) return -EOPNOTSUPP; if (!len) return 0; return master->_get_fact_prot_info(master, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_info *master = mtd_get_master(mtd); *retlen = 0; if (!master->_read_fact_prot_reg) return -EOPNOTSUPP; if (!len) return 0; return master->_read_fact_prot_reg(master, from, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_get_user_prot_info) return -EOPNOTSUPP; if (!len) return 0; return master->_get_user_prot_info(master, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_info *master = mtd_get_master(mtd); *retlen = 0; if (!master->_read_user_prot_reg) return -EOPNOTSUPP; if (!len) return 0; return master->_read_user_prot_reg(master, from, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg); int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct mtd_info *master = mtd_get_master(mtd); int ret; *retlen = 0; if (!master->_write_user_prot_reg) return -EOPNOTSUPP; if (!len) return 0; ret = master->_write_user_prot_reg(master, to, len, retlen, buf); if (ret) return ret; /* * If no data could be written at all, we are out of memory and * must return -ENOSPC. */ return (*retlen) ? 
0 : -ENOSPC; } EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_lock_user_prot_reg) return -EOPNOTSUPP; if (!len) return 0; return master->_lock_user_prot_reg(master, from, len); } EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_erase_user_prot_reg) return -EOPNOTSUPP; if (!len) return 0; return master->_erase_user_prot_reg(master, from, len); } EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg); /* Chip-supported device locking */ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_lock) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) return -EINVAL; if (!len) return 0; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; } return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_lock); int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_unlock) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) return -EINVAL; if (!len) return 0; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; } return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_unlock); int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_info *master = mtd_get_master(mtd); if (!master->_is_locked) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) return -EINVAL; if (!len) return 0; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; } return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_is_locked); int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) { struct mtd_info *master = mtd_get_master(mtd); if (ofs < 0 || ofs >= mtd->size) return -EINVAL; if (!master->_block_isreserved) return 0; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isreserved); int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_info *master = mtd_get_master(mtd); if (ofs < 0 || ofs >= mtd->size) return -EINVAL; if (!master->_block_isbad) return 0; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isbad); int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_info *master = mtd_get_master(mtd); int ret; if (!master->_block_markbad) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size) return -EINVAL; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); if (ret) return ret; while (mtd->parent) { mtd->ecc_stats.badblocks++; mtd = 
mtd->parent; } return 0; } EXPORT_SYMBOL_GPL(mtd_block_markbad); ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO); /* * default_mtd_writev - the default writev method * @mtd: mtd device description object pointer * @vecs: the vectors to write * @count: count of vectors in @vecs * @to: the MTD device offset to write to * @retlen: on exit contains the count of bytes written to the MTD device. * * This function returns zero in case of success and a negative error code in * case of failure. */ static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen) { unsigned long i; size_t totlen = 0, thislen; int ret = 0; for (i = 0; i < count; i++) { if (!vecs[i].iov_len) continue; ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base); totlen += thislen; if (ret || thislen != vecs[i].iov_len) break; to += vecs[i].iov_len; } *retlen = totlen; return ret; } /* * mtd_writev - the vector-based MTD write method * @mtd: mtd device description object pointer * @vecs: the vectors to write * @count: count of vectors in @vecs * @to: the MTD device offset to write to * @retlen: on exit contains the count of bytes written to the MTD device. * * This function returns zero in case of success and a negative error code in * case of failure. */ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen) { struct mtd_info *master = mtd_get_master(mtd); *retlen = 0; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; if (!master->_writev) return default_mtd_writev(mtd, vecs, count, to, retlen); return master->_writev(master, vecs, count, mtd_get_master_ofs(mtd, to), retlen); } EXPORT_SYMBOL_GPL(mtd_writev); /** * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size * @mtd: mtd device description object pointer * @size: a pointer to the ideal or maximum size of the allocation, points * to the actual allocation size on success. * * This routine attempts to allocate a contiguous kernel buffer up to * the specified size, backing off the size of the request exponentially * until the request succeeds or until the allocation size falls below * the system page size. This attempts to make sure it does not adversely * impact system performance, so when allocating more than one page, we * ask the memory allocator to avoid re-trying, swapping, writing back * or performing I/O. * * Note, this function also makes sure that the allocated buffer is aligned to * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value. * * This is called, for example by mtd_{read,write} and jffs2_scan_medium, * to handle smaller (i.e. degraded) buffer allocations under low- or * fragmented-memory situations where such reduced allocations, from a * requested ideal, are allowed. * * Returns a pointer to the allocated buffer on success; otherwise, NULL. */ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size) { gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY; size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE); void *kbuf; *size = min_t(size_t, *size, KMALLOC_MAX_SIZE); while (*size > min_alloc) { kbuf = kmalloc(*size, flags); if (kbuf) return kbuf; *size >>= 1; *size = ALIGN(*size, mtd->writesize); } /* * For the last resort allocation allow 'kmalloc()' to do all sorts of * things (write-back, dropping caches, etc) by using GFP_KERNEL. 
*/ return kmalloc(*size, GFP_KERNEL); } EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); #ifdef CONFIG_PROC_FS /*====================================================================*/ /* Support for /proc/mtd */ static int mtd_proc_show(struct seq_file *m, void *v) { struct mtd_info *mtd; seq_puts(m, "dev: size erasesize name\n"); mutex_lock(&mtd_table_mutex); mtd_for_each_device(mtd) { seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n", mtd->index, (unsigned long long)mtd->size, mtd->erasesize, mtd->name); } mutex_unlock(&mtd_table_mutex); return 0; } #endif /* CONFIG_PROC_FS */ /*====================================================================*/ /* Init code */ static struct backing_dev_info * __init mtd_bdi_init(const char *name) { struct backing_dev_info *bdi; int ret; bdi = bdi_alloc(NUMA_NO_NODE); if (!bdi) return ERR_PTR(-ENOMEM); bdi->ra_pages = 0; bdi->io_pages = 0; /* * We put '-0' suffix to the name to get the same name format as we * used to get. Since this is called only once, we get a unique name. */ ret = bdi_register(bdi, "%.28s-0", name); if (ret) bdi_put(bdi); return ret ? ERR_PTR(ret) : bdi; } static struct proc_dir_entry *proc_mtd; static int __init init_mtd(void) { int ret; ret = class_register(&mtd_class); if (ret) goto err_reg; mtd_bdi = mtd_bdi_init("mtd"); if (IS_ERR(mtd_bdi)) { ret = PTR_ERR(mtd_bdi); goto err_bdi; } proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show); ret = init_mtdchar(); if (ret) goto out_procfs; dfs_dir_mtd = debugfs_create_dir("mtd", NULL); debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd, &mtd_expert_analysis_mode); return 0; out_procfs: if (proc_mtd) remove_proc_entry("mtd", NULL); bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); err_bdi: class_unregister(&mtd_class); err_reg: pr_err("Error registering mtd class or bdi: %d\n", ret); return ret; } static void __exit cleanup_mtd(void) { debugfs_remove_recursive(dfs_dir_mtd); cleanup_mtdchar(); if (proc_mtd) remove_proc_entry("mtd", NULL); class_unregister(&mtd_class); bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); idr_destroy(&mtd_idr); } module_init(init_mtd); module_exit(cleanup_mtd); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("Core MTD registration and access routines"); |
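/*
 * Editor's note (illustration, not part of mtdcore.c): a minimal sketch of an
 * MTD "user" module combining the helpers documented above -- get_mtd_device()
 * for a refcounted handle, mtd_kmalloc_up_to() for a writesize-aligned buffer
 * that may come back smaller than requested, mtd_read() for the I/O and
 * put_mtd_device() to drop the reference. The use of MTD index 0 and a 1 MiB
 * read length are arbitrary assumptions made for this demo only.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>

static int __init mtd_read_demo_init(void)
{
	struct mtd_info *mtd;
	size_t want = SZ_1M, retlen;
	void *buf;
	int ret;

	/* Obtain a validated, refcounted handle on MTD index 0 (assumed). */
	mtd = get_mtd_device(NULL, 0);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (want > mtd->size)
		want = mtd->size;

	/* The helper may back off and return a smaller, writesize-aligned buffer. */
	buf = mtd_kmalloc_up_to(mtd, &want);
	if (!buf) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = mtd_read(mtd, 0, want, &retlen, buf);
	if (mtd_is_bitflip(ret))
		ret = 0;	/* corrected bitflips are not fatal */
	if (ret)
		pr_err("mtd demo: read failed: %d\n", ret);
	else
		pr_info("mtd demo: read %zu bytes from mtd%d (%s)\n",
			retlen, mtd->index, mtd->name);

	kfree(buf);
out_put:
	put_mtd_device(mtd);
	return ret;
}

static void __exit mtd_read_demo_exit(void)
{
}

module_init(mtd_read_demo_init);
module_exit(mtd_read_demo_exit);
MODULE_DESCRIPTION("Illustrative MTD read demo (editor's example)");
MODULE_LICENSE("GPL");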
// SPDX-License-Identifier: GPL-2.0-only /* * OCB mode implementation * * Copyright: (c) 2014 Czech Technical University in Prague * (c) 2014 Volkswagen Group Research * Copyright (C) 2022 - 2024 Intel Corporation * Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz> * Funded by: Volkswagen Group Research */ #include <linux/delay.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/mac80211.h> #include <linux/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #define IEEE80211_OCB_HOUSEKEEPING_INTERVAL (60 * HZ) #define IEEE80211_OCB_PEER_INACTIVITY_LIMIT (240 * HZ) #define IEEE80211_OCB_MAX_STA_ENTRIES 128 /** * enum ocb_deferred_task_flags - mac80211 OCB deferred tasks * @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks * * These flags are used in @wrkq_flags field of &struct ieee80211_if_ocb */ enum ocb_deferred_task_flags { OCB_WORK_HOUSEKEEPING, }; void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; struct sta_info *sta; int band; /* XXX: Consider removing the least recently used entry and * allow new one to be added.
*/ if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n", sdata->name, addr); return; } ocb_dbg(sdata, "Adding new OCB station %pM\n", addr); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) { rcu_read_unlock(); return; } band = chanctx_conf->def.chan->band; rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); if (!sta) return; /* Add only mandatory rates for now */ sband = local->hw.wiphy->bands[band]; sta->sta.deflink.supp_rates[band] = ieee80211_mandatory_rates(sband); spin_lock(&ifocb->incomplete_lock); list_add(&sta->list, &ifocb->incomplete_stations); spin_unlock(&ifocb->incomplete_lock); wiphy_work_queue(local->hw.wiphy, &sdata->work); } static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta) __acquires(RCU) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 addr[ETH_ALEN]; memcpy(addr, sta->sta.addr, ETH_ALEN); ocb_dbg(sdata, "Adding new IBSS station %pM (dev=%s)\n", addr, sdata->name); sta_info_move_state(sta, IEEE80211_STA_AUTH); sta_info_move_state(sta, IEEE80211_STA_ASSOC); sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); rate_control_rate_init(&sta->deflink); /* If it fails, maybe we raced another insertion? */ if (sta_info_insert_rcu(sta)) return sta_info_get(sdata, addr); return sta; } static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; ocb_dbg(sdata, "Running ocb housekeeping\n"); ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT); mod_timer(&ifocb->housekeeping_timer, round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL)); } void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifocb->joined != true) return; spin_lock_bh(&ifocb->incomplete_lock); while (!list_empty(&ifocb->incomplete_stations)) { sta = list_first_entry(&ifocb->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifocb->incomplete_lock); ieee80211_ocb_finish_sta(sta); rcu_read_unlock(); spin_lock_bh(&ifocb->incomplete_lock); } spin_unlock_bh(&ifocb->incomplete_lock); if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags)) ieee80211_ocb_housekeeping(sdata); } static void ieee80211_ocb_housekeeping_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.ocb.housekeeping_timer); struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); wiphy_work_queue(local->hw.wiphy, &sdata->work); } void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; timer_setup(&ifocb->housekeeping_timer, ieee80211_ocb_housekeeping_timer, 0); INIT_LIST_HEAD(&ifocb->incomplete_stations); spin_lock_init(&ifocb->incomplete_lock); } int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, struct ocb_setup *setup) { struct ieee80211_chan_req chanreq = { .oper = setup->chandef }; struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID; int err; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifocb->joined == true) return -EINVAL; sdata->deflink.operating_11g_mode = true; sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; 
sdata->deflink.needed_rx_chains = sdata->local->rx_chains; err = ieee80211_link_use_channel(&sdata->deflink, &chanreq, IEEE80211_CHANCTX_SHARED); if (err) return err; ieee80211_bss_info_change_notify(sdata, changed); ifocb->joined = true; set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); wiphy_work_queue(local->hw.wiphy, &sdata->work); netif_carrier_on(sdata->dev); return 0; } int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct ieee80211_local *local = sdata->local; struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); ifocb->joined = false; sta_info_flush(sdata, -1); spin_lock_bh(&ifocb->incomplete_lock); while (!list_empty(&ifocb->incomplete_stations)) { sta = list_first_entry(&ifocb->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifocb->incomplete_lock); sta_info_free(local, sta); spin_lock_bh(&ifocb->incomplete_lock); } spin_unlock_bh(&ifocb->incomplete_lock); netif_carrier_off(sdata->dev); clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB); ieee80211_link_release_channel(&sdata->deflink); skb_queue_purge(&sdata->skb_queue); timer_delete_sync(&sdata->u.ocb.housekeeping_timer); /* If the timer fired while we waited for it, it will have * requeued the work. Now the work will be running again * but will not rearm the timer again because it checks * whether we are connected to the network or not -- at this * point we shouldn't be anymore. */ return 0; } |
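/*
 * Editor's note (illustration, not part of ocb.c): the housekeeping logic
 * above uses a common mac80211 pattern -- the timer callback only sets a flag
 * bit and queues the interface work, and the work handler later runs the
 * expensive part in process context and re-arms the timer. Below is a
 * stripped-down, self-contained sketch of that pattern using generic kernel
 * primitives (a plain timer plus the system workqueue instead of wiphy_work);
 * all names in this sketch are made up for the demo.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>

#define DEMO_HOUSEKEEPING_INTERVAL (60 * HZ)

enum demo_task_flags {
	DEMO_WORK_HOUSEKEEPING,
};

static struct {
	unsigned long wrkq_flags;
	struct timer_list housekeeping_timer;
	struct work_struct work;
} demo;

static void demo_housekeeping(void)
{
	pr_info("demo: running housekeeping\n");
	/* Re-arm for the next period, as ieee80211_ocb_housekeeping() does. */
	mod_timer(&demo.housekeeping_timer,
		  round_jiffies(jiffies + DEMO_HOUSEKEEPING_INTERVAL));
}

static void demo_work(struct work_struct *work)
{
	/* Process context: safe to do the actual maintenance here. */
	if (test_and_clear_bit(DEMO_WORK_HOUSEKEEPING, &demo.wrkq_flags))
		demo_housekeeping();
}

static void demo_timer_fn(struct timer_list *t)
{
	/* Atomic context: only flag the task and defer it to the work item. */
	set_bit(DEMO_WORK_HOUSEKEEPING, &demo.wrkq_flags);
	schedule_work(&demo.work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.work, demo_work);
	timer_setup(&demo.housekeeping_timer, demo_timer_fn, 0);
	mod_timer(&demo.housekeeping_timer,
		  round_jiffies(jiffies + DEMO_HOUSEKEEPING_INTERVAL));
	return 0;
}

static void __exit demo_exit(void)
{
	timer_delete_sync(&demo.housekeeping_timer);
	cancel_work_sync(&demo.work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Illustrative deferred-housekeeping demo (editor's example)");
MODULE_LICENSE("GPL");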
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/mmu_context.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * We want to know the real level where an entry is located ignoring any
 * folding of levels which may be happening. For example if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		if (ops->install_pte && pte_none(ptep_get(pte))) {
			pte_t new_pte;

			err = ops->install_pte(addr, addr + PAGE_SIZE, &new_pte,
					       walk);
			if (err)
				break;

			set_pte_at(walk->mm, addr, pte, new_pte);
			/* Non-present before, so for arches that need it. */
			if (!WARN_ON_ONCE(walk->no_vma))
				update_mmu_cache(walk->vma, addr, pte);
		} else {
			err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
			if (err)
				break;
		}
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		/*
		 * pte_offset_map() might apply user-specific validation.
		 * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
		 * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
		 * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
*/ if (walk->mm == &init_mm || addr >= TASK_SIZE) pte = pte_offset_kernel(pmd, addr); else pte = pte_offset_map(pmd, addr); if (pte) { err = walk_pte_range_inner(pte, addr, end, walk); if (walk->mm != &init_mm && addr < TASK_SIZE) pte_unmap(pte); } } else { pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (pte) { err = walk_pte_range_inner(pte, addr, end, walk); pte_unmap_unlock(pte, ptl); } } if (!pte) walk->action = ACTION_AGAIN; return err; } static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, struct mm_walk *walk) { pmd_t *pmd; unsigned long next; const struct mm_walk_ops *ops = walk->ops; bool has_handler = ops->pte_entry; bool has_install = ops->install_pte; int err = 0; int depth = real_depth(3); pmd = pmd_offset(pud, addr); do { again: next = pmd_addr_end(addr, end); if (pmd_none(*pmd)) { if (has_install) err = __pte_alloc(walk->mm, pmd); else if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) break; if (!has_install) continue; } walk->action = ACTION_SUBTREE; /* * This implies that each ->pmd_entry() handler * needs to know about pmd_trans_huge() pmds */ if (ops->pmd_entry) err = ops->pmd_entry(pmd, addr, next, walk); if (err) break; if (walk->action == ACTION_AGAIN) goto again; if (walk->action == ACTION_CONTINUE) continue; if (!has_handler) { /* No handlers for lower page tables. */ if (!has_install) continue; /* Nothing to do. */ /* * We are ONLY installing, so avoid unnecessarily * splitting a present huge page. */ if (pmd_present(*pmd) && (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) continue; } if (walk->vma) split_huge_pmd(walk->vma, pmd, addr); else if (pmd_leaf(*pmd) || !pmd_present(*pmd)) continue; /* Nothing to do. */ err = walk_pte_range(pmd, addr, next, walk); if (err) break; if (walk->action == ACTION_AGAIN) goto again; } while (pmd++, addr = next, addr != end); return err; } static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, struct mm_walk *walk) { pud_t *pud; unsigned long next; const struct mm_walk_ops *ops = walk->ops; bool has_handler = ops->pmd_entry || ops->pte_entry; bool has_install = ops->install_pte; int err = 0; int depth = real_depth(2); pud = pud_offset(p4d, addr); do { again: next = pud_addr_end(addr, end); if (pud_none(*pud)) { if (has_install) err = __pmd_alloc(walk->mm, pud, addr); else if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) break; if (!has_install) continue; } walk->action = ACTION_SUBTREE; if (ops->pud_entry) err = ops->pud_entry(pud, addr, next, walk); if (err) break; if (walk->action == ACTION_AGAIN) goto again; if (walk->action == ACTION_CONTINUE) continue; if (!has_handler) { /* No handlers for lower page tables. */ if (!has_install) continue; /* Nothing to do. */ /* * We are ONLY installing, so avoid unnecessarily * splitting a present huge page. */ if (pud_present(*pud) && (pud_trans_huge(*pud) || pud_devmap(*pud))) continue; } if (walk->vma) split_huge_pud(walk->vma, pud, addr); else if (pud_leaf(*pud) || !pud_present(*pud)) continue; /* Nothing to do. 
*/ if (pud_none(*pud)) goto again; err = walk_pmd_range(pud, addr, next, walk); if (err) break; } while (pud++, addr = next, addr != end); return err; } static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, struct mm_walk *walk) { p4d_t *p4d; unsigned long next; const struct mm_walk_ops *ops = walk->ops; bool has_handler = ops->pud_entry || ops->pmd_entry || ops->pte_entry; bool has_install = ops->install_pte; int err = 0; int depth = real_depth(1); p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) { if (has_install) err = __pud_alloc(walk->mm, p4d, addr); else if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) break; if (!has_install) continue; } if (ops->p4d_entry) { err = ops->p4d_entry(p4d, addr, next, walk); if (err) break; } if (has_handler || has_install) err = walk_pud_range(p4d, addr, next, walk); if (err) break; } while (p4d++, addr = next, addr != end); return err; } static int walk_pgd_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { pgd_t *pgd; unsigned long next; const struct mm_walk_ops *ops = walk->ops; bool has_handler = ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry; bool has_install = ops->install_pte; int err = 0; if (walk->pgd) pgd = walk->pgd + pgd_index(addr); else pgd = pgd_offset(walk->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) { if (has_install) err = __p4d_alloc(walk->mm, pgd, addr); else if (ops->pte_hole) err = ops->pte_hole(addr, next, 0, walk); if (err) break; if (!has_install) continue; } if (ops->pgd_entry) { err = ops->pgd_entry(pgd, addr, next, walk); if (err) break; } if (has_handler || has_install) err = walk_p4d_range(pgd, addr, next, walk); if (err) break; } while (pgd++, addr = next, addr != end); return err; } #ifdef CONFIG_HUGETLB_PAGE static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr, unsigned long end) { unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h); return boundary < end ? boundary : end; } static int walk_hugetlb_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct hstate *h = hstate_vma(vma); unsigned long next; unsigned long hmask = huge_page_mask(h); unsigned long sz = huge_page_size(h); pte_t *pte; const struct mm_walk_ops *ops = walk->ops; int err = 0; hugetlb_vma_lock_read(vma); do { next = hugetlb_entry_end(h, addr, end); pte = hugetlb_walk(vma, addr & hmask, sz); if (pte) err = ops->hugetlb_entry(pte, hmask, addr, next, walk); else if (ops->pte_hole) err = ops->pte_hole(addr, next, -1, walk); if (err) break; } while (addr = next, addr != end); hugetlb_vma_unlock_read(vma); return err; } #else /* CONFIG_HUGETLB_PAGE */ static int walk_hugetlb_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { return 0; } #endif /* CONFIG_HUGETLB_PAGE */ /* * Decide whether we really walk over the current vma on [@start, @end) * or skip it via the returned value. Return 0 if we do walk over the * current vma, and return 1 if we skip the vma. Negative values means * error, where we abort the current walk. 
*/ static int walk_page_test(unsigned long start, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; const struct mm_walk_ops *ops = walk->ops; if (ops->test_walk) return ops->test_walk(start, end, walk); /* * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP * range, so we don't walk over it as we do for normal vmas. However, * Some callers are interested in handling hole range and they don't * want to just ignore any single address range. Such users certainly * define their ->pte_hole() callbacks, so let's delegate them to handle * vma(VM_PFNMAP). */ if (vma->vm_flags & VM_PFNMAP) { int err = 1; if (ops->pte_hole) err = ops->pte_hole(start, end, -1, walk); return err ? err : 1; } return 0; } static int __walk_page_range(unsigned long start, unsigned long end, struct mm_walk *walk) { int err = 0; struct vm_area_struct *vma = walk->vma; const struct mm_walk_ops *ops = walk->ops; bool is_hugetlb = is_vm_hugetlb_page(vma); /* We do not support hugetlb PTE installation. */ if (ops->install_pte && is_hugetlb) return -EINVAL; if (ops->pre_vma) { err = ops->pre_vma(start, end, walk); if (err) return err; } if (is_hugetlb) { if (ops->hugetlb_entry) err = walk_hugetlb_range(start, end, walk); } else err = walk_pgd_range(start, end, walk); if (ops->post_vma) ops->post_vma(walk); return err; } static inline void process_mm_walk_lock(struct mm_struct *mm, enum page_walk_lock walk_lock) { if (walk_lock == PGWALK_RDLOCK) mmap_assert_locked(mm); else mmap_assert_write_locked(mm); } static inline void process_vma_walk_lock(struct vm_area_struct *vma, enum page_walk_lock walk_lock) { #ifdef CONFIG_PER_VMA_LOCK switch (walk_lock) { case PGWALK_WRLOCK: vma_start_write(vma); break; case PGWALK_WRLOCK_VERIFY: vma_assert_write_locked(vma); break; case PGWALK_RDLOCK: /* PGWALK_RDLOCK is handled by process_mm_walk_lock */ break; } #endif } /* * See the comment for walk_page_range(), this performs the heavy lifting of the * operation, only sets no restrictions on how the walk proceeds. * * We usually restrict the ability to install PTEs, but this functionality is * available to internal memory management code and provided in mm/internal.h. */ int walk_page_range_mm(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private) { int err = 0; unsigned long next; struct vm_area_struct *vma; struct mm_walk walk = { .ops = ops, .mm = mm, .private = private, }; if (start >= end) return -EINVAL; if (!walk.mm) return -EINVAL; process_mm_walk_lock(walk.mm, ops->walk_lock); vma = find_vma(walk.mm, start); do { if (!vma) { /* after the last vma */ walk.vma = NULL; next = end; if (ops->pte_hole) err = ops->pte_hole(start, next, -1, &walk); } else if (start < vma->vm_start) { /* outside vma */ walk.vma = NULL; next = min(end, vma->vm_start); if (ops->pte_hole) err = ops->pte_hole(start, next, -1, &walk); } else { /* inside vma */ process_vma_walk_lock(vma, ops->walk_lock); walk.vma = vma; next = min(end, vma->vm_end); vma = find_vma(mm, vma->vm_end); err = walk_page_test(start, next, &walk); if (err > 0) { /* * positive return values are purely for * controlling the pagewalk, so should never * be passed to the callers. */ err = 0; continue; } if (err < 0) break; err = __walk_page_range(start, next, &walk); } if (err) break; } while (start = next, start < end); return err; } /* * Determine if the walk operations specified are permitted to be used for a * page table walk. 
* * This check is performed on all functions which are parameterised by walk * operations and exposed in include/linux/pagewalk.h. * * Internal memory management code can use the walk_page_range_mm() function to * be able to use all page walking operations. */ static bool check_ops_valid(const struct mm_walk_ops *ops) { /* * The installation of PTEs is solely under the control of memory * management logic and subject to many subtle locking, security and * cache considerations so we cannot permit other users to do so, and * certainly not for exported symbols. */ if (ops->install_pte) return false; return true; } /** * walk_page_range - walk page table with caller specific callbacks * @mm: mm_struct representing the target process of page table walk * @start: start address of the virtual address range * @end: end address of the virtual address range * @ops: operation to call during the walk * @private: private data for callbacks' usage * * Recursively walk the page table tree of the process represented by @mm * within the virtual address range [@start, @end). During walking, we can do * some caller-specific works for each entry, by setting up pmd_entry(), * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these * callbacks, the associated entries/pages are just ignored. * The return values of these callbacks are commonly defined like below: * * - 0 : succeeded to handle the current entry, and if you don't reach the * end address yet, continue to walk. * - >0 : succeeded to handle the current entry, and return to the caller * with caller specific value. * - <0 : failed to handle the current entry, and return to the caller * with error code. * * Before starting to walk page table, some callers want to check whether * they really want to walk over the current vma, typically by checking * its vm_flags. walk_page_test() and @ops->test_walk() are used for this * purpose. * * If operations need to be staged before and committed after a vma is walked, * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(), * since it is intended to handle commit-type operations, can't return any * errors. * * struct mm_walk keeps current values of some common data like vma and pmd, * which are useful for the access from callbacks. If you want to pass some * caller-specific data to callbacks, @private should be helpful. * * Locking: * Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock, * because these function traverse vma list and/or access to vma's data. */ int walk_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private) { if (!check_ops_valid(ops)) return -EINVAL; return walk_page_range_mm(mm, start, end, ops, private); } /** * walk_page_range_novma - walk a range of pagetables not backed by a vma * @mm: mm_struct representing the target process of page table walk * @start: start address of the virtual address range * @end: end address of the virtual address range * @ops: operation to call during the walk * @pgd: pgd to walk if different from mm->pgd * @private: private data for callbacks' usage * * Similar to walk_page_range() but can walk any page tables even if they are * not backed by VMAs. Because 'unusual' entries may be walked this function * will also not lock the PTEs for the pte_entry() callback. This is useful for * walking the kernel pages tables or page tables for firmware. 
* * Note: Be careful to walk the kernel pages tables, the caller may be need to * take other effective approaches (mmap lock may be insufficient) to prevent * the intermediate kernel page tables belonging to the specified address range * from being freed (e.g. memory hot-remove). */ int walk_page_range_novma(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, pgd_t *pgd, void *private) { struct mm_walk walk = { .ops = ops, .mm = mm, .pgd = pgd, .private = private, .no_vma = true }; if (start >= end || !walk.mm) return -EINVAL; if (!check_ops_valid(ops)) return -EINVAL; /* * 1) For walking the user virtual address space: * * The mmap lock protects the page walker from changes to the page * tables during the walk. However a read lock is insufficient to * protect those areas which don't have a VMA as munmap() detaches * the VMAs before downgrading to a read lock and actually tearing * down PTEs/page tables. In which case, the mmap write lock should * be hold. * * 2) For walking the kernel virtual address space: * * The kernel intermediate page tables usually do not be freed, so * the mmap map read lock is sufficient. But there are some exceptions. * E.g. memory hot-remove. In which case, the mmap lock is insufficient * to prevent the intermediate kernel pages tables belonging to the * specified address range from being freed. The caller should take * other actions to prevent this race. */ if (mm == &init_mm) mmap_assert_locked(walk.mm); else mmap_assert_write_locked(walk.mm); return walk_pgd_range(start, end, &walk); } int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private) { struct mm_walk walk = { .ops = ops, .mm = vma->vm_mm, .vma = vma, .private = private, }; if (start >= end || !walk.mm) return -EINVAL; if (start < vma->vm_start || end > vma->vm_end) return -EINVAL; if (!check_ops_valid(ops)) return -EINVAL; process_mm_walk_lock(walk.mm, ops->walk_lock); process_vma_walk_lock(vma, ops->walk_lock); return __walk_page_range(start, end, &walk); } int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, void *private) { struct mm_walk walk = { .ops = ops, .mm = vma->vm_mm, .vma = vma, .private = private, }; if (!walk.mm) return -EINVAL; if (!check_ops_valid(ops)) return -EINVAL; process_mm_walk_lock(walk.mm, ops->walk_lock); process_vma_walk_lock(vma, ops->walk_lock); return __walk_page_range(vma->vm_start, vma->vm_end, &walk); } /** * walk_page_mapping - walk all memory areas mapped into a struct address_space. * @mapping: Pointer to the struct address_space * @first_index: First page offset in the address_space * @nr: Number of incremental page offsets to cover * @ops: operation to call during the walk * @private: private data for callbacks' usage * * This function walks all memory areas mapped into a struct address_space. * The walk is limited to only the given page-size index range, but if * the index boundaries cross a huge page-table entry, that entry will be * included. * * Also see walk_page_range() for additional information. * * Locking: * This function can't require that the struct mm_struct::mmap_lock is held, * since @mapping may be mapped by multiple processes. Instead * @mapping->i_mmap_rwsem must be held. This might have implications in the * callbacks, and it's up tho the caller to ensure that the * struct mm_struct::mmap_lock is not needed. 
* * Also this means that a caller can't rely on the struct * vm_area_struct::vm_flags to be constant across a call, * except for immutable flags. Callers requiring this shouldn't use * this function. * * Return: 0 on success, negative error code on failure, positive number on * caller defined premature termination. */ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index, pgoff_t nr, const struct mm_walk_ops *ops, void *private) { struct mm_walk walk = { .ops = ops, .private = private, }; struct vm_area_struct *vma; pgoff_t vba, vea, cba, cea; unsigned long start_addr, end_addr; int err = 0; if (!check_ops_valid(ops)) return -EINVAL; lockdep_assert_held(&mapping->i_mmap_rwsem); vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index, first_index + nr - 1) { /* Clip to the vma */ vba = vma->vm_pgoff; vea = vba + vma_pages(vma); cba = first_index; cba = max(cba, vba); cea = first_index + nr; cea = min(cea, vea); start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start; end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start; if (start_addr >= end_addr) continue; walk.vma = vma; walk.mm = vma->vm_mm; err = walk_page_test(vma->vm_start, vma->vm_end, &walk); if (err > 0) { err = 0; break; } else if (err < 0) break; err = __walk_page_range(start_addr, end_addr, &walk); if (err) break; } return err; } /** * folio_walk_start - walk the page tables to a folio * @fw: filled with information on success. * @vma: the VMA. * @addr: the virtual address to use for the page table walk. * @flags: flags modifying which folios to walk to. * * Walk the page tables using @addr in a given @vma to a mapped folio and * return the folio, making sure that the page table entry referenced by * @addr cannot change until folio_walk_end() was called. * * As default, this function returns only folios that are not special (e.g., not * the zeropage) and never returns folios that are supposed to be ignored by the * VM as documented by vm_normal_page(). If requested, zeropages will be * returned as well. * * As default, this function only considers present page table entries. * If requested, it will also consider migration entries. * * If this function returns NULL it might either indicate "there is nothing" or * "there is nothing suitable". * * On success, @fw is filled and the function returns the folio while the PTL * is still held and folio_walk_end() must be called to clean up, * releasing any held locks. The returned folio must *not* be used after the * call to folio_walk_end(), unless a short-term folio reference is taken before * that call. * * @fw->page will correspond to the page that is effectively referenced by * @addr. However, for migration entries and shared zeropages @fw->page is * set to NULL. Note that large folios might be mapped by multiple page table * entries, and this function will always only lookup a single entry as * specified by @addr, which might or might not cover more than a single page of * the returned folio. * * This function must *not* be used as a naive replacement for * get_user_pages() / pin_user_pages(), especially not to perform DMA or * to carelessly modify page content. This function may *only* be used to grab * short-term folio references, never to grab long-term folio references. * * Using the page table entry pointers in @fw for reading or modifying the * entry should be avoided where possible: however, there might be valid * use cases. * * WARNING: Modifying page table entries in hugetlb VMAs requires a lot of care. 
* For example, PMD page table sharing might require prior unsharing. Also, * logical hugetlb entries might span multiple physical page table entries, * which *must* be modified in a single operation (set_huge_pte_at(), * huge_ptep_set_*, ...). Note that the page table entry stored in @fw might * not correspond to the first physical entry of a logical hugetlb entry. * * The mmap lock must be held in read mode. * * Return: folio pointer on success, otherwise NULL. */ struct folio *folio_walk_start(struct folio_walk *fw, struct vm_area_struct *vma, unsigned long addr, folio_walk_flags_t flags) { unsigned long entry_size; bool expose_page = true; struct page *page; pud_t *pudp, pud; pmd_t *pmdp, pmd; pte_t *ptep, pte; spinlock_t *ptl; pgd_t *pgdp; p4d_t *p4dp; mmap_assert_locked(vma->vm_mm); vma_pgtable_walk_begin(vma); if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end)) goto not_found; pgdp = pgd_offset(vma->vm_mm, addr); if (pgd_none_or_clear_bad(pgdp)) goto not_found; p4dp = p4d_offset(pgdp, addr); if (p4d_none_or_clear_bad(p4dp)) goto not_found; pudp = pud_offset(p4dp, addr); pud = pudp_get(pudp); if (pud_none(pud)) goto not_found; if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) && (!pud_present(pud) || pud_leaf(pud))) { ptl = pud_lock(vma->vm_mm, pudp); pud = pudp_get(pudp); entry_size = PUD_SIZE; fw->level = FW_LEVEL_PUD; fw->pudp = pudp; fw->pud = pud; /* * TODO: FW_MIGRATION support for PUD migration entries * once there are relevant users. */ if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) { spin_unlock(ptl); goto not_found; } else if (!pud_leaf(pud)) { spin_unlock(ptl); goto pmd_table; } /* * TODO: vm_normal_page_pud() will be handy once we want to * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs. */ page = pud_page(pud); goto found; } pmd_table: VM_WARN_ON_ONCE(!pud_present(pud) || pud_leaf(pud)); pmdp = pmd_offset(pudp, addr); pmd = pmdp_get_lockless(pmdp); if (pmd_none(pmd)) goto not_found; if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) && (!pmd_present(pmd) || pmd_leaf(pmd))) { ptl = pmd_lock(vma->vm_mm, pmdp); pmd = pmdp_get(pmdp); entry_size = PMD_SIZE; fw->level = FW_LEVEL_PMD; fw->pmdp = pmdp; fw->pmd = pmd; if (pmd_none(pmd)) { spin_unlock(ptl); goto not_found; } else if (pmd_present(pmd) && !pmd_leaf(pmd)) { spin_unlock(ptl); goto pte_table; } else if (pmd_present(pmd)) { page = vm_normal_page_pmd(vma, addr, pmd); if (page) { goto found; } else if ((flags & FW_ZEROPAGE) && is_huge_zero_pmd(pmd)) { page = pfn_to_page(pmd_pfn(pmd)); expose_page = false; goto found; } } else if ((flags & FW_MIGRATION) && is_pmd_migration_entry(pmd)) { swp_entry_t entry = pmd_to_swp_entry(pmd); page = pfn_swap_entry_to_page(entry); expose_page = false; goto found; } spin_unlock(ptl); goto not_found; } pte_table: VM_WARN_ON_ONCE(!pmd_present(pmd) || pmd_leaf(pmd)); ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl); if (!ptep) goto not_found; pte = ptep_get(ptep); entry_size = PAGE_SIZE; fw->level = FW_LEVEL_PTE; fw->ptep = ptep; fw->pte = pte; if (pte_present(pte)) { page = vm_normal_page(vma, addr, pte); if (page) goto found; if ((flags & FW_ZEROPAGE) && is_zero_pfn(pte_pfn(pte))) { page = pfn_to_page(pte_pfn(pte)); expose_page = false; goto found; } } else if (!pte_none(pte)) { swp_entry_t entry = pte_to_swp_entry(pte); if ((flags & FW_MIGRATION) && is_migration_entry(entry)) { page = pfn_swap_entry_to_page(entry); expose_page = false; goto found; } } pte_unmap_unlock(ptep, ptl); not_found: vma_pgtable_walk_end(vma); return NULL; found: if (expose_page) /* 
Note: Offset from the mapped page, not the folio start. */
		fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
	else
		fw->page = NULL;
	fw->ptl = ptl;
	return page_folio(page);
}
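/*
 * Illustrative sketches, not part of pagewalk.c. The first shows a minimal
 * walk_page_range() user that counts present PTEs (all "count_*" names are
 * hypothetical); the second shows the folio_walk_start()/folio_walk_end()
 * pattern for taking a short-term folio reference, as described in the
 * kernel-doc above. Both assume the caller holds the mmap read lock.
 */
#if 0	/* example only */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(ptep_get(pte)))
		(*nr_present)++;
	return 0;
}

static const struct mm_walk_ops count_walk_ops = {
	.pte_entry	= count_pte_entry,
	.walk_lock	= PGWALK_RDLOCK,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long nr_present = 0;

	/* Returns 0 or a negative error; positive values never leak out. */
	walk_page_range(mm, start, end, &count_walk_ops, &nr_present);
	return nr_present;
}

static struct folio *grab_mapped_folio(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct folio_walk fw;
	struct folio *folio;

	folio = folio_walk_start(&fw, vma, addr, 0);
	if (!folio)
		return NULL;

	/* Take a short-term reference before the PTL is dropped. */
	folio_get(folio);
	folio_walk_end(&fw, vma);
	return folio;
}
#endif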
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TTL modification target for IP tables
 * (C) 2000,2005 by Harald Welte <laforge@netfilter.org>
 *
 * Hop Limit modification target for ip6tables
 * Maciej Soltysiak <solt@dns.toxicfilms.tv>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_TTL.h>
#include <linux/netfilter_ipv6/ip6t_HL.h>

MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
MODULE_DESCRIPTION("Xtables: Hoplimit/TTL Limit field modification target");
MODULE_LICENSE("GPL");

static unsigned int
ttl_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct iphdr *iph;
	const struct ipt_TTL_info *info = par->targinfo;
	int new_ttl;

	if (skb_ensure_writable(skb, sizeof(*iph)))
		return NF_DROP;

	iph = ip_hdr(skb);

	switch (info->mode) {
	case IPT_TTL_SET:
		new_ttl = info->ttl;
		break;
	case IPT_TTL_INC:
		new_ttl = iph->ttl + info->ttl;
		if (new_ttl > 255)
			new_ttl = 255;
		break;
	case IPT_TTL_DEC:
		new_ttl = iph->ttl - info->ttl;
		if (new_ttl < 0)
			new_ttl = 0;
		break;
	default:
		new_ttl = iph->ttl;
		break;
	}

	if (new_ttl != iph->ttl) {
		csum_replace2(&iph->check, htons(iph->ttl << 8),
					   htons(new_ttl << 8));
		iph->ttl = new_ttl;
	}

	return XT_CONTINUE;
}

static unsigned int
hl_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct ipv6hdr *ip6h;
	const struct ip6t_HL_info *info = par->targinfo;
	int new_hl;

	if (skb_ensure_writable(skb, sizeof(*ip6h)))
		return NF_DROP;

	ip6h = ipv6_hdr(skb);

	switch (info->mode) {
	case IP6T_HL_SET:
		new_hl = info->hop_limit;
		break;
	case IP6T_HL_INC:
		new_hl = ip6h->hop_limit + info->hop_limit;
		if (new_hl > 255)
			new_hl = 255;
		break;
	case IP6T_HL_DEC:
		new_hl = ip6h->hop_limit - info->hop_limit;
		if (new_hl < 0)
			new_hl = 0;
		break;
	default:
		new_hl = ip6h->hop_limit;
		break;
	}

	ip6h->hop_limit = new_hl;

	return XT_CONTINUE;
}

static int ttl_tg_check(const struct xt_tgchk_param *par)
{
	const struct ipt_TTL_info *info = par->targinfo;

	if (info->mode > IPT_TTL_MAXMODE)
		return -EINVAL;
	if (info->mode != IPT_TTL_SET && info->ttl == 0)
		return -EINVAL;
	return 0;
}

static int hl_tg6_check(const struct xt_tgchk_param *par)
{
	const struct ip6t_HL_info *info = par->targinfo;

	if (info->mode > IP6T_HL_MAXMODE)
		return -EINVAL;
	if (info->mode != IP6T_HL_SET && info->hop_limit == 0)
		return -EINVAL;
	return 0;
}

static struct xt_target hl_tg_reg[] __read_mostly = {
	{
		.name		= "TTL",
		.revision	= 0,
		.family		= NFPROTO_IPV4,
		.target		= ttl_tg,
		.targetsize	= sizeof(struct ipt_TTL_info),
		.table		= "mangle",
		.checkentry	= ttl_tg_check,
		.me		= THIS_MODULE,
	},
	{
		.name		= "HL",
		.revision	= 0,
		.family		= NFPROTO_IPV6,
		.target		= hl_tg6,
		.targetsize	= sizeof(struct ip6t_HL_info),
		.table		= "mangle",
		.checkentry	= hl_tg6_check,
		.me		= THIS_MODULE,
	},
};

static int __init hl_tg_init(void)
{
	return xt_register_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg));
}

static void __exit hl_tg_exit(void)
{
	xt_unregister_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg));
}

module_init(hl_tg_init);
module_exit(hl_tg_exit);
MODULE_ALIAS("ipt_TTL");
MODULE_ALIAS("ip6t_HL");
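/*
 * Usage sketch, not part of this file: the two targets above are normally
 * driven from the mangle table, for example (options as provided by the
 * userspace libxt_TTL/libip6t_HL extensions):
 *
 *   iptables  -t mangle -A PREROUTING  -j TTL --ttl-set 64
 *   ip6tables -t mangle -A POSTROUTING -j HL  --hl-dec 1
 *
 * ttl_tg_check()/hl_tg6_check() reject a mode above *_MAXMODE and a zero
 * increment/decrement, so such rules fail at insertion time rather than
 * becoming silent no-ops.
 */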
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Link physical devices with ACPI devices support
 *
 * Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2005 Intel Corp.
*/ #define pr_fmt(fmt) "ACPI: " fmt #include <linux/acpi_iort.h> #include <linux/export.h> #include <linux/init.h> #include <linux/list.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/platform_device.h> #include "internal.h" static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); #define PHYSICAL_NODE_STRING "physical_node" #define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10) int register_acpi_bus_type(struct acpi_bus_type *type) { if (acpi_disabled) return -ENODEV; if (type && type->match && type->find_companion) { down_write(&bus_type_sem); list_add_tail(&type->list, &bus_type_list); up_write(&bus_type_sem); pr_info("bus type %s registered\n", type->name); return 0; } return -ENODEV; } EXPORT_SYMBOL_GPL(register_acpi_bus_type); int unregister_acpi_bus_type(struct acpi_bus_type *type) { if (acpi_disabled) return 0; if (type) { down_write(&bus_type_sem); list_del_init(&type->list); up_write(&bus_type_sem); pr_info("bus type %s unregistered\n", type->name); return 0; } return -ENODEV; } EXPORT_SYMBOL_GPL(unregister_acpi_bus_type); static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) { struct acpi_bus_type *tmp, *ret = NULL; down_read(&bus_type_sem); list_for_each_entry(tmp, &bus_type_list, list) { if (tmp->match(dev)) { ret = tmp; break; } } up_read(&bus_type_sem); return ret; } #define FIND_CHILD_MIN_SCORE 1 #define FIND_CHILD_MID_SCORE 2 #define FIND_CHILD_MAX_SCORE 3 static int match_any(struct acpi_device *adev, void *not_used) { return 1; } static bool acpi_dev_has_children(struct acpi_device *adev) { return acpi_dev_for_each_child(adev, match_any, NULL) > 0; } static int find_child_checks(struct acpi_device *adev, bool check_children) { unsigned long long sta; acpi_status status; if (check_children && !acpi_dev_has_children(adev)) return -ENODEV; status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); if (status == AE_NOT_FOUND) { /* * Special case: backlight device objects without _STA are * preferred to other objects with the same _ADR value, because * it is more likely that they are actually useful. */ if (adev->pnp.type.backlight) return FIND_CHILD_MID_SCORE; return FIND_CHILD_MIN_SCORE; } if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) return -ENODEV; /* * If the device has a _HID returning a valid ACPI/PNP device ID, it is * better to make it look less attractive here, so that the other device * with the same _ADR value (that may not have a valid device ID) can be * matched going forward. [This means a second spec violation in a row, * so whatever we do here is best effort anyway.] */ if (adev->pnp.type.platform_id) return FIND_CHILD_MIN_SCORE; return FIND_CHILD_MAX_SCORE; } struct find_child_walk_data { struct acpi_device *adev; u64 address; int score; bool check_sta; bool check_children; }; static int check_one_child(struct acpi_device *adev, void *data) { struct find_child_walk_data *wd = data; int score; if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address) return 0; if (!wd->adev) { /* * This is the first matching object, so save it. If it is not * necessary to look for any other matching objects, stop the * search. */ wd->adev = adev; return !(wd->check_sta || wd->check_children); } /* * There is more than one matching device object with the same _ADR * value. That really is unexpected, so we are kind of beyond the scope * of the spec here. 
We have to choose which one to return, though. * * First, get the score for the previously found object and terminate * the walk if it is maximum. */ if (!wd->score) { score = find_child_checks(wd->adev, wd->check_children); if (score == FIND_CHILD_MAX_SCORE) return 1; wd->score = score; } /* * Second, if the object that has just been found has a better score, * replace the previously found one with it and terminate the walk if * the new score is maximum. */ score = find_child_checks(adev, wd->check_children); if (score > wd->score) { wd->adev = adev; if (score == FIND_CHILD_MAX_SCORE) return 1; wd->score = score; } /* Continue, because there may be better matches. */ return 0; } static struct acpi_device *acpi_find_child(struct acpi_device *parent, u64 address, bool check_children, bool check_sta) { struct find_child_walk_data wd = { .address = address, .check_children = check_children, .check_sta = check_sta, .adev = NULL, .score = 0, }; if (parent) acpi_dev_for_each_child(parent, check_one_child, &wd); return wd.adev; } struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children) { return acpi_find_child(parent, address, check_children, true); } EXPORT_SYMBOL_GPL(acpi_find_child_device); struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev, acpi_bus_address adr) { return acpi_find_child(adev, adr, false, false); } EXPORT_SYMBOL_GPL(acpi_find_child_by_adr); static void acpi_physnode_link_name(char *buf, unsigned int node_id) { if (node_id > 0) snprintf(buf, PHYSICAL_NODE_NAME_SIZE, PHYSICAL_NODE_STRING "%u", node_id); else strcpy(buf, PHYSICAL_NODE_STRING); } int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) { struct acpi_device_physical_node *physical_node, *pn; char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; if (has_acpi_companion(dev)) { if (acpi_dev) { dev_warn(dev, "ACPI companion already set\n"); return -EINVAL; } else { acpi_dev = ACPI_COMPANION(dev); } } if (!acpi_dev) return -EINVAL; acpi_dev_get(acpi_dev); get_device(dev); physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); if (!physical_node) { retval = -ENOMEM; goto err; } mutex_lock(&acpi_dev->physical_node_lock); /* * Keep the list sorted by node_id so that the IDs of removed nodes can * be recycled easily. */ physnode_list = &acpi_dev->physical_node_list; node_id = 0; list_for_each_entry(pn, &acpi_dev->physical_node_list, node) { /* Sanity check. 
*/ if (pn->dev == dev) { mutex_unlock(&acpi_dev->physical_node_lock); dev_warn(dev, "Already associated with ACPI node\n"); kfree(physical_node); if (ACPI_COMPANION(dev) != acpi_dev) goto err; put_device(dev); acpi_dev_put(acpi_dev); return 0; } if (pn->node_id == node_id) { physnode_list = &pn->node; node_id++; } } physical_node->node_id = node_id; physical_node->dev = dev; list_add(&physical_node->node, physnode_list); acpi_dev->physical_node_count++; if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); if (retval) dev_err(&acpi_dev->dev, "Failed to create link %s (%d)\n", physical_node_name, retval); retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj, "firmware_node"); if (retval) dev_err(dev, "Failed to create link firmware_node (%d)\n", retval); mutex_unlock(&acpi_dev->physical_node_lock); if (acpi_dev->wakeup.flags.valid) device_set_wakeup_capable(dev, true); return 0; err: ACPI_COMPANION_SET(dev, NULL); put_device(dev); acpi_dev_put(acpi_dev); return retval; } EXPORT_SYMBOL_GPL(acpi_bind_one); int acpi_unbind_one(struct device *dev) { struct acpi_device *acpi_dev = ACPI_COMPANION(dev); struct acpi_device_physical_node *entry; if (!acpi_dev) return 0; mutex_lock(&acpi_dev->physical_node_lock); list_for_each_entry(entry, &acpi_dev->physical_node_list, node) if (entry->dev == dev) { char physnode_name[PHYSICAL_NODE_NAME_SIZE]; list_del(&entry->node); acpi_dev->physical_node_count--; acpi_physnode_link_name(physnode_name, entry->node_id); sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name); sysfs_remove_link(&dev->kobj, "firmware_node"); ACPI_COMPANION_SET(dev, NULL); /* Drop references taken by acpi_bind_one(). */ put_device(dev); acpi_dev_put(acpi_dev); kfree(entry); break; } mutex_unlock(&acpi_dev->physical_node_lock); return 0; } EXPORT_SYMBOL_GPL(acpi_unbind_one); void acpi_device_notify(struct device *dev) { struct acpi_device *adev; int ret; ret = acpi_bind_one(dev, NULL); if (ret) { struct acpi_bus_type *type = acpi_get_bus_type(dev); if (!type) goto err; adev = type->find_companion(dev); if (!adev) { dev_dbg(dev, "ACPI companion not found\n"); goto err; } ret = acpi_bind_one(dev, adev); if (ret) goto err; if (type->setup) { type->setup(dev); goto done; } } else { adev = ACPI_COMPANION(dev); if (dev_is_pci(dev)) { pci_acpi_setup(dev, adev); goto done; } else if (dev_is_platform(dev)) { acpi_configure_pmsi_domain(dev); } } if (adev->handler && adev->handler->bind) adev->handler->bind(dev); done: acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n", dev_name(dev)); return; err: dev_dbg(dev, "No ACPI support\n"); } void acpi_device_notify_remove(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); if (!adev) return; if (dev_is_pci(dev)) pci_acpi_cleanup(dev, adev); else if (adev->handler && adev->handler->unbind) adev->handler->unbind(dev); acpi_unbind_one(dev); } |
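/*
 * Illustrative sketch, not part of this file: a bus that wants ACPI
 * companions attached in acpi_device_notify() registers a struct
 * acpi_bus_type providing at least .match and .find_companion, which
 * register_acpi_bus_type() above requires. "foo_bus_type" and
 * "foo_dev_adr()" are hypothetical.
 */
#if 0	/* example only */
static bool foo_acpi_match(struct device *dev)
{
	return dev->bus == &foo_bus_type;
}

static struct acpi_device *foo_acpi_find_companion(struct device *dev)
{
	/* Look the child up by its _ADR under the parent's companion. */
	return acpi_find_child_device(ACPI_COMPANION(dev->parent),
				      foo_dev_adr(dev), false);
}

static struct acpi_bus_type foo_acpi_bus = {
	.name		= "foo",
	.match		= foo_acpi_match,
	.find_companion	= foo_acpi_find_companion,
};

static int __init foo_acpi_init(void)
{
	return register_acpi_bus_type(&foo_acpi_bus);
}
#endif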
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2001 Vojtech Pavlik
 *  Copyright (c) 2006-2007 Jiri Kosina
 */
/*
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */
#ifndef __HID_H
#define __HID_H

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h> /* hid_device_id */
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <uapi/linux/hid.h>
#include <linux/hid_bpf.h>

/*
 * We parse each description item into this structure. Short items data
 * values are expanded to 32-bit signed int, long items contain a pointer
 * into the data area.
*/ struct hid_item { unsigned format; __u8 size; __u8 type; __u8 tag; union { __u8 u8; __s8 s8; __u16 u16; __s16 s16; __u32 u32; __s32 s32; const __u8 *longdata; } data; }; /* * HID report item format */ #define HID_ITEM_FORMAT_SHORT 0 #define HID_ITEM_FORMAT_LONG 1 /* * Special tag indicating long items */ #define HID_ITEM_TAG_LONG 15 /* * HID report descriptor item type (prefix bit 2,3) */ #define HID_ITEM_TYPE_MAIN 0 #define HID_ITEM_TYPE_GLOBAL 1 #define HID_ITEM_TYPE_LOCAL 2 #define HID_ITEM_TYPE_RESERVED 3 /* * HID report descriptor main item tags */ #define HID_MAIN_ITEM_TAG_INPUT 8 #define HID_MAIN_ITEM_TAG_OUTPUT 9 #define HID_MAIN_ITEM_TAG_FEATURE 11 #define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10 #define HID_MAIN_ITEM_TAG_END_COLLECTION 12 #define HID_MAIN_ITEM_TAG_RESERVED_MIN 13 #define HID_MAIN_ITEM_TAG_RESERVED_MAX 15 /* * HID report descriptor main item contents */ #define HID_MAIN_ITEM_CONSTANT 0x001 #define HID_MAIN_ITEM_VARIABLE 0x002 #define HID_MAIN_ITEM_RELATIVE 0x004 #define HID_MAIN_ITEM_WRAP 0x008 #define HID_MAIN_ITEM_NONLINEAR 0x010 #define HID_MAIN_ITEM_NO_PREFERRED 0x020 #define HID_MAIN_ITEM_NULL_STATE 0x040 #define HID_MAIN_ITEM_VOLATILE 0x080 #define HID_MAIN_ITEM_BUFFERED_BYTE 0x100 /* * HID report descriptor collection item types */ #define HID_COLLECTION_PHYSICAL 0 #define HID_COLLECTION_APPLICATION 1 #define HID_COLLECTION_LOGICAL 2 #define HID_COLLECTION_NAMED_ARRAY 4 /* * HID report descriptor global item tags */ #define HID_GLOBAL_ITEM_TAG_USAGE_PAGE 0 #define HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM 1 #define HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM 2 #define HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM 3 #define HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM 4 #define HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT 5 #define HID_GLOBAL_ITEM_TAG_UNIT 6 #define HID_GLOBAL_ITEM_TAG_REPORT_SIZE 7 #define HID_GLOBAL_ITEM_TAG_REPORT_ID 8 #define HID_GLOBAL_ITEM_TAG_REPORT_COUNT 9 #define HID_GLOBAL_ITEM_TAG_PUSH 10 #define HID_GLOBAL_ITEM_TAG_POP 11 /* * HID report descriptor local item tags */ #define HID_LOCAL_ITEM_TAG_USAGE 0 #define HID_LOCAL_ITEM_TAG_USAGE_MINIMUM 1 #define HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM 2 #define HID_LOCAL_ITEM_TAG_DESIGNATOR_INDEX 3 #define HID_LOCAL_ITEM_TAG_DESIGNATOR_MINIMUM 4 #define HID_LOCAL_ITEM_TAG_DESIGNATOR_MAXIMUM 5 #define HID_LOCAL_ITEM_TAG_STRING_INDEX 7 #define HID_LOCAL_ITEM_TAG_STRING_MINIMUM 8 #define HID_LOCAL_ITEM_TAG_STRING_MAXIMUM 9 #define HID_LOCAL_ITEM_TAG_DELIMITER 10 /* * HID usage tables */ #define HID_USAGE_PAGE 0xffff0000 #define HID_UP_UNDEFINED 0x00000000 #define HID_UP_GENDESK 0x00010000 #define HID_UP_SIMULATION 0x00020000 #define HID_UP_GENDEVCTRLS 0x00060000 #define HID_UP_KEYBOARD 0x00070000 #define HID_UP_LED 0x00080000 #define HID_UP_BUTTON 0x00090000 #define HID_UP_ORDINAL 0x000a0000 #define HID_UP_TELEPHONY 0x000b0000 #define HID_UP_CONSUMER 0x000c0000 #define HID_UP_DIGITIZER 0x000d0000 #define HID_UP_PID 0x000f0000 #define HID_UP_BATTERY 0x00850000 #define HID_UP_CAMERA 0x00900000 #define HID_UP_HPVENDOR 0xff7f0000 #define HID_UP_HPVENDOR2 0xff010000 #define HID_UP_MSVENDOR 0xff000000 #define HID_UP_CUSTOM 0x00ff0000 #define HID_UP_LOGIVENDOR 0xffbc0000 #define HID_UP_LOGIVENDOR2 0xff090000 #define HID_UP_LOGIVENDOR3 0xff430000 #define HID_UP_LNVENDOR 0xffa00000 #define HID_UP_SENSOR 0x00200000 #define HID_UP_ASUSVENDOR 0xff310000 #define HID_UP_GOOGLEVENDOR 0xffd10000 #define HID_USAGE 0x0000ffff #define HID_GD_POINTER 0x00010001 #define HID_GD_MOUSE 0x00010002 #define HID_GD_JOYSTICK 0x00010004 #define HID_GD_GAMEPAD 0x00010005 
#define HID_GD_KEYBOARD 0x00010006 #define HID_GD_KEYPAD 0x00010007 #define HID_GD_MULTIAXIS 0x00010008 /* * Microsoft Win8 Wireless Radio Controls extensions CA, see: * http://www.usb.org/developers/hidpage/HUTRR40RadioHIDUsagesFinal.pdf */ #define HID_GD_WIRELESS_RADIO_CTLS 0x0001000c /* * System Multi-Axis, see: * http://www.usb.org/developers/hidpage/HUTRR62_-_Generic_Desktop_CA_for_System_Multi-Axis_Controllers.txt */ #define HID_GD_SYSTEM_MULTIAXIS 0x0001000e #define HID_GD_X 0x00010030 #define HID_GD_Y 0x00010031 #define HID_GD_Z 0x00010032 #define HID_GD_RX 0x00010033 #define HID_GD_RY 0x00010034 #define HID_GD_RZ 0x00010035 #define HID_GD_SLIDER 0x00010036 #define HID_GD_DIAL 0x00010037 #define HID_GD_WHEEL 0x00010038 #define HID_GD_HATSWITCH 0x00010039 #define HID_GD_BUFFER 0x0001003a #define HID_GD_BYTECOUNT 0x0001003b #define HID_GD_MOTION 0x0001003c #define HID_GD_START 0x0001003d #define HID_GD_SELECT 0x0001003e #define HID_GD_VX 0x00010040 #define HID_GD_VY 0x00010041 #define HID_GD_VZ 0x00010042 #define HID_GD_VBRX 0x00010043 #define HID_GD_VBRY 0x00010044 #define HID_GD_VBRZ 0x00010045 #define HID_GD_VNO 0x00010046 #define HID_GD_FEATURE 0x00010047 #define HID_GD_RESOLUTION_MULTIPLIER 0x00010048 #define HID_GD_SYSTEM_CONTROL 0x00010080 #define HID_GD_UP 0x00010090 #define HID_GD_DOWN 0x00010091 #define HID_GD_RIGHT 0x00010092 #define HID_GD_LEFT 0x00010093 #define HID_GD_DO_NOT_DISTURB 0x0001009b /* Microsoft Win8 Wireless Radio Controls CA usage codes */ #define HID_GD_RFKILL_BTN 0x000100c6 #define HID_GD_RFKILL_LED 0x000100c7 #define HID_GD_RFKILL_SWITCH 0x000100c8 #define HID_DC_BATTERYSTRENGTH 0x00060020 #define HID_CP_CONSUMER_CONTROL 0x000c0001 #define HID_CP_AC_PAN 0x000c0238 #define HID_DG_DIGITIZER 0x000d0001 #define HID_DG_PEN 0x000d0002 #define HID_DG_LIGHTPEN 0x000d0003 #define HID_DG_TOUCHSCREEN 0x000d0004 #define HID_DG_TOUCHPAD 0x000d0005 #define HID_DG_WHITEBOARD 0x000d0006 #define HID_DG_STYLUS 0x000d0020 #define HID_DG_PUCK 0x000d0021 #define HID_DG_FINGER 0x000d0022 #define HID_DG_TIPPRESSURE 0x000d0030 #define HID_DG_BARRELPRESSURE 0x000d0031 #define HID_DG_INRANGE 0x000d0032 #define HID_DG_TOUCH 0x000d0033 #define HID_DG_UNTOUCH 0x000d0034 #define HID_DG_TAP 0x000d0035 #define HID_DG_TRANSDUCER_INDEX 0x000d0038 #define HID_DG_TABLETFUNCTIONKEY 0x000d0039 #define HID_DG_PROGRAMCHANGEKEY 0x000d003a #define HID_DG_BATTERYSTRENGTH 0x000d003b #define HID_DG_INVERT 0x000d003c #define HID_DG_TILT_X 0x000d003d #define HID_DG_TILT_Y 0x000d003e #define HID_DG_TWIST 0x000d0041 #define HID_DG_TIPSWITCH 0x000d0042 #define HID_DG_TIPSWITCH2 0x000d0043 #define HID_DG_BARRELSWITCH 0x000d0044 #define HID_DG_ERASER 0x000d0045 #define HID_DG_TABLETPICK 0x000d0046 #define HID_DG_PEN_COLOR 0x000d005c #define HID_DG_PEN_LINE_WIDTH 0x000d005e #define HID_DG_PEN_LINE_STYLE 0x000d0070 #define HID_DG_PEN_LINE_STYLE_INK 0x000d0072 #define HID_DG_PEN_LINE_STYLE_PENCIL 0x000d0073 #define HID_DG_PEN_LINE_STYLE_HIGHLIGHTER 0x000d0074 #define HID_DG_PEN_LINE_STYLE_CHISEL_MARKER 0x000d0075 #define HID_DG_PEN_LINE_STYLE_BRUSH 0x000d0076 #define HID_DG_PEN_LINE_STYLE_NO_PREFERENCE 0x000d0077 #define HID_CP_CONSUMERCONTROL 0x000c0001 #define HID_CP_NUMERICKEYPAD 0x000c0002 #define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 #define HID_CP_MICROPHONE 0x000c0004 #define HID_CP_HEADPHONE 0x000c0005 #define HID_CP_GRAPHICEQUALIZER 0x000c0006 #define HID_CP_FUNCTIONBUTTONS 0x000c0036 #define HID_CP_SELECTION 0x000c0080 #define HID_CP_MEDIASELECTION 0x000c0087 #define HID_CP_SELECTDISC 0x000c00ba 
#define HID_CP_VOLUMEUP 0x000c00e9 #define HID_CP_VOLUMEDOWN 0x000c00ea #define HID_CP_PLAYBACKSPEED 0x000c00f1 #define HID_CP_PROXIMITY 0x000c0109 #define HID_CP_SPEAKERSYSTEM 0x000c0160 #define HID_CP_CHANNELLEFT 0x000c0161 #define HID_CP_CHANNELRIGHT 0x000c0162 #define HID_CP_CHANNELCENTER 0x000c0163 #define HID_CP_CHANNELFRONT 0x000c0164 #define HID_CP_CHANNELCENTERFRONT 0x000c0165 #define HID_CP_CHANNELSIDE 0x000c0166 #define HID_CP_CHANNELSURROUND 0x000c0167 #define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 #define HID_CP_CHANNELTOP 0x000c0169 #define HID_CP_CHANNELUNKNOWN 0x000c016a #define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 #define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 #define HID_DG_DEVICECONFIG 0x000d000e #define HID_DG_DEVICESETTINGS 0x000d0023 #define HID_DG_AZIMUTH 0x000d003f #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 #define HID_DG_CONTACTID 0x000d0051 #define HID_DG_INPUTMODE 0x000d0052 #define HID_DG_DEVICEINDEX 0x000d0053 #define HID_DG_CONTACTCOUNT 0x000d0054 #define HID_DG_CONTACTMAX 0x000d0055 #define HID_DG_SCANTIME 0x000d0056 #define HID_DG_SURFACESWITCH 0x000d0057 #define HID_DG_BUTTONSWITCH 0x000d0058 #define HID_DG_BUTTONTYPE 0x000d0059 #define HID_DG_BARRELSWITCH2 0x000d005a #define HID_DG_TOOLSERIALNUMBER 0x000d005b #define HID_DG_LATENCYMODE 0x000d0060 #define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065 #define HID_BAT_CHARGING 0x00850044 #define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076 /* * HID connect requests */ #define HID_CONNECT_HIDINPUT BIT(0) #define HID_CONNECT_HIDINPUT_FORCE BIT(1) #define HID_CONNECT_HIDRAW BIT(2) #define HID_CONNECT_HIDDEV BIT(3) #define HID_CONNECT_HIDDEV_FORCE BIT(4) #define HID_CONNECT_FF BIT(5) #define HID_CONNECT_DRIVER BIT(6) #define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ HID_CONNECT_HIDDEV|HID_CONNECT_FF) /* * HID device quirks. 
*/ /* * Increase this if you need to configure more HID quirks at module load time */ #define MAX_USBHID_BOOT_QUIRKS 4 /** * DOC: HID quirks * | @HID_QUIRK_NOTOUCH: * | @HID_QUIRK_IGNORE: ignore this device * | @HID_QUIRK_NOGET: * | @HID_QUIRK_HIDDEV_FORCE: * | @HID_QUIRK_BADPAD: * | @HID_QUIRK_MULTI_INPUT: * | @HID_QUIRK_HIDINPUT_FORCE: * | @HID_QUIRK_ALWAYS_POLL: * | @HID_QUIRK_INPUT_PER_APP: * | @HID_QUIRK_X_INVERT: * | @HID_QUIRK_Y_INVERT: * | @HID_QUIRK_SKIP_OUTPUT_REPORTS: * | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID: * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP: * | @HID_QUIRK_HAVE_SPECIAL_DRIVER: * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE: * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER * | @HID_QUIRK_FULLSPEED_INTERVAL: * | @HID_QUIRK_NO_INIT_REPORTS: * | @HID_QUIRK_NO_IGNORE: * | @HID_QUIRK_NO_INPUT_SYNC: */ /* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */ #define HID_QUIRK_NOTOUCH BIT(1) #define HID_QUIRK_IGNORE BIT(2) #define HID_QUIRK_NOGET BIT(3) #define HID_QUIRK_HIDDEV_FORCE BIT(4) #define HID_QUIRK_BADPAD BIT(5) #define HID_QUIRK_MULTI_INPUT BIT(6) #define HID_QUIRK_HIDINPUT_FORCE BIT(7) /* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */ /* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */ #define HID_QUIRK_ALWAYS_POLL BIT(10) #define HID_QUIRK_INPUT_PER_APP BIT(11) #define HID_QUIRK_X_INVERT BIT(12) #define HID_QUIRK_Y_INVERT BIT(13) #define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16) #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17) #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18) #define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19) #define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20) #define HID_QUIRK_NOINVERT BIT(21) #define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22) #define HID_QUIRK_FULLSPEED_INTERVAL BIT(28) #define HID_QUIRK_NO_INIT_REPORTS BIT(29) #define HID_QUIRK_NO_IGNORE BIT(30) #define HID_QUIRK_NO_INPUT_SYNC BIT(31) /* * HID device groups * * Note: HID_GROUP_ANY is declared in linux/mod_devicetable.h * and has a value of 0x0000 */ #define HID_GROUP_GENERIC 0x0001 #define HID_GROUP_MULTITOUCH 0x0002 #define HID_GROUP_SENSOR_HUB 0x0003 #define HID_GROUP_MULTITOUCH_WIN_8 0x0004 /* * Vendor specific HID device groups */ #define HID_GROUP_RMI 0x0100 #define HID_GROUP_WACOM 0x0101 #define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 #define HID_GROUP_STEAM 0x0103 #define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104 #define HID_GROUP_VIVALDI 0x0105 /* * HID protocol status */ #define HID_REPORT_PROTOCOL 1 #define HID_BOOT_PROTOCOL 0 /* * This is the global environment of the parser. This information is * persistent for main-items. The global environment can be saved and * restored with PUSH/POP statements. */ struct hid_global { unsigned usage_page; __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned unit; unsigned report_id; unsigned report_size; unsigned report_count; }; /* * This is the local environment. It is persistent up the next main-item. */ #define HID_MAX_USAGES 12288 #define HID_DEFAULT_NUM_COLLECTIONS 16 struct hid_local { unsigned usage[HID_MAX_USAGES]; /* usage array */ u8 usage_size[HID_MAX_USAGES]; /* usage size array */ unsigned collection_index[HID_MAX_USAGES]; /* collection index array */ unsigned usage_index; unsigned usage_minimum; unsigned delimiter_depth; unsigned delimiter_branch; }; /* * This is the collection stack. We climb up the stack to determine * application and function of each field. 
*/ struct hid_collection { int parent_idx; /* device->collection */ unsigned type; unsigned usage; unsigned level; }; struct hid_usage { unsigned hid; /* hid usage code */ unsigned collection_index; /* index into collection array */ unsigned usage_index; /* index into usage array */ __s8 resolution_multiplier;/* Effective Resolution Multiplier (HUT v1.12, 4.3.1), default: 1 */ /* hidinput data */ __s8 wheel_factor; /* 120/resolution_multiplier */ __u16 code; /* input driver code */ __u8 type; /* input driver type */ __s16 hat_min; /* hat switch fun */ __s16 hat_max; /* ditto */ __s16 hat_dir; /* ditto */ __s16 wheel_accumulated; /* hi-res wheel */ }; struct hid_input; struct hid_field { unsigned physical; /* physical usage for this field */ unsigned logical; /* logical usage for this field */ unsigned application; /* application usage for this field */ struct hid_usage *usage; /* usage table for this function */ unsigned maxusage; /* maximum usage index */ unsigned flags; /* main-item flags (i.e. volatile,array,constant) */ unsigned report_offset; /* bit offset in the report */ unsigned report_size; /* size of this field in the report */ unsigned report_count; /* number of this field in the report */ unsigned report_type; /* (input,output,feature) */ __s32 *value; /* last known value(s) */ __s32 *new_value; /* newly read value(s) */ __s32 *usages_priorities; /* priority of each usage when reading the report * bits 8-16 are reserved for hid-input usage */ __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned unit; bool ignored; /* this field is ignored in this event */ struct hid_report *report; /* associated report */ unsigned index; /* index into report->field[] */ /* hidinput data */ struct hid_input *hidinput; /* associated input structure */ __u16 dpad; /* dpad input code */ unsigned int slot_idx; /* slot index in a report */ }; #define HID_MAX_FIELDS 256 struct hid_field_entry { struct list_head list; struct hid_field *field; unsigned int index; __s32 priority; }; struct hid_report { struct list_head list; struct list_head hidinput_list; struct list_head field_entry_list; /* ordered list of input fields */ unsigned int id; /* id of this report */ enum hid_report_type type; /* report type */ unsigned int application; /* application usage for this report */ struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */ struct hid_field_entry *field_entries; /* allocated memory of input field_entry */ unsigned maxfield; /* maximum valid field index */ unsigned size; /* size of the report (bits) */ struct hid_device *device; /* associated device */ /* tool related state */ bool tool_active; /* whether the current tool is active */ unsigned int tool; /* BTN_TOOL_* */ }; #define HID_MAX_IDS 256 struct hid_report_enum { unsigned numbered; struct list_head report_list; struct hid_report *report_id_hash[HID_MAX_IDS]; }; #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ #define HID_MAX_BUFFER_SIZE 16384 /* 16kb */ #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ #define HID_OUTPUT_FIFO_SIZE 64 struct hid_control_fifo { unsigned char dir; struct hid_report *report; char *raw_report; }; struct hid_output_fifo { struct hid_report *report; char *raw_report; }; #define HID_CLAIMED_INPUT BIT(0) #define HID_CLAIMED_HIDDEV BIT(1) #define HID_CLAIMED_HIDRAW BIT(2) #define HID_CLAIMED_DRIVER BIT(3) #define HID_STAT_ADDED BIT(0) #define HID_STAT_PARSED BIT(1) #define 
HID_STAT_DUP_DETECTED BIT(2) #define HID_STAT_REPROBED BIT(3) struct hid_input { struct list_head list; struct hid_report *report; struct input_dev *input; const char *name; struct list_head reports; /* the list of reports */ unsigned int application; /* application usage for this input */ bool registered; }; enum hid_type { HID_TYPE_OTHER = 0, HID_TYPE_USBMOUSE, HID_TYPE_USBNONE }; enum hid_battery_status { HID_BATTERY_UNKNOWN = 0, HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ }; struct hid_driver; struct hid_ll_driver; struct hid_device { const __u8 *dev_rdesc; /* device report descriptor */ const __u8 *bpf_rdesc; /* bpf modified report descriptor, if any */ const __u8 *rdesc; /* currently used report descriptor */ unsigned int dev_rsize; unsigned int bpf_rsize; unsigned int rsize; unsigned int collection_size; /* Number of allocated hid_collections */ struct hid_collection *collection; /* List of HID collections */ unsigned int maxcollection; /* Number of parsed collections */ unsigned int maxapplication; /* Number of applications */ __u16 bus; /* BUS ID */ __u16 group; /* Report group */ __u32 vendor; /* Vendor ID */ __u32 product; /* Product ID */ __u32 version; /* HID version */ enum hid_type type; /* device type (mouse, kbd, ...) */ unsigned country; /* HID country */ struct hid_report_enum report_enum[HID_REPORT_TYPES]; struct work_struct led_work; /* delayed LED worker */ struct semaphore driver_input_lock; /* protects the current driver */ struct device dev; /* device */ struct hid_driver *driver; void *devres_group_id; /* ID of probe devres group */ const struct hid_ll_driver *ll_driver; struct mutex ll_open_lock; unsigned int ll_open_count; #ifdef CONFIG_HID_BATTERY_STRENGTH /* * Power supply information for HID devices which report * battery strength. power_supply was successfully registered if * battery is non-NULL. */ struct power_supply *battery; __s32 battery_capacity; __s32 battery_min; __s32 battery_max; __s32 battery_report_type; __s32 battery_report_id; __s32 battery_charge_status; enum hid_battery_status battery_status; bool battery_avoid_query; ktime_t battery_ratelimit_time; #endif unsigned long status; /* see STAT flags above */ unsigned claimed; /* Claimed by hidinput, hiddev? 
*/ unsigned quirks; /* Various quirks the device can pull on us */ unsigned initial_quirks; /* Initial set of quirks supplied when creating device */ bool io_started; /* If IO has started */ struct list_head inputs; /* The list of inputs */ void *hiddev; /* The hiddev structure */ void *hidraw; char name[128]; /* Device name */ char phys[64]; /* Device physical location */ char uniq[64]; /* Device unique identifier (serial #) */ void *driver_data; /* temporary hid_ff handling (until moved to the drivers) */ int (*ff_init)(struct hid_device *); /* hiddev event handler */ int (*hiddev_connect)(struct hid_device *, unsigned int); void (*hiddev_disconnect)(struct hid_device *); void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, struct hid_usage *, __s32); void (*hiddev_report_event) (struct hid_device *, struct hid_report *); /* debugging support via debugfs */ unsigned short debug; struct dentry *debug_dir; struct dentry *debug_rdesc; struct dentry *debug_events; struct list_head debug_list; spinlock_t debug_list_lock; wait_queue_head_t debug_wait; struct kref ref; unsigned int id; /* system unique id */ #ifdef CONFIG_HID_BPF struct hid_bpf bpf; /* hid-bpf data */ #endif /* CONFIG_HID_BPF */ }; void hiddev_free(struct kref *ref); #define to_hid_device(pdev) \ container_of(pdev, struct hid_device, dev) static inline void *hid_get_drvdata(struct hid_device *hdev) { return dev_get_drvdata(&hdev->dev); } static inline void hid_set_drvdata(struct hid_device *hdev, void *data) { dev_set_drvdata(&hdev->dev, data); } #define HID_GLOBAL_STACK_SIZE 4 #define HID_COLLECTION_STACK_SIZE 4 #define HID_SCAN_FLAG_MT_WIN_8 BIT(0) #define HID_SCAN_FLAG_VENDOR_SPECIFIC BIT(1) #define HID_SCAN_FLAG_GD_POINTER BIT(2) struct hid_parser { struct hid_global global; struct hid_global global_stack[HID_GLOBAL_STACK_SIZE]; unsigned int global_stack_ptr; struct hid_local local; unsigned int *collection_stack; unsigned int collection_stack_ptr; unsigned int collection_stack_size; struct hid_device *device; unsigned int scan_flags; }; struct hid_class_descriptor { __u8 bDescriptorType; __le16 wDescriptorLength; } __attribute__ ((packed)); struct hid_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdHID; __u8 bCountryCode; __u8 bNumDescriptors; struct hid_class_descriptor desc[1]; } __attribute__ ((packed)); #define HID_DEVICE(b, g, ven, prod) \ .bus = (b), .group = (g), .vendor = (ven), .product = (prod) #define HID_USB_DEVICE(ven, prod) \ .bus = BUS_USB, .vendor = (ven), .product = (prod) #define HID_BLUETOOTH_DEVICE(ven, prod) \ .bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod) #define HID_I2C_DEVICE(ven, prod) \ .bus = BUS_I2C, .vendor = (ven), .product = (prod) #define HID_REPORT_ID(rep) \ .report_type = (rep) #define HID_USAGE_ID(uhid, utype, ucode) \ .usage_hid = (uhid), .usage_type = (utype), .usage_code = (ucode) /* we don't want to catch types and codes equal to 0 */ #define HID_TERMINATOR (HID_ANY_ID - 1) struct hid_report_id { __u32 report_type; }; struct hid_usage_id { __u32 usage_hid; __u32 usage_type; __u32 usage_code; }; /** * struct hid_driver * @name: driver name (e.g. 
"Footech_bar-wheel") * @id_table: which devices is this driver for (must be non-NULL for probe * to be called) * @dyn_list: list of dynamically added device ids * @dyn_lock: lock protecting @dyn_list * @match: check if the given device is handled by this driver * @probe: new device inserted * @remove: device removed (NULL if not a hot-plug capable driver) * @report_table: on which reports to call raw_event (NULL means all) * @raw_event: if report in report_table, this hook is called (NULL means nop) * @usage_table: on which events to call event (NULL means all) * @event: if usage in usage_table, this hook is called (NULL means nop) * @report: this hook is called after parsing a report (NULL means nop) * @report_fixup: called before report descriptor parsing (NULL means nop) * @input_mapping: invoked on input registering before mapping an usage * @input_mapped: invoked on input registering after mapping an usage * @input_configured: invoked just before the device is registered * @feature_mapping: invoked on feature registering * @suspend: invoked on suspend (NULL means nop) * @resume: invoked on resume if device was not reset (NULL means nop) * @reset_resume: invoked on resume if device was reset (NULL means nop) * * probe should return -errno on error, or 0 on success. During probe, * input will not be passed to raw_event unless hid_device_io_start is * called. * * raw_event and event should return negative on error, any other value will * pass the event on to .event() typically return 0 for success. * * input_mapping shall return a negative value to completely ignore this usage * (e.g. doubled or invalid usage), zero to continue with parsing of this * usage by generic code (no special handling needed) or positive to skip * generic parsing (needed special handling which was done in the hook already) * input_mapped shall return negative to inform the layer that this usage * should not be considered for further processing or zero to notify that * no processing was performed and should be done in a generic manner * Both these functions may be NULL which means the same behavior as returning * zero from them. 
*/ struct hid_driver { char *name; const struct hid_device_id *id_table; struct list_head dyn_list; spinlock_t dyn_lock; bool (*match)(struct hid_device *dev, bool ignore_special_driver); int (*probe)(struct hid_device *dev, const struct hid_device_id *id); void (*remove)(struct hid_device *dev); const struct hid_report_id *report_table; int (*raw_event)(struct hid_device *hdev, struct hid_report *report, u8 *data, int size); const struct hid_usage_id *usage_table; int (*event)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value); void (*report)(struct hid_device *hdev, struct hid_report *report); const __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, unsigned int *size); int (*input_mapping)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); int (*input_mapped)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); int (*input_configured)(struct hid_device *hdev, struct hid_input *hidinput); void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); int (*suspend)(struct hid_device *hdev, pm_message_t message); int (*resume)(struct hid_device *hdev); int (*reset_resume)(struct hid_device *hdev); /* private: */ struct device_driver driver; }; #define to_hid_driver(pdrv) \ container_of(pdrv, struct hid_driver, driver) /** * struct hid_ll_driver - low level driver callbacks * @start: called on probe to start the device * @stop: called on remove * @open: called by input layer on open * @close: called by input layer on close * @power: request underlying hardware to enter requested power mode * @parse: this method is called only once to parse the device data, * shouldn't allocate anything to not leak memory * @request: send report request to device (e.g. feature report) * @wait: wait for buffered io to complete (send/recv reports) * @raw_request: send raw report request to device (e.g. 
feature report) * @output_report: send output report to device * @idle: send idle request to device * @may_wakeup: return if device may act as a wakeup source during system-suspend * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE) */ struct hid_ll_driver { int (*start)(struct hid_device *hdev); void (*stop)(struct hid_device *hdev); int (*open)(struct hid_device *hdev); void (*close)(struct hid_device *hdev); int (*power)(struct hid_device *hdev, int level); int (*parse)(struct hid_device *hdev); void (*request)(struct hid_device *hdev, struct hid_report *report, int reqtype); int (*wait)(struct hid_device *hdev); int (*raw_request) (struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype); int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); bool (*may_wakeup)(struct hid_device *hdev); unsigned int max_buffer_size; }; extern bool hid_is_usb(const struct hid_device *hdev); #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ #define IS_INPUT_APPLICATION(a) \ (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \ || ((a >= HID_DG_DIGITIZER) && (a <= HID_DG_WHITEBOARD)) \ || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \ || (a == HID_GD_WIRELESS_RADIO_CTLS)) /* HID core API */ extern bool hid_ignore(struct hid_device *); extern int hid_add_device(struct hid_device *); extern void hid_destroy_device(struct hid_device *); extern const struct bus_type hid_bus_type; extern int __must_check __hid_register_driver(struct hid_driver *, struct module *, const char *mod_name); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define hid_register_driver(driver) \ __hid_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void hid_unregister_driver(struct hid_driver *); /** * module_hid_driver() - Helper macro for registering a HID driver * @__hid_driver: hid_driver struct * * Helper macro for HID drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. 
Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_hid_driver(__hid_driver) \ module_driver(__hid_driver, hid_register_driver, \ hid_unregister_driver) extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report); extern int hidinput_connect(struct hid_device *hid, unsigned int force); extern void hidinput_disconnect(struct hid_device *); struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type, unsigned int application, unsigned int usage); int hid_set_field(struct hid_field *, unsigned, __s32); int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, int interrupt); struct hid_field *hidinput_get_led_field(struct hid_device *hid); unsigned int hidinput_count_leds(struct hid_device *hid); __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code); void hid_output_report(struct hid_report *report, __u8 *data); int __hid_request(struct hid_device *hid, struct hid_report *rep, enum hid_class_request reqtype); u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags); struct hid_device *hid_allocate_device(void); struct hid_report *hid_register_report(struct hid_device *device, enum hid_report_type type, unsigned int id, unsigned int application); int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size); struct hid_report *hid_validate_values(struct hid_device *hid, enum hid_report_type type, unsigned int id, unsigned int field_index, unsigned int report_counts); void hid_setup_resolution_multiplier(struct hid_device *hid); int hid_open_report(struct hid_device *device); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); void hid_disconnect(struct hid_device *hid); bool hid_match_one_id(const struct hid_device *hdev, const struct hid_device_id *id); const struct hid_device_id *hid_match_id(const struct hid_device *hdev, const struct hid_device_id *id); const struct hid_device_id *hid_match_device(struct hid_device *hdev, struct hid_driver *hdrv); bool hid_compare_device_paths(struct hid_device *hdev_a, struct hid_device *hdev_b, char separator); __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n); #ifdef CONFIG_PM int hid_driver_suspend(struct hid_device *hdev, pm_message_t state); int hid_driver_reset_resume(struct hid_device *hdev); int hid_driver_resume(struct hid_device *hdev); #else static inline int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) { return 0; } static inline int hid_driver_reset_resume(struct hid_device *hdev) { return 0; } static inline int hid_driver_resume(struct hid_device *hdev) { return 0; } #endif /** * hid_device_io_start - enable HID input during probe, remove * * @hid: the device * * This should only be called during probe or remove and only be * called by the thread calling probe or remove. It will allow * incoming packets to be delivered to the driver. */ static inline void hid_device_io_start(struct hid_device *hid) { if (hid->io_started) { dev_warn(&hid->dev, "io already started\n"); return; } hid->io_started = true; up(&hid->driver_input_lock); } /** * hid_device_io_stop - disable HID input during probe, remove * * @hid: the device * * Should only be called after hid_device_io_start. 
It will prevent * incoming packets from going to the driver for the duration of * probe, remove. If called during probe, packets will still go to the * driver after probe is complete. This function should only be called * by the thread calling probe or remove. */ static inline void hid_device_io_stop(struct hid_device *hid) { if (!hid->io_started) { dev_warn(&hid->dev, "io already stopped\n"); return; } hid->io_started = false; down(&hid->driver_input_lock); } /** * hid_map_usage - map usage input bits * * @hidinput: hidinput which we are interested in * @usage: usage to fill in * @bit: pointer to input->{}bit (out parameter) * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type * * The value pointed to by @bit will be set to NULL if either @type is * an unhandled event type, or if @c is out of range for @type. This * can be used as an error condition. */ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; unsigned long *bmap = NULL; unsigned int limit = 0; switch (type) { case EV_ABS: bmap = input->absbit; limit = ABS_MAX; break; case EV_REL: bmap = input->relbit; limit = REL_MAX; break; case EV_KEY: bmap = input->keybit; limit = KEY_MAX; break; case EV_LED: bmap = input->ledbit; limit = LED_MAX; break; case EV_MSC: bmap = input->mscbit; limit = MSC_MAX; break; } if (unlikely(c > limit || !bmap)) { pr_warn_ratelimited("%s: Invalid code %d type %d\n", input->name, c, type); *bit = NULL; return; } usage->type = type; usage->code = c; *max = limit; *bit = bmap; } /** * hid_map_usage_clear - map usage input bits and clear the input bit * * @hidinput: hidinput which we are interested in * @usage: usage to fill in * @bit: pointer to input->{}bit (out parameter) * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type * * The same as hid_map_usage, except the @c bit is also cleared in supported * bits (@bit). */ static inline void hid_map_usage_clear(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); if (*bit) clear_bit(usage->code, *bit); } /** * hid_parse - parse HW reports * * @hdev: hid device * * Call this from probe after you set up the device (if needed). Your * report_fixup will be called (if non-NULL) after reading raw report from * device before passing it to hid layer for real parsing. 
*/ static inline int __must_check hid_parse(struct hid_device *hdev) { return hid_open_report(hdev); } int __must_check hid_hw_start(struct hid_device *hdev, unsigned int connect_mask); void hid_hw_stop(struct hid_device *hdev); int __must_check hid_hw_open(struct hid_device *hdev); void hid_hw_close(struct hid_device *hdev); void hid_hw_request(struct hid_device *hdev, struct hid_report *report, enum hid_class_request reqtype); int __hid_hw_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source, bool from_bpf); int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, __u64 source, bool from_bpf); int hid_hw_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, enum hid_report_type rtype, enum hid_class_request reqtype); int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len); /** * hid_hw_power - requests underlying HW to go into given power mode * * @hdev: hid device * @level: requested power level (one of %PM_HINT_* defines) * * This function requests underlying hardware to enter requested power * mode. */ static inline int hid_hw_power(struct hid_device *hdev, int level) { return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0; } /** * hid_hw_idle - send idle request to device * * @hdev: hid device * @report: report to control * @idle: idle state * @reqtype: hid request type */ static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle, enum hid_class_request reqtype) { if (hdev->ll_driver->idle) return hdev->ll_driver->idle(hdev, report, idle, reqtype); return 0; } /** * hid_hw_may_wakeup - return if the hid device may act as a wakeup source during system-suspend * * @hdev: hid device */ static inline bool hid_hw_may_wakeup(struct hid_device *hdev) { if (hdev->ll_driver->may_wakeup) return hdev->ll_driver->may_wakeup(hdev); if (hdev->dev.parent) return device_may_wakeup(hdev->dev.parent); return false; } /** * hid_hw_wait - wait for buffered io to complete * * @hdev: hid device */ static inline void hid_hw_wait(struct hid_device *hdev) { if (hdev->ll_driver->wait) hdev->ll_driver->wait(hdev); } /** * hid_report_len - calculate the report length * * @report: the report we want to know the length */ static inline u32 hid_report_len(struct hid_report *report) { return DIV_ROUND_UP(report->size, 8) + (report->id > 0); } int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, int interrupt); /* HID quirks API */ unsigned long hid_lookup_quirk(const struct hid_device *hdev); int hid_quirks_init(char **quirks_param, __u16 bus, int count); void hid_quirks_exit(__u16 bus); #define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__) #define hid_err(hid, fmt, ...) \ dev_err(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_notice(hid, fmt, ...) \ dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_warn(hid, fmt, ...) \ dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_info(hid, fmt, ...) \ dev_info(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_dbg(hid, fmt, ...) \ dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_err_once(hid, fmt, ...) \ dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_notice_once(hid, fmt, ...) \ dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_warn_once(hid, fmt, ...) \ dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_info_once(hid, fmt, ...) 
\ dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_dbg_once(hid, fmt, ...) \ dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__) #endif
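/*
 * Editor's illustrative sketch (not part of hid.h): a minimal HID driver
 * skeleton showing how the hid_driver hooks, hid_parse()/hid_hw_start() and
 * the module_hid_driver() helper documented above fit together. The driver
 * name, vendor/product IDs and function names below are hypothetical
 * placeholders, not a real device or in-tree driver.
 */
#include <linux/hid.h>
#include <linux/module.h>

static int example_hid_probe(struct hid_device *hdev,
			     const struct hid_device_id *id)
{
	int ret;

	/* Parse the report descriptor before starting the hardware. */
	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	/* Connect hidinput/hidraw/hiddev as selected by HID_CONNECT_DEFAULT. */
	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		hid_err(hdev, "hw start failed\n");
	return ret;
}

static void example_hid_remove(struct hid_device *hdev)
{
	hid_hw_stop(hdev);
}

/* 0x1234/0x5678 are placeholder IDs used only for this sketch. */
static const struct hid_device_id example_hid_devices[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(hid, example_hid_devices);

static struct hid_driver example_hid_driver = {
	.name		= "example-hid",
	.id_table	= example_hid_devices,
	.probe		= example_hid_probe,
	.remove		= example_hid_remove,
};
module_hid_driver(example_hid_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical example HID driver skeleton");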
// SPDX-License-Identifier: GPL-2.0-or-later /* * KVM paravirt_ops implementation * * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright IBM Corporation, 2007 * Authors: Anthony Liguori <aliguori@us.ibm.com> */ #define pr_fmt(fmt) "kvm-guest: " fmt #include <linux/context_tracking.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/kvm_para.h> #include <linux/cpu.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/hardirq.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/hash.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/kprobes.h> #include <linux/nmi.h> #include <linux/swait.h> #include <linux/syscore_ops.h> #include <linux/cc_platform.h> #include <linux/efi.h> #include <asm/timer.h> #include <asm/cpu.h> #include <asm/traps.h> #include <asm/desc.h> #include <asm/tlbflush.h> #include <asm/apic.h> #include <asm/apicdef.h> #include <asm/hypervisor.h> #include <asm/mtrr.h> #include <asm/tlb.h> #include <asm/cpuidle_haltpoll.h> #include <asm/ptrace.h> #include <asm/reboot.h> #include <asm/svm.h> #include <asm/e820/api.h> DEFINE_STATIC_KEY_FALSE_RO(kvm_async_pf_enabled); static int kvmapf = 1; static int __init parse_no_kvmapf(char *arg) { kvmapf = 0; return 0; } early_param("no-kvmapf", parse_no_kvmapf); static int steal_acc = 1; static int __init parse_no_stealacc(char *arg) { steal_acc = 0; return 0; } early_param("no-steal-acc", parse_no_stealacc); static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled); static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64); DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible; static int has_steal_clock = 0; static int has_guest_poll = 0; /* * No need for any "IO delay" on KVM */ static void kvm_io_delay(void) { } #define KVM_TASK_SLEEP_HASHBITS 8 #define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS) struct kvm_task_sleep_node { struct hlist_node link; struct swait_queue_head wq; u32 token; int cpu; }; static struct kvm_task_sleep_head { raw_spinlock_t lock; struct hlist_head list; } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE]; static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, u32 token) { struct hlist_node *p; hlist_for_each(p, &b->list) { struct kvm_task_sleep_node *n = hlist_entry(p, typeof(*n),
link); if (n->token == token) return n; } return NULL; } static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; struct kvm_task_sleep_node *e; raw_spin_lock(&b->lock); e = _find_apf_task(b, token); if (e) { /* dummy entry exist -> wake up was delivered ahead of PF */ hlist_del(&e->link); raw_spin_unlock(&b->lock); kfree(e); return false; } n->token = token; n->cpu = smp_processor_id(); init_swait_queue_head(&n->wq); hlist_add_head(&n->link, &b->list); raw_spin_unlock(&b->lock); return true; } /* * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled * @token: Token to identify the sleep node entry * * Invoked from the async pagefault handling code or from the VM exit page * fault handler. In both cases RCU is watching. */ void kvm_async_pf_task_wait_schedule(u32 token) { struct kvm_task_sleep_node n; DECLARE_SWAITQUEUE(wait); lockdep_assert_irqs_disabled(); if (!kvm_async_pf_queue_task(token, &n)) return; for (;;) { prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE); if (hlist_unhashed(&n.link)) break; local_irq_enable(); schedule(); local_irq_disable(); } finish_swait(&n.wq, &wait); } EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule); static void apf_task_wake_one(struct kvm_task_sleep_node *n) { hlist_del_init(&n->link); if (swq_has_sleeper(&n->wq)) swake_up_one(&n->wq); } static void apf_task_wake_all(void) { int i; for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) { struct kvm_task_sleep_head *b = &async_pf_sleepers[i]; struct kvm_task_sleep_node *n; struct hlist_node *p, *next; raw_spin_lock(&b->lock); hlist_for_each_safe(p, next, &b->list) { n = hlist_entry(p, typeof(*n), link); if (n->cpu == smp_processor_id()) apf_task_wake_one(n); } raw_spin_unlock(&b->lock); } } void kvm_async_pf_task_wake(u32 token) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; struct kvm_task_sleep_node *n, *dummy = NULL; if (token == ~0) { apf_task_wake_all(); return; } again: raw_spin_lock(&b->lock); n = _find_apf_task(b, token); if (!n) { /* * Async #PF not yet handled, add a dummy entry for the token. * Allocating the token must be down outside of the raw lock * as the allocator is preemptible on PREEMPT_RT kernels. */ if (!dummy) { raw_spin_unlock(&b->lock); dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC); /* * Continue looping on allocation failure, eventually * the async #PF will be handled and allocating a new * node will be unnecessary. */ if (!dummy) cpu_relax(); /* * Recheck for async #PF completion before enqueueing * the dummy token to avoid duplicate list entries. */ goto again; } dummy->token = token; dummy->cpu = smp_processor_id(); init_swait_queue_head(&dummy->wq); hlist_add_head(&dummy->link, &b->list); dummy = NULL; } else { apf_task_wake_one(n); } raw_spin_unlock(&b->lock); /* A dummy token might be allocated and ultimately not used. 
*/ kfree(dummy); } EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); noinstr u32 kvm_read_and_reset_apf_flags(void) { u32 flags = 0; if (__this_cpu_read(async_pf_enabled)) { flags = __this_cpu_read(apf_reason.flags); __this_cpu_write(apf_reason.flags, 0); } return flags; } EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags); noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) { u32 flags = kvm_read_and_reset_apf_flags(); irqentry_state_t state; if (!flags) return false; state = irqentry_enter(regs); instrumentation_begin(); /* * If the host managed to inject an async #PF into an interrupt * disabled region, then die hard as this is not going to end well * and the host side is seriously broken. */ if (unlikely(!(regs->flags & X86_EFLAGS_IF))) panic("Host injected async #PF in interrupt disabled region\n"); if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { if (unlikely(!(user_mode(regs)))) panic("Host injected async #PF in kernel mode\n"); /* Page is swapped out by the host. */ kvm_async_pf_task_wait_schedule(token); } else { WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags); } instrumentation_end(); irqentry_exit(regs, state); return true; } DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt) { struct pt_regs *old_regs = set_irq_regs(regs); u32 token; apic_eoi(); inc_irq_stat(irq_hv_callback_count); if (__this_cpu_read(async_pf_enabled)) { token = __this_cpu_read(apf_reason.token); kvm_async_pf_task_wake(token); __this_cpu_write(apf_reason.token, 0); wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1); } set_irq_regs(old_regs); } static void __init paravirt_ops_setup(void) { pv_info.name = "KVM"; if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) pv_ops.cpu.io_delay = kvm_io_delay; #ifdef CONFIG_X86_IO_APIC no_timer_check = 1; #endif } static void kvm_register_steal_time(void) { int cpu = smp_processor_id(); struct kvm_steal_time *st = &per_cpu(steal_time, cpu); if (!has_steal_clock) return; wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); pr_debug("stealtime: cpu %d, msr %llx\n", cpu, (unsigned long long) slow_virt_to_phys(st)); } static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED; static notrace __maybe_unused void kvm_guest_apic_eoi_write(void) { /** * This relies on __test_and_clear_bit to modify the memory * in a way that is atomic with respect to the local CPU. * The hypervisor only accesses this memory from the local CPU so * there's no need for lock or memory barriers. * An optimization barrier is implied in apic write. */ if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) return; apic_native_eoi(); } static void kvm_guest_cpu_init(void) { if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) { u64 pa; WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled)); pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR); wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); __this_cpu_write(async_pf_enabled, true); pr_debug("setup async PF for cpu %d\n", smp_processor_id()); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { unsigned long pa; /* Size alignment is implied but just to make it explicit. 
*/ BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); __this_cpu_write(kvm_apic_eoi, 0); pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) | KVM_MSR_ENABLED; wrmsrl(MSR_KVM_PV_EOI_EN, pa); } if (has_steal_clock) kvm_register_steal_time(); } static void kvm_pv_disable_apf(void) { if (!__this_cpu_read(async_pf_enabled)) return; wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); __this_cpu_write(async_pf_enabled, false); pr_debug("disable async PF for cpu %d\n", smp_processor_id()); } static void kvm_disable_steal_time(void) { if (!has_steal_clock) return; wrmsr(MSR_KVM_STEAL_TIME, 0, 0); } static u64 kvm_steal_clock(int cpu) { u64 steal; struct kvm_steal_time *src; int version; src = &per_cpu(steal_time, cpu); do { version = src->version; virt_rmb(); steal = src->steal; virt_rmb(); } while ((version & 1) || (version != src->version)); return steal; } static inline void __set_percpu_decrypted(void *ptr, unsigned long size) { early_set_memory_decrypted((unsigned long) ptr, size); } /* * Iterate through all possible CPUs and map the memory region pointed * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once. * * Note: we iterate through all possible CPUs to ensure that CPUs * hotplugged will have their per-cpu variable already mapped as * decrypted. */ static void __init sev_map_percpu_data(void) { int cpu; if (cc_vendor != CC_VENDOR_AMD || !cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) return; for_each_possible_cpu(cpu) { __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason)); __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time)); __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi)); } } static void kvm_guest_cpu_offline(bool shutdown) { kvm_disable_steal_time(); if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) wrmsrl(MSR_KVM_PV_EOI_EN, 0); if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0); kvm_pv_disable_apf(); if (!shutdown) apf_task_wake_all(); kvmclock_disable(); } static int kvm_cpu_online(unsigned int cpu) { unsigned long flags; local_irq_save(flags); kvm_guest_cpu_init(); local_irq_restore(flags); return 0; } #ifdef CONFIG_SMP static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask); static bool pv_tlb_flush_supported(void) { return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && !kvm_para_has_hint(KVM_HINTS_REALTIME) && kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && !boot_cpu_has(X86_FEATURE_MWAIT) && (num_possible_cpus() != 1)); } static bool pv_ipi_supported(void) { return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) && (num_possible_cpus() != 1)); } static bool pv_sched_yield_supported(void) { return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && !kvm_para_has_hint(KVM_HINTS_REALTIME) && kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && !boot_cpu_has(X86_FEATURE_MWAIT) && (num_possible_cpus() != 1)); } #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) static void __send_ipi_mask(const struct cpumask *mask, int vector) { unsigned long flags; int cpu, min = 0, max = 0; #ifdef CONFIG_X86_64 __uint128_t ipi_bitmap = 0; #else u64 ipi_bitmap = 0; #endif u32 apic_id, icr; long ret; if (cpumask_empty(mask)) return; local_irq_save(flags); switch (vector) { default: icr = APIC_DM_FIXED | vector; break; case NMI_VECTOR: icr = APIC_DM_NMI; break; } for_each_cpu(cpu, mask) { apic_id = per_cpu(x86_cpu_to_apicid, cpu); if (!ipi_bitmap) { min = max = apic_id; } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { ipi_bitmap <<= min - apic_id; min = apic_id; } else if (apic_id > min && apic_id < min + 
KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? max : apic_id; } else { ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", ret); min = max = apic_id; ipi_bitmap = 0; } __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap); } if (ipi_bitmap) { ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", ret); } local_irq_restore(flags); } static void kvm_send_ipi_mask(const struct cpumask *mask, int vector) { __send_ipi_mask(mask, vector); } static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) { unsigned int this_cpu = smp_processor_id(); struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); const struct cpumask *local_mask; cpumask_copy(new_mask, mask); cpumask_clear_cpu(this_cpu, new_mask); local_mask = new_mask; __send_ipi_mask(local_mask, vector); } static int __init setup_efi_kvm_sev_migration(void) { efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled"; efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID; efi_status_t status; unsigned long size; bool enabled; if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) || !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) return 0; if (!efi_enabled(EFI_BOOT)) return 0; if (!efi_enabled(EFI_RUNTIME_SERVICES)) { pr_info("%s : EFI runtime services are not enabled\n", __func__); return 0; } size = sizeof(enabled); /* Get variable contents into buffer */ status = efi.get_variable(efi_sev_live_migration_enabled, &efi_variable_guid, NULL, &size, &enabled); if (status == EFI_NOT_FOUND) { pr_info("%s : EFI live migration variable not found\n", __func__); return 0; } if (status != EFI_SUCCESS) { pr_info("%s : EFI variable retrieval failed\n", __func__); return 0; } if (enabled == 0) { pr_info("%s: live migration disabled in EFI\n", __func__); return 0; } pr_info("%s : live migration enabled in EFI\n", __func__); wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); return 1; } late_initcall(setup_efi_kvm_sev_migration); /* * Set the IPI entry points */ static __init void kvm_setup_pv_ipi(void) { apic_update_callback(send_IPI_mask, kvm_send_ipi_mask); apic_update_callback(send_IPI_mask_allbutself, kvm_send_ipi_mask_allbutself); pr_info("setup PV IPIs\n"); } static void kvm_smp_send_call_func_ipi(const struct cpumask *mask) { int cpu; native_send_call_func_ipi(mask); /* Make sure other vCPUs get a chance to run if they need to. */ for_each_cpu(cpu, mask) { if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) { kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu)); break; } } } static void kvm_flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info) { u8 state; int cpu; struct kvm_steal_time *src; struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); cpumask_copy(flushmask, cpumask); /* * We have to call flush only on online vCPUs. And * queue flush_on_enter for pre-empted vCPUs */ for_each_cpu(cpu, flushmask) { /* * The local vCPU is never preempted, so we do not explicitly * skip check for local vCPU - it will never be cleared from * flushmask. 
*/ src = &per_cpu(steal_time, cpu); state = READ_ONCE(src->preempted); if ((state & KVM_VCPU_PREEMPTED)) { if (try_cmpxchg(&src->preempted, &state, state | KVM_VCPU_FLUSH_TLB)) __cpumask_clear_cpu(cpu, flushmask); } } native_flush_tlb_multi(flushmask, info); } static __init int kvm_alloc_cpumask(void) { int cpu; if (!kvm_para_available() || nopv) return 0; if (pv_tlb_flush_supported() || pv_ipi_supported()) for_each_possible_cpu(cpu) { zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), GFP_KERNEL, cpu_to_node(cpu)); } return 0; } arch_initcall(kvm_alloc_cpumask); static void __init kvm_smp_prepare_boot_cpu(void) { /* * Map the per-cpu variables as decrypted before kvm_guest_cpu_init() * shares the guest physical address with the hypervisor. */ sev_map_percpu_data(); kvm_guest_cpu_init(); native_smp_prepare_boot_cpu(); kvm_spinlock_init(); } static int kvm_cpu_down_prepare(unsigned int cpu) { unsigned long flags; local_irq_save(flags); kvm_guest_cpu_offline(false); local_irq_restore(flags); return 0; } #endif static int kvm_suspend(void) { u64 val = 0; kvm_guest_cpu_offline(false); #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) rdmsrl(MSR_KVM_POLL_CONTROL, val); has_guest_poll = !(val & 1); #endif return 0; } static void kvm_resume(void) { kvm_cpu_online(raw_smp_processor_id()); #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll) wrmsrl(MSR_KVM_POLL_CONTROL, 0); #endif } static struct syscore_ops kvm_syscore_ops = { .suspend = kvm_suspend, .resume = kvm_resume, }; static void kvm_pv_guest_cpu_reboot(void *unused) { kvm_guest_cpu_offline(true); } static int kvm_pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) { if (code == SYS_RESTART) on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1); return NOTIFY_DONE; } static struct notifier_block kvm_pv_reboot_nb = { .notifier_call = kvm_pv_reboot_notify, }; /* * After a PV feature is registered, the host will keep writing to the * registered memory location. If the guest happens to shutdown, this memory * won't be valid. In cases like kexec, in which you install a new kernel, this * means a random memory location will be kept being written. */ #ifdef CONFIG_CRASH_DUMP static void kvm_crash_shutdown(struct pt_regs *regs) { kvm_guest_cpu_offline(true); native_machine_crash_shutdown(regs); } #endif #if defined(CONFIG_X86_32) || !defined(CONFIG_SMP) bool __kvm_vcpu_is_preempted(long cpu); __visible bool __kvm_vcpu_is_preempted(long cpu) { struct kvm_steal_time *src = &per_cpu(steal_time, cpu); return !!(src->preempted & KVM_VCPU_PREEMPTED); } PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted); #else #include <asm/asm-offsets.h> extern bool __raw_callee_save___kvm_vcpu_is_preempted(long); /* * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and * restoring to/from the stack. 
*/ #define PV_VCPU_PREEMPTED_ASM \ "movq __per_cpu_offset(,%rdi,8), %rax\n\t" \ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \ "setne %al\n\t" DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted, PV_VCPU_PREEMPTED_ASM, .text); #endif static void __init kvm_guest_init(void) { int i; paravirt_ops_setup(); register_reboot_notifier(&kvm_pv_reboot_nb); for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) raw_spin_lock_init(&async_pf_sleepers[i].lock); if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { has_steal_clock = 1; static_call_update(pv_steal_clock, kvm_steal_clock); pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(__kvm_vcpu_is_preempted); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) apic_update_callback(eoi, kvm_guest_apic_eoi_write); if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) { static_branch_enable(&kvm_async_pf_enabled); sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_kvm_asyncpf_interrupt); } #ifdef CONFIG_SMP if (pv_tlb_flush_supported()) { pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi; pr_info("KVM setup pv remote TLB flush\n"); } smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; if (pv_sched_yield_supported()) { smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi; pr_info("setup PV sched yield\n"); } if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online", kvm_cpu_online, kvm_cpu_down_prepare) < 0) pr_err("failed to install cpu hotplug callbacks\n"); #else sev_map_percpu_data(); kvm_guest_cpu_init(); #endif #ifdef CONFIG_CRASH_DUMP machine_ops.crash_shutdown = kvm_crash_shutdown; #endif register_syscore_ops(&kvm_syscore_ops); /* * Hard lockup detection is enabled by default. Disable it, as guests * can get false positives too easily, for example if the host is * overcommitted. */ hardlockup_detector_disable(); } static noinline uint32_t __kvm_cpuid_base(void) { if (boot_cpu_data.cpuid_level < 0) return 0; /* So we don't blow up on old processors */ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return hypervisor_cpuid_base(KVM_SIGNATURE, 0); return 0; } static inline uint32_t kvm_cpuid_base(void) { static int kvm_cpuid_base = -1; if (kvm_cpuid_base == -1) kvm_cpuid_base = __kvm_cpuid_base(); return kvm_cpuid_base; } bool kvm_para_available(void) { return kvm_cpuid_base() != 0; } EXPORT_SYMBOL_GPL(kvm_para_available); unsigned int kvm_arch_para_features(void) { return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES); } unsigned int kvm_arch_para_hints(void) { return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES); } EXPORT_SYMBOL_GPL(kvm_arch_para_hints); static uint32_t __init kvm_detect(void) { return kvm_cpuid_base(); } static void __init kvm_apic_init(void) { #ifdef CONFIG_SMP if (pv_ipi_supported()) kvm_setup_pv_ipi(); #endif } static bool __init kvm_msi_ext_dest_id(void) { return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID); } static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) { kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages, KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K); } static void __init kvm_init_platform(void) { if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) { unsigned long nr_pages; int i; pv_ops.mmu.notify_page_enc_status_changed = kvm_sev_hc_page_enc_status; /* * Reset the host's shared pages list related to kernel * specific page encryption status settings before we load a * new kernel by kexec. 
Reset the page encryption status * during early boot instead of just before kexec to avoid SMP * races during kvm_pv_guest_cpu_reboot(). * NOTE: We cannot reset the complete shared pages list * here as we need to retain the UEFI/OVMF firmware * specific settings. */ for (i = 0; i < e820_table->nr_entries; i++) { struct e820_entry *entry = &e820_table->entries[i]; if (entry->type != E820_TYPE_RAM) continue; nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE); kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr, nr_pages, KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K); } /* * Ensure that _bss_decrypted section is marked as decrypted in the * shared pages list. */ early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted, __end_bss_decrypted - __start_bss_decrypted, 0); /* * If not booted using EFI, enable Live migration support. */ if (!efi_enabled(EFI_BOOT)) wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); } kvmclock_init(); x86_platform.apic_post_init = kvm_apic_init; /* Set WB as the default cache mode for SEV-SNP and TDX */ guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK); } #if defined(CONFIG_AMD_MEM_ENCRYPT) static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs) { /* RAX and CPL are already in the GHCB */ ghcb_set_rbx(ghcb, regs->bx); ghcb_set_rcx(ghcb, regs->cx); ghcb_set_rdx(ghcb, regs->dx); ghcb_set_rsi(ghcb, regs->si); } static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) { /* No checking of the return state needed */ return true; } #endif const __initconst struct hypervisor_x86 x86_hyper_kvm = { .name = "KVM", .detect = kvm_detect, .type = X86_HYPER_KVM, .init.guest_late_init = kvm_guest_init, .init.x2apic_available = kvm_para_available, .init.msi_ext_dest_id = kvm_msi_ext_dest_id, .init.init_platform = kvm_init_platform, #if defined(CONFIG_AMD_MEM_ENCRYPT) .runtime.sev_es_hcall_prepare = kvm_sev_es_hcall_prepare, .runtime.sev_es_hcall_finish = kvm_sev_es_hcall_finish, #endif }; static __init int activate_jump_labels(void) { if (has_steal_clock) { static_key_slow_inc(¶virt_steal_enabled); if (steal_acc) static_key_slow_inc(¶virt_steal_rq_enabled); } return 0; } arch_initcall(activate_jump_labels); #ifdef CONFIG_PARAVIRT_SPINLOCKS /* Kick a cpu by its apicid. Used to wake up a halted vcpu */ static void kvm_kick_cpu(int cpu) { unsigned long flags = 0; u32 apicid; apicid = per_cpu(x86_cpu_to_apicid, cpu); kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid); } #include <asm/qspinlock.h> static void kvm_wait(u8 *ptr, u8 val) { if (in_nmi()) return; /* * halt until it's our turn and kicked. Note that we do safe halt * for irq enabled case to avoid hang when lock info is overwritten * in irq spinlock slowpath and no spurious interrupt occur to save us. */ if (irqs_disabled()) { if (READ_ONCE(*ptr) == val) halt(); } else { local_irq_disable(); /* safe_halt() will enable IRQ */ if (READ_ONCE(*ptr) == val) safe_halt(); else local_irq_enable(); } } /* * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present. */ void __init kvm_spinlock_init(void) { /* * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is * preferred over native qspinlock when vCPU is preempted. */ if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) { pr_info("PV spinlocks disabled, no host support\n"); return; } /* * Disable PV spinlocks and use native qspinlock when dedicated pCPUs * are available. 
*/ if (kvm_para_has_hint(KVM_HINTS_REALTIME)) { pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n"); goto out; } if (num_possible_cpus() == 1) { pr_info("PV spinlocks disabled, single CPU\n"); goto out; } if (nopvspin) { pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n"); goto out; } pr_info("PV spinlocks enabled\n"); __pv_init_lock_hash(); pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock); pv_ops.lock.wait = kvm_wait; pv_ops.lock.kick = kvm_kick_cpu; /* * When PV spinlocks are enabled they are preferred over * virt_spin_lock(), so virt_spin_lock_key's value is meaningless. * Just disable it anyway. */ out: static_branch_disable(&virt_spin_lock_key); } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL static void kvm_disable_host_haltpoll(void *i) { wrmsrl(MSR_KVM_POLL_CONTROL, 0); } static void kvm_enable_host_haltpoll(void *i) { wrmsrl(MSR_KVM_POLL_CONTROL, 1); } void arch_haltpoll_enable(unsigned int cpu) { if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) { pr_err_once("host does not support poll control\n"); pr_err_once("host upgrade recommended\n"); return; } /* Enabling guest halt poll disables host halt poll */ smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1); } EXPORT_SYMBOL_GPL(arch_haltpoll_enable); void arch_haltpoll_disable(unsigned int cpu) { if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) return; /* Disabling guest halt poll re-enables host halt poll */ smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1); } EXPORT_SYMBOL_GPL(arch_haltpoll_disable); #endif
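/*
 * Editor's illustrative sketch (not part of kvm.c): a self-contained,
 * single-threaded userspace model of the even/odd version protocol that
 * kvm_steal_clock() above relies on when reading steal_time. The struct and
 * function names here are hypothetical; the real kernel code additionally
 * issues virt_rmb() between the loads, which this simplified model omits.
 */
#include <stdint.h>
#include <stdio.h>

struct versioned_value {
	volatile uint32_t version;	/* odd while an update is in flight */
	volatile uint64_t value;
};

/* Writer side: bump version to odd, update the payload, bump back to even. */
static void versioned_write(struct versioned_value *v, uint64_t new_value)
{
	v->version++;		/* now odd: readers will retry */
	v->value = new_value;
	v->version++;		/* now even: the snapshot is stable again */
}

/* Reader side: retry while an update is in flight or the version changed. */
static uint64_t versioned_read(const struct versioned_value *v)
{
	uint32_t version;
	uint64_t value;

	do {
		version = v->version;
		value = v->value;
	} while ((version & 1) || (version != v->version));

	return value;
}

int main(void)
{
	struct versioned_value steal = { .version = 0, .value = 0 };

	versioned_write(&steal, 42);
	printf("steal time snapshot: %llu\n",
	       (unsigned long long)versioned_read(&steal));
	return 0;
}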
// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
*/ call_head = b->call_backlog_head; call_tail = READ_ONCE(b->call_backlog_tail); tmp = CIRC_CNT(call_head, call_tail, size); if (tmp >= max) { _leave(" = -ENOBUFS [enough %u]", tmp); return -ENOBUFS; } max = tmp + 1; head = b->peer_backlog_head; tail = READ_ONCE(b->peer_backlog_tail); if (CIRC_CNT(head, tail, size) < max) { struct rxrpc_peer *peer; peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc); if (!peer) return -ENOMEM; b->peer_backlog[head] = peer; smp_store_release(&b->peer_backlog_head, (head + 1) & (size - 1)); } head = b->conn_backlog_head; tail = READ_ONCE(b->conn_backlog_tail); if (CIRC_CNT(head, tail, size) < max) { struct rxrpc_connection *conn; conn = rxrpc_prealloc_service_connection(rxnet, gfp); if (!conn) return -ENOMEM; b->conn_backlog[head] = conn; smp_store_release(&b->conn_backlog_head, (head + 1) & (size - 1)); } /* Now it gets complicated, because calls get registered with the * socket here, with a user ID preassigned by the user. */ call = rxrpc_alloc_call(rx, gfp, debug_id); if (!call) return -ENOMEM; call->flags |= (1 << RXRPC_CALL_IS_SERVICE); rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC); __set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events); trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), user_call_ID, rxrpc_call_new_prealloc_service); write_lock(&rx->call_lock); /* Check the user ID isn't already in use */ pp = &rx->calls.rb_node; parent = NULL; while (*pp) { parent = *pp; xcall = rb_entry(parent, struct rxrpc_call, sock_node); if (user_call_ID < xcall->user_call_ID) pp = &(*pp)->rb_left; else if (user_call_ID > xcall->user_call_ID) pp = &(*pp)->rb_right; else goto id_in_use; } call->user_call_ID = user_call_ID; call->notify_rx = notify_rx; if (user_attach_call) { rxrpc_get_call(call, rxrpc_call_get_kernel_service); user_attach_call(call, user_call_ID); } rxrpc_get_call(call, rxrpc_call_get_userid); rb_link_node(&call->sock_node, parent, pp); rb_insert_color(&call->sock_node, &rx->calls); set_bit(RXRPC_CALL_HAS_USERID, &call->flags); list_add(&call->sock_link, &rx->sock_calls); write_unlock(&rx->call_lock); rxnet = call->rxnet; spin_lock(&rxnet->call_lock); list_add_tail_rcu(&call->link, &rxnet->calls); spin_unlock(&rxnet->call_lock); b->call_backlog[call_head] = call; smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1)); _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID); return 0; id_in_use: write_unlock(&rx->call_lock); rxrpc_cleanup_call(call); _leave(" = -EBADSLT"); return -EBADSLT; } /* * Allocate the preallocation buffers for incoming service calls. These must * be charged manually. */ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) { struct rxrpc_backlog *b = rx->backlog; if (!b) { b = kzalloc(sizeof(struct rxrpc_backlog), gfp); if (!b) return -ENOMEM; rx->backlog = b; } return 0; } /* * Discard the preallocation on a service. */ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) { struct rxrpc_backlog *b = rx->backlog; struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); unsigned int size = RXRPC_BACKLOG_MAX, head, tail; if (!b) return; rx->backlog = NULL; /* Make sure that there aren't any incoming calls in progress before we * clear the preallocation buffers. 
*/ spin_lock_irq(&rx->incoming_lock); spin_unlock_irq(&rx->incoming_lock); head = b->peer_backlog_head; tail = b->peer_backlog_tail; while (CIRC_CNT(head, tail, size) > 0) { struct rxrpc_peer *peer = b->peer_backlog[tail]; rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_peer); kfree(peer); tail = (tail + 1) & (size - 1); } head = b->conn_backlog_head; tail = b->conn_backlog_tail; while (CIRC_CNT(head, tail, size) > 0) { struct rxrpc_connection *conn = b->conn_backlog[tail]; write_lock(&rxnet->conn_lock); list_del(&conn->link); list_del(&conn->proc_link); write_unlock(&rxnet->conn_lock); kfree(conn); if (atomic_dec_and_test(&rxnet->nr_conns)) wake_up_var(&rxnet->nr_conns); tail = (tail + 1) & (size - 1); } head = b->call_backlog_head; tail = b->call_backlog_tail; while (CIRC_CNT(head, tail, size) > 0) { struct rxrpc_call *call = b->call_backlog[tail]; rcu_assign_pointer(call->socket, rx); if (rx->discard_new_call) { _debug("discard %lx", call->user_call_ID); rx->discard_new_call(call, call->user_call_ID); if (call->notify_rx) call->notify_rx = rxrpc_dummy_notify; rxrpc_put_call(call, rxrpc_call_put_kernel); } rxrpc_call_completed(call); rxrpc_release_call(rx, call); rxrpc_put_call(call, rxrpc_call_put_discard_prealloc); tail = (tail + 1) & (size - 1); } kfree(b); } /* * Allocate a new incoming call from the prealloc pool, along with a connection * and a peer as necessary. */ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_local *local, struct rxrpc_peer *peer, struct rxrpc_connection *conn, const struct rxrpc_security *sec, struct sockaddr_rxrpc *peer_srx, struct sk_buff *skb) { struct rxrpc_backlog *b = rx->backlog; struct rxrpc_call *call; unsigned short call_head, conn_head, peer_head; unsigned short call_tail, conn_tail, peer_tail; unsigned short call_count, conn_count; /* #calls >= #conns >= #peers must hold true. 
*/ call_head = smp_load_acquire(&b->call_backlog_head); call_tail = b->call_backlog_tail; call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX); conn_head = smp_load_acquire(&b->conn_backlog_head); conn_tail = b->conn_backlog_tail; conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX); ASSERTCMP(conn_count, >=, call_count); peer_head = smp_load_acquire(&b->peer_backlog_head); peer_tail = b->peer_backlog_tail; ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=, conn_count); if (call_count == 0) return NULL; if (!conn) { if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn)) peer = NULL; if (!peer) { peer = b->peer_backlog[peer_tail]; peer->srx = *peer_srx; b->peer_backlog[peer_tail] = NULL; smp_store_release(&b->peer_backlog_tail, (peer_tail + 1) & (RXRPC_BACKLOG_MAX - 1)); rxrpc_new_incoming_peer(local, peer); } /* Now allocate and set up the connection */ conn = b->conn_backlog[conn_tail]; b->conn_backlog[conn_tail] = NULL; smp_store_release(&b->conn_backlog_tail, (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1)); conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn); conn->peer = peer; rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn); rxrpc_new_incoming_connection(rx, conn, sec, skb); } else { rxrpc_get_connection(conn, rxrpc_conn_get_service_conn); atomic_inc(&conn->active); } /* And now we can allocate and set up a new call */ call = b->call_backlog[call_tail]; b->call_backlog[call_tail] = NULL; smp_store_release(&b->call_backlog_tail, (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1)); rxrpc_see_call(call, rxrpc_call_see_accept); call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call); call->conn = conn; call->security = conn->security; call->security_ix = conn->security_ix; call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept); call->dest_srx = peer->srx; call->cong_ssthresh = call->peer->cong_ssthresh; call->tx_last_sent = ktime_get_real(); return call; } /* * Set up a new incoming call. Called from the I/O thread. * * If this is for a kernel service, when we allocate the call, it will have * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the * retainer ref obtained from the backlog buffer. Prealloc calls for userspace * services only have the ref from the backlog buffer. * * If we want to report an error, we mark the skb with the packet type and * abort code and return false. */ bool rxrpc_new_incoming_call(struct rxrpc_local *local, struct rxrpc_peer *peer, struct rxrpc_connection *conn, struct sockaddr_rxrpc *peer_srx, struct sk_buff *skb) { const struct rxrpc_security *sec = NULL; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_call *call = NULL; struct rxrpc_sock *rx; _enter(""); /* Don't set up a call for anything other than a DATA packet. */ if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA) return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call); read_lock_irq(&local->services_lock); /* Weed out packets to services we're not offering. Packets that would * begin a call are explicitly rejected and the rest are just * discarded. 
*/ rx = local->service; if (!rx || (sp->hdr.serviceId != rx->srx.srx_service && sp->hdr.serviceId != rx->second_service) ) { if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && sp->hdr.seq == 1) goto unsupported_service; goto discard; } if (!conn) { sec = rxrpc_get_incoming_security(rx, skb); if (!sec) goto unsupported_security; } spin_lock(&rx->incoming_lock); if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED || rx->sk.sk_state == RXRPC_CLOSE) { rxrpc_direct_abort(skb, rxrpc_abort_shut_down, RX_INVALID_OPERATION, -ESHUTDOWN); goto no_call; } call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx, skb); if (!call) { skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; goto no_call; } trace_rxrpc_receive(call, rxrpc_receive_incoming, sp->hdr.serial, sp->hdr.seq); /* Make the call live. */ rxrpc_incoming_call(rx, call, skb); conn = call->conn; if (rx->notify_new_call) rx->notify_new_call(&rx->sk, call, call->user_call_ID); spin_lock(&conn->state_lock); if (conn->state == RXRPC_CONN_SERVICE_UNSECURED) { conn->state = RXRPC_CONN_SERVICE_CHALLENGING; set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events); rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge); } spin_unlock(&conn->state_lock); spin_unlock(&rx->incoming_lock); read_unlock_irq(&local->services_lock); if (hlist_unhashed(&call->error_link)) { spin_lock_irq(&call->peer->lock); hlist_add_head(&call->error_link, &call->peer->error_targets); spin_unlock_irq(&call->peer->lock); } _leave(" = %p{%d}", call, call->debug_id); rxrpc_queue_rx_call_packet(call, skb); rxrpc_put_call(call, rxrpc_call_put_input); return true; unsupported_service: read_unlock_irq(&local->services_lock); return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered, RX_INVALID_OPERATION, -EOPNOTSUPP); unsupported_security: read_unlock_irq(&local->services_lock); return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered, RX_INVALID_OPERATION, -EKEYREJECTED); no_call: spin_unlock(&rx->incoming_lock); read_unlock_irq(&local->services_lock); _leave(" = f [%u]", skb->mark); return false; discard: read_unlock_irq(&local->services_lock); return true; } /* * Charge up socket with preallocated calls, attaching user call IDs. */ int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID) { struct rxrpc_backlog *b = rx->backlog; if (rx->sk.sk_state == RXRPC_CLOSE) return -ESHUTDOWN; return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID, GFP_KERNEL, atomic_inc_return(&rxrpc_debug_id)); } /* * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls * @sock: The socket on which to preallocate * @notify_rx: Event notification function for the call * @user_attach_call: Func to attach call to user_call_ID * @user_call_ID: The tag to attach to the preallocated call * @gfp: The allocation conditions. * @debug_id: The tracing debug ID. * * Charge up the socket with preallocated calls, each with a user ID. A * function should be provided to effect the attachment from the user's side. * The user is given a ref to hold on the call. * * Note that the call may be come connected before this function returns. 
*/ int rxrpc_kernel_charge_accept(struct socket *sock, rxrpc_notify_rx_t notify_rx, rxrpc_user_attach_call_t user_attach_call, unsigned long user_call_ID, gfp_t gfp, unsigned int debug_id) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); struct rxrpc_backlog *b = rx->backlog; if (sock->sk->sk_state == RXRPC_CLOSE) return -ESHUTDOWN; return rxrpc_service_prealloc_one(rx, b, notify_rx, user_attach_call, user_call_ID, gfp, debug_id); } EXPORT_SYMBOL(rxrpc_kernel_charge_accept); |
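/*
 * A minimal single-producer/single-consumer sketch of the backlog rings used
 * above: rxrpc keeps three power-of-two rings (peers, conns, calls) where the
 * preallocator publishes entries by advancing *_head with smp_store_release()
 * and the I/O thread consumes them via smp_load_acquire(), with CIRC_CNT()
 * and "& (RXRPC_BACKLOG_MAX - 1)" doing the occupancy and wrap arithmetic.
 * The C11 version below mirrors that head/tail discipline; the ring size,
 * element type and helper names are made up for illustration.
 */
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE	16			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

struct ring {
	_Atomic unsigned int head;		/* advanced by the producer */
	_Atomic unsigned int tail;		/* advanced by the consumer */
	int slots[RING_SIZE];
};

static unsigned int ring_cnt(unsigned int head, unsigned int tail)
{
	return (head - tail) & RING_MASK;	/* same formula as CIRC_CNT() */
}

/* Producer: fill the slot, then publish it with a release store, mirroring
 * smp_store_release(&b->call_backlog_head, ...). */
static int ring_push(struct ring *r, int v)
{
	unsigned int head = atomic_load_explicit(&r->head, memory_order_relaxed);
	unsigned int tail = atomic_load_explicit(&r->tail, memory_order_acquire);

	if (ring_cnt(head, tail) == RING_SIZE - 1)
		return -1;			/* full: one slot kept free */
	r->slots[head & RING_MASK] = v;
	atomic_store_explicit(&r->head, (head + 1) & RING_MASK,
			      memory_order_release);
	return 0;
}

/* Consumer: acquire-load the head so the slot contents are visible, mirroring
 * smp_load_acquire(&b->call_backlog_head) in rxrpc_alloc_incoming_call(). */
static int ring_pop(struct ring *r, int *v)
{
	unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);
	unsigned int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	if (ring_cnt(head, tail) == 0)
		return -1;			/* empty */
	*v = r->slots[tail & RING_MASK];
	atomic_store_explicit(&r->tail, (tail + 1) & RING_MASK,
			      memory_order_release);
	return 0;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };
	int v;

	ring_push(&r, 42);
	if (ring_pop(&r, &v) == 0)
		printf("popped %d\n", v);
	return 0;
}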
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_dscp.h>

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp)
{
	int err;
	struct socket *sock = NULL;
	struct sockaddr_in udp_addr;

	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
	if (err < 0)
		goto error;

	if (cfg->bind_ifindex) {
		err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
		if (err < 0)
			goto error;
	}

	udp_addr.sin_family = AF_INET;
	udp_addr.sin_addr = cfg->local_ip;
	udp_addr.sin_port = cfg->local_udp_port;
	err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
			  sizeof(udp_addr));
	if (err < 0)
		goto error;

	if (cfg->peer_udp_port) {
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = cfg->peer_udp_port;
		err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
				     sizeof(udp_addr), 0);
		if (err < 0)
			goto error;
	}

	sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

	*sockp = sock;
	return 0;

error:
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
	}
	*sockp = NULL;
	return err;
}
EXPORT_SYMBOL(udp_sock_create4);

void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *cfg)
{
	struct sock *sk = sock->sk;

	/* Disable multicast loopback */
	inet_clear_bit(MC_LOOP, sk);

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(sk);

	rcu_assign_sk_user_data(sk, cfg->sk_user_data);

	udp_sk(sk)->encap_type = cfg->encap_type;
	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
	udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
	udp_sk(sk)->gro_receive = cfg->gro_receive;
	udp_sk(sk)->gro_complete = cfg->gro_complete;

	udp_tunnel_encap_enable(sk);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);

void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_add_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);

void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port =
inet_sk(sk)->inet_sport; udp_tunnel_nic_del_port(dev, &ti); } EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port); /* Notify netdevs that UDP port started listening */ void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct udp_tunnel_info ti; struct net_device *dev; ti.type = type; ti.sa_family = sk->sk_family; ti.port = inet_sk(sk)->inet_sport; rcu_read_lock(); for_each_netdev_rcu(net, dev) { udp_tunnel_nic_add_port(dev, &ti); } rcu_read_unlock(); } EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port); /* Notify netdevs that UDP port is no more listening */ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct udp_tunnel_info ti; struct net_device *dev; ti.type = type; ti.sa_family = sk->sk_family; ti.port = inet_sk(sk)->inet_sport; rcu_read_lock(); for_each_netdev_rcu(net, dev) { udp_tunnel_nic_del_port(dev, &ti); } rcu_read_unlock(); } EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port); void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, bool xnet, bool nocheck) { struct udphdr *uh; __skb_push(skb, sizeof(*uh)); skb_reset_transport_header(skb); uh = udp_hdr(skb); uh->dest = dst_port; uh->source = src_port; uh->len = htons(skb->len); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); udp_set_csum(nocheck, skb, src, dst, skb->len); iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet); } EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); void udp_tunnel_sock_release(struct socket *sock) { rcu_assign_sk_user_data(sock->sk, NULL); synchronize_rcu(); kernel_sock_shutdown(sock, SHUT_RDWR); sock_release(sock); } EXPORT_SYMBOL_GPL(udp_tunnel_sock_release); struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, const unsigned long *flags, __be64 tunnel_id, int md_size) { struct metadata_dst *tun_dst; struct ip_tunnel_info *info; if (family == AF_INET) tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size); else tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size); if (!tun_dst) return NULL; info = &tun_dst->u.tun_info; info->key.tp_src = udp_hdr(skb)->source; info->key.tp_dst = udp_hdr(skb)->dest; if (udp_hdr(skb)->check) __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); return tun_dst; } EXPORT_SYMBOL_GPL(udp_tun_rx_dst); struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, struct net_device *dev, struct net *net, int oif, __be32 *saddr, const struct ip_tunnel_key *key, __be16 sport, __be16 dport, u8 tos, struct dst_cache *dst_cache) { struct rtable *rt = NULL; struct flowi4 fl4; #ifdef CONFIG_DST_CACHE if (dst_cache) { rt = dst_cache_get_ip4(dst_cache, saddr); if (rt) return rt; } #endif memset(&fl4, 0, sizeof(fl4)); fl4.flowi4_mark = skb->mark; fl4.flowi4_proto = IPPROTO_UDP; fl4.flowi4_oif = oif; fl4.daddr = key->u.ipv4.dst; fl4.saddr = key->u.ipv4.src; fl4.fl4_dport = dport; fl4.fl4_sport = sport; fl4.flowi4_tos = tos & INET_DSCP_MASK; fl4.flowi4_flags = key->flow_flags; rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr); return ERR_PTR(-ENETUNREACH); } if (rt->dst.dev == dev) { /* is this necessary? 
*/ netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr); ip_rt_put(rt); return ERR_PTR(-ELOOP); } #ifdef CONFIG_DST_CACHE if (dst_cache) dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); #endif *saddr = fl4.saddr; return rt; } EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup); MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver"); MODULE_LICENSE("GPL"); |
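/*
 * A minimal sketch of how a tunnel driver typically combines the helpers
 * above: open a kernel UDP socket on the tunnel port with udp_sock_create4(),
 * then hand it to setup_udp_tunnel_sock() with an encap_rcv callback so that
 * matching datagrams are diverted to the driver instead of the normal UDP
 * receive path (vxlan and geneve follow this pattern). The foo_* names are
 * hypothetical; udp_tunnel_sock_release() above is the matching teardown.
 */
#include <net/udp_tunnel.h>

/* Hypothetical per-tunnel receive hook; returning 0 means the skb was consumed. */
static int foo_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	/* A real driver would decapsulate here; this sketch just drops it. */
	kfree_skb(skb);
	return 0;
}

static int foo_open_tunnel_sock(struct net *net, __be16 port,
				struct socket **sockp)
{
	struct udp_tunnel_sock_cfg tunnel_cfg = { };
	struct udp_port_cfg udp_conf = { };
	int err;

	udp_conf.family = AF_INET;
	udp_conf.local_udp_port = port;

	err = udp_sock_create4(net, &udp_conf, sockp);
	if (err < 0)
		return err;

	tunnel_cfg.sk_user_data = NULL;	/* usually the driver's private state */
	tunnel_cfg.encap_type = 1;	/* any non-zero value enables the encap path */
	tunnel_cfg.encap_rcv = foo_udp_encap_recv;
	setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
	return 0;
}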
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

/* Mapped
buffer ring, return io_uring_buf from head */ #define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)] struct io_provide_buf { struct file *file; __u64 addr; __u32 len; __u32 bgid; __u32 nbufs; __u16 bid; }; static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len) { while (len) { struct io_uring_buf *buf; u32 this_len; buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask); this_len = min_t(int, len, buf->len); buf->len -= this_len; if (buf->len) { buf->addr += this_len; return false; } bl->head++; len -= this_len; } return true; } bool io_kbuf_commit(struct io_kiocb *req, struct io_buffer_list *bl, int len, int nr) { if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT))) return true; req->flags &= ~REQ_F_BUFFERS_COMMIT; if (unlikely(len < 0)) return true; if (bl->flags & IOBL_INC) return io_kbuf_inc_commit(bl, len); bl->head += nr; return true; } static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx, unsigned int bgid) { lockdep_assert_held(&ctx->uring_lock); return xa_load(&ctx->io_bl_xa, bgid); } static int io_buffer_add_list(struct io_ring_ctx *ctx, struct io_buffer_list *bl, unsigned int bgid) { /* * Store buffer group ID and finally mark the list as visible. * The normal lookup doesn't care about the visibility as we're * always under the ->uring_lock, but lookups from mmap do. */ bl->bgid = bgid; guard(mutex)(&ctx->mmap_lock); return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); } void io_kbuf_drop_legacy(struct io_kiocb *req) { if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED))) return; req->buf_index = req->kbuf->bgid; req->flags &= ~REQ_F_BUFFER_SELECTED; kfree(req->kbuf); req->kbuf = NULL; } bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; struct io_buffer *buf; io_ring_submit_lock(ctx, issue_flags); buf = req->kbuf; bl = io_buffer_get_list(ctx, buf->bgid); list_add(&buf->list, &bl->buf_list); req->flags &= ~REQ_F_BUFFER_SELECTED; req->buf_index = buf->bgid; io_ring_submit_unlock(ctx, issue_flags); return true; } static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len, struct io_buffer_list *bl) { if (!list_empty(&bl->buf_list)) { struct io_buffer *kbuf; kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list); list_del(&kbuf->list); if (*len == 0 || *len > kbuf->len) *len = kbuf->len; if (list_empty(&bl->buf_list)) req->flags |= REQ_F_BL_EMPTY; req->flags |= REQ_F_BUFFER_SELECTED; req->kbuf = kbuf; req->buf_index = kbuf->bid; return u64_to_user_ptr(kbuf->addr); } return NULL; } static int io_provided_buffers_select(struct io_kiocb *req, size_t *len, struct io_buffer_list *bl, struct iovec *iov) { void __user *buf; buf = io_provided_buffer_select(req, len, bl); if (unlikely(!buf)) return -ENOBUFS; iov[0].iov_base = buf; iov[0].iov_len = *len; return 1; } static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, struct io_buffer_list *bl, unsigned int issue_flags) { struct io_uring_buf_ring *br = bl->buf_ring; __u16 tail, head = bl->head; struct io_uring_buf *buf; void __user *ret; tail = smp_load_acquire(&br->tail); if (unlikely(tail == head)) return NULL; if (head + 1 == tail) req->flags |= REQ_F_BL_EMPTY; buf = io_ring_head_to_buf(br, head, bl->mask); if (*len == 0 || *len > buf->len) *len = buf->len; req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT; req->buf_list = bl; req->buf_index = buf->bid; ret = u64_to_user_ptr(buf->addr); if (issue_flags & IO_URING_F_UNLOCKED 
|| !io_file_can_poll(req)) { /* * If we came in unlocked, we have no choice but to consume the * buffer here, otherwise nothing ensures that the buffer won't * get used by others. This does mean it'll be pinned until the * IO completes, coming in unlocked means we're being called from * io-wq context and there may be further retries in async hybrid * mode. For the locked case, the caller must call commit when * the transfer completes (or if we get -EAGAIN and must poll of * retry). */ io_kbuf_commit(req, bl, *len, 1); req->buf_list = NULL; } return ret; } void __user *io_buffer_select(struct io_kiocb *req, size_t *len, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; void __user *ret = NULL; io_ring_submit_lock(req->ctx, issue_flags); bl = io_buffer_get_list(ctx, req->buf_index); if (likely(bl)) { if (bl->flags & IOBL_BUF_RING) ret = io_ring_buffer_select(req, len, bl, issue_flags); else ret = io_provided_buffer_select(req, len, bl); } io_ring_submit_unlock(req->ctx, issue_flags); return ret; } /* cap it at a reasonable 256, will be one page even for 4K */ #define PEEK_MAX_IMPORT 256 static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg, struct io_buffer_list *bl) { struct io_uring_buf_ring *br = bl->buf_ring; struct iovec *iov = arg->iovs; int nr_iovs = arg->nr_iovs; __u16 nr_avail, tail, head; struct io_uring_buf *buf; tail = smp_load_acquire(&br->tail); head = bl->head; nr_avail = min_t(__u16, tail - head, UIO_MAXIOV); if (unlikely(!nr_avail)) return -ENOBUFS; buf = io_ring_head_to_buf(br, head, bl->mask); if (arg->max_len) { u32 len = READ_ONCE(buf->len); size_t needed; if (unlikely(!len)) return -ENOBUFS; needed = (arg->max_len + len - 1) / len; needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT); if (nr_avail > needed) nr_avail = needed; } /* * only alloc a bigger array if we know we have data to map, eg not * a speculative peek operation. */ if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) { iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL); if (unlikely(!iov)) return -ENOMEM; if (arg->mode & KBUF_MODE_FREE) kfree(arg->iovs); arg->iovs = iov; nr_iovs = nr_avail; } else if (nr_avail < nr_iovs) { nr_iovs = nr_avail; } /* set it to max, if not set, so we can use it unconditionally */ if (!arg->max_len) arg->max_len = INT_MAX; req->buf_index = buf->bid; do { u32 len = buf->len; /* truncate end piece, if needed, for non partial buffers */ if (len > arg->max_len) { len = arg->max_len; if (!(bl->flags & IOBL_INC)) buf->len = len; } iov->iov_base = u64_to_user_ptr(buf->addr); iov->iov_len = len; iov++; arg->out_len += len; arg->max_len -= len; if (!arg->max_len) break; buf = io_ring_head_to_buf(br, ++head, bl->mask); } while (--nr_iovs); if (head == tail) req->flags |= REQ_F_BL_EMPTY; req->flags |= REQ_F_BUFFER_RING; req->buf_list = bl; return iov - arg->iovs; } int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; int ret = -ENOENT; io_ring_submit_lock(ctx, issue_flags); bl = io_buffer_get_list(ctx, req->buf_index); if (unlikely(!bl)) goto out_unlock; if (bl->flags & IOBL_BUF_RING) { ret = io_ring_buffers_peek(req, arg, bl); /* * Don't recycle these buffers if we need to go through poll. * Nobody else can use them anyway, and holding on to provided * buffers for a send/write operation would happen on the app * side anyway with normal buffers. 
Besides, we already * committed them, they cannot be put back in the queue. */ if (ret > 0) { req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE; io_kbuf_commit(req, bl, arg->out_len, ret); } } else { ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs); } out_unlock: io_ring_submit_unlock(ctx, issue_flags); return ret; } int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg) { struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; int ret; lockdep_assert_held(&ctx->uring_lock); bl = io_buffer_get_list(ctx, req->buf_index); if (unlikely(!bl)) return -ENOENT; if (bl->flags & IOBL_BUF_RING) { ret = io_ring_buffers_peek(req, arg, bl); if (ret > 0) req->flags |= REQ_F_BUFFERS_COMMIT; return ret; } /* don't support multiple buffer selections for legacy */ return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs); } static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr) { struct io_buffer_list *bl = req->buf_list; bool ret = true; if (bl) { ret = io_kbuf_commit(req, bl, len, nr); req->buf_index = bl->bgid; } req->flags &= ~REQ_F_BUFFER_RING; return ret; } unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs) { unsigned int ret; ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT); if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) { io_kbuf_drop_legacy(req); return ret; } if (!__io_put_kbuf_ring(req, len, nbufs)) ret |= IORING_CQE_F_BUF_MORE; return ret; } static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer_list *bl, unsigned nbufs) { unsigned i = 0; /* shouldn't happen */ if (!nbufs) return 0; if (bl->flags & IOBL_BUF_RING) { i = bl->buf_ring->tail - bl->head; io_free_region(ctx, &bl->region); /* make sure it's seen as empty */ INIT_LIST_HEAD(&bl->buf_list); bl->flags &= ~IOBL_BUF_RING; return i; } /* protects io_buffers_cache */ lockdep_assert_held(&ctx->uring_lock); while (!list_empty(&bl->buf_list)) { struct io_buffer *nxt; nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); list_del(&nxt->list); kfree(nxt); if (++i == nbufs) return i; cond_resched(); } return i; } static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) { __io_remove_buffers(ctx, bl, -1U); kfree(bl); } void io_destroy_buffers(struct io_ring_ctx *ctx) { struct io_buffer_list *bl; while (1) { unsigned long index = 0; scoped_guard(mutex, &ctx->mmap_lock) { bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT); if (bl) xa_erase(&ctx->io_bl_xa, bl->bgid); } if (!bl) break; io_put_bl(ctx, bl); } } static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) { scoped_guard(mutex, &ctx->mmap_lock) WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl); io_put_bl(ctx, bl); } int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf); u64 tmp; if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off || sqe->splice_fd_in) return -EINVAL; tmp = READ_ONCE(sqe->fd); if (!tmp || tmp > MAX_BIDS_PER_BGID) return -EINVAL; memset(p, 0, sizeof(*p)); p->nbufs = tmp; p->bgid = READ_ONCE(sqe->buf_group); return 0; } int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) { struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf); struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; int ret = 0; io_ring_submit_lock(ctx, issue_flags); ret = -ENOENT; bl = io_buffer_get_list(ctx, p->bgid); if (bl) { ret = -EINVAL; /* can't use provide/remove 
buffers command on mapped buffers */ if (!(bl->flags & IOBL_BUF_RING)) ret = __io_remove_buffers(ctx, bl, p->nbufs); } io_ring_submit_unlock(ctx, issue_flags); if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); return IOU_OK; } int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { unsigned long size, tmp_check; struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf); u64 tmp; if (sqe->rw_flags || sqe->splice_fd_in) return -EINVAL; tmp = READ_ONCE(sqe->fd); if (!tmp || tmp > MAX_BIDS_PER_BGID) return -E2BIG; p->nbufs = tmp; p->addr = READ_ONCE(sqe->addr); p->len = READ_ONCE(sqe->len); if (!p->len) return -EINVAL; if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs, &size)) return -EOVERFLOW; if (check_add_overflow((unsigned long)p->addr, size, &tmp_check)) return -EOVERFLOW; size = (unsigned long)p->len * p->nbufs; if (!access_ok(u64_to_user_ptr(p->addr), size)) return -EFAULT; p->bgid = READ_ONCE(sqe->buf_group); tmp = READ_ONCE(sqe->off); if (tmp > USHRT_MAX) return -E2BIG; if (tmp + p->nbufs > MAX_BIDS_PER_BGID) return -EINVAL; p->bid = tmp; return 0; } static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf, struct io_buffer_list *bl) { struct io_buffer *buf; u64 addr = pbuf->addr; int i, bid = pbuf->bid; for (i = 0; i < pbuf->nbufs; i++) { buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT); if (!buf) break; list_add_tail(&buf->list, &bl->buf_list); buf->addr = addr; buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT); buf->bid = bid; buf->bgid = pbuf->bgid; addr += pbuf->len; bid++; cond_resched(); } return i ? 0 : -ENOMEM; } int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) { struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf); struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; int ret = 0; io_ring_submit_lock(ctx, issue_flags); bl = io_buffer_get_list(ctx, p->bgid); if (unlikely(!bl)) { bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); if (!bl) { ret = -ENOMEM; goto err; } INIT_LIST_HEAD(&bl->buf_list); ret = io_buffer_add_list(ctx, bl, p->bgid); if (ret) { kfree(bl); goto err; } } /* can't add buffers via this command for a mapped buffer ring */ if (bl->flags & IOBL_BUF_RING) { ret = -EINVAL; goto err; } ret = io_add_buffers(ctx, p, bl); err: io_ring_submit_unlock(ctx, issue_flags); if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); return IOU_OK; } int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) { struct io_uring_buf_reg reg; struct io_buffer_list *bl, *free_bl = NULL; struct io_uring_region_desc rd; struct io_uring_buf_ring *br; unsigned long mmap_offset; unsigned long ring_size; int ret; lockdep_assert_held(&ctx->uring_lock); if (copy_from_user(®, arg, sizeof(reg))) return -EFAULT; if (reg.resv[0] || reg.resv[1] || reg.resv[2]) return -EINVAL; if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC)) return -EINVAL; if (!is_power_of_2(reg.ring_entries)) return -EINVAL; /* cannot disambiguate full vs empty due to head/tail size */ if (reg.ring_entries >= 65536) return -EINVAL; bl = io_buffer_get_list(ctx, reg.bgid); if (bl) { /* if mapped buffer ring OR classic exists, don't allow */ if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list)) return -EEXIST; io_destroy_bl(ctx, bl); } free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL); if (!bl) return -ENOMEM; mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT; ring_size = flex_array_size(br, bufs, reg.ring_entries); memset(&rd, 0, sizeof(rd)); rd.size = 
PAGE_ALIGN(ring_size); if (!(reg.flags & IOU_PBUF_RING_MMAP)) { rd.user_addr = reg.ring_addr; rd.flags |= IORING_MEM_REGION_TYPE_USER; } ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset); if (ret) goto fail; br = io_region_get_ptr(&bl->region); #ifdef SHM_COLOUR /* * On platforms that have specific aliasing requirements, SHM_COLOUR * is set and we must guarantee that the kernel and user side align * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and * the application mmap's the provided ring buffer. Fail the request * if we, by chance, don't end up with aligned addresses. The app * should use IOU_PBUF_RING_MMAP instead, and liburing will handle * this transparently. */ if (!(reg.flags & IOU_PBUF_RING_MMAP) && ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) { ret = -EINVAL; goto fail; } #endif bl->nr_entries = reg.ring_entries; bl->mask = reg.ring_entries - 1; bl->flags |= IOBL_BUF_RING; bl->buf_ring = br; if (reg.flags & IOU_PBUF_RING_INC) bl->flags |= IOBL_INC; io_buffer_add_list(ctx, bl, reg.bgid); return 0; fail: io_free_region(ctx, &bl->region); kfree(free_bl); return ret; } int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) { struct io_uring_buf_reg reg; struct io_buffer_list *bl; lockdep_assert_held(&ctx->uring_lock); if (copy_from_user(®, arg, sizeof(reg))) return -EFAULT; if (reg.resv[0] || reg.resv[1] || reg.resv[2]) return -EINVAL; if (reg.flags) return -EINVAL; bl = io_buffer_get_list(ctx, reg.bgid); if (!bl) return -ENOENT; if (!(bl->flags & IOBL_BUF_RING)) return -EINVAL; scoped_guard(mutex, &ctx->mmap_lock) xa_erase(&ctx->io_bl_xa, bl->bgid); io_put_bl(ctx, bl); return 0; } int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg) { struct io_uring_buf_status buf_status; struct io_buffer_list *bl; int i; if (copy_from_user(&buf_status, arg, sizeof(buf_status))) return -EFAULT; for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++) if (buf_status.resv[i]) return -EINVAL; bl = io_buffer_get_list(ctx, buf_status.buf_group); if (!bl) return -ENOENT; if (!(bl->flags & IOBL_BUF_RING)) return -EINVAL; buf_status.head = bl->head; if (copy_to_user(arg, &buf_status, sizeof(buf_status))) return -EFAULT; return 0; } struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx, unsigned int bgid) { struct io_buffer_list *bl; lockdep_assert_held(&ctx->mmap_lock); bl = xa_load(&ctx->io_bl_xa, bgid); if (!bl || !(bl->flags & IOBL_BUF_RING)) return NULL; return &bl->region; } |
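/*
 * io_register_pbuf_ring() above is the kernel side of IORING_REGISTER_PBUF_RING;
 * from userspace the ring is normally driven through liburing's buf_ring
 * helpers: set up the ring for a buffer group ID, add buffers, publish them by
 * advancing the tail, then issue requests with IOSQE_BUFFER_SELECT so the
 * completion carries the chosen buffer ID in cqe->flags (see __io_put_kbufs()
 * above). A minimal sketch, assuming liburing >= 2.4; the group ID, counts and
 * sizes are arbitrary.
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID	7
#define NR_BUFS	8		/* must be a power of two */
#define BUF_LEN	4096

static struct io_uring_buf_ring *setup_bufs(struct io_uring *ring, char **base)
{
	struct io_uring_buf_ring *br;
	int i, err;

	/* Kernel-mapped ring; this ends up in io_register_pbuf_ring() above. */
	br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &err);
	if (!br)
		return NULL;

	*base = malloc((size_t)NR_BUFS * BUF_LEN);
	if (!*base) {
		io_uring_free_buf_ring(ring, br, NR_BUFS, BGID);
		return NULL;
	}
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, *base + (size_t)i * BUF_LEN, BUF_LEN,
				      i, io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);	/* publish all buffers at once */
	return br;
}

int main(void)
{
	struct io_uring ring;
	char *base;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	if (!setup_bufs(&ring, &base))
		return 1;
	/*
	 * A receive would then be prepared with io_uring_prep_recv(sqe, fd,
	 * NULL, 0, 0), sqe->flags |= IOSQE_BUFFER_SELECT and
	 * sqe->buf_group = BGID; the selected buffer ID comes back as
	 * cqe->flags >> IORING_CQE_BUFFER_SHIFT.
	 */
	io_uring_queue_exit(&ring);
	return 0;
}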
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2011-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */

/* Kernel module implementing an IP set type: the hash:net,iface type */

#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>

#define IPSET_TYPE_REV_MIN	0
/*				1    nomatch flag support added */
/*				2    /0 support added */
/*				3    Counters support added */
/*				4    Comments support added */
/*				5    Forceadd support added */
/*				6    skbinfo support added */
/*				7    interface wildcard support added */
#define IPSET_TYPE_REV_MAX	8 /* bucketsize, initval support added */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,iface");

/* Type specific function prefix */
#define HTYPE		hash_netiface
#define IP_SET_HASH_WITH_NETS
#define IP_SET_HASH_WITH_MULTI
#define IP_SET_HASH_WITH_NET0

#define STRSCPY(a, b)	strscpy(a, b, IFNAMSIZ)

/* IPv4 variant */

struct hash_netiface4_elem_hashed {
	__be32 ip;
	u8 physdev;
	u8 cidr;
	u8 nomatch;
	u8 elem;
};

/* Member elements */
struct hash_netiface4_elem {
	__be32 ip;
	u8 physdev;
	u8 cidr;
	u8 nomatch;
	u8 elem;
	u8 wildcard;
char iface[IFNAMSIZ]; }; /* Common functions */ static bool hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, const struct hash_netiface4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && (ip1->wildcard ? strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : strcmp(ip1->iface, ip2->iface) == 0); } static int hash_netiface4_do_data_match(const struct hash_netiface4_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netiface4_data_set_flags(struct hash_netiface4_elem *elem, u32 flags) { elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH; } static void hash_netiface4_data_reset_flags(struct hash_netiface4_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr) { elem->ip &= ip_set_netmask(cidr); elem->cidr = cidr; } static bool hash_netiface4_data_list(struct sk_buff *skb, const struct hash_netiface4_elem *data) { u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netiface4_data_next(struct hash_netiface4_elem *next, const struct hash_netiface4_elem *d) { next->ip = d->ip; } #define MTYPE hash_netiface4 #define HOST_MASK 32 #define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) #include "ip_set_hash_gen.h" #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static const char *get_physindev_name(const struct sk_buff *skb, struct net *net) { struct net_device *dev = nf_bridge_get_physindev(skb, net); return dev ? dev->name : NULL; } static const char *get_physoutdev_name(const struct sk_buff *skb) { struct net_device *dev = nf_bridge_get_physoutdev(skb); return dev ? dev->name : NULL; } #endif static int hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct hash_netiface4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), .elem = 1, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); e.ip &= ip_set_netmask(e.cidr); #define IFACE(dir) (par->state->dir ? par->state->dir->name : "") #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) : get_physoutdev_name(skb); if (!eiface) return -EINVAL; STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { STRSCPY(e.iface, SRCDIR ? 
IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_netiface4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (e.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ); if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) e.wildcard = 1; } if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { e.ip = htonl(ip & ip_set_hostmask(e.cidr)); ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip_to < ip) swap(ip, ip_to); if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip, ip_to, e.cidr); } if (retried) ip = ntohl(h->next.ip); do { i++; e.ip = htonl(ip); if (i > IPSET_MAX_RANGE) { hash_netiface4_data_next(&h->next, &e); return -ERANGE; } ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } while (ip++ < ip_to); return ret; } /* IPv6 variant */ struct hash_netiface6_elem_hashed { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; }; struct hash_netiface6_elem { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; u8 wildcard; char iface[IFNAMSIZ]; }; /* Common functions */ static bool hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, const struct hash_netiface6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && (ip1->wildcard ? strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : strcmp(ip1->iface, ip2->iface) == 0); } static int hash_netiface6_do_data_match(const struct hash_netiface6_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netiface6_data_set_flags(struct hash_netiface6_elem *elem, u32 flags) { elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH; } static void hash_netiface6_data_reset_flags(struct hash_netiface6_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip, cidr); elem->cidr = cidr; } static bool hash_netiface6_data_list(struct sk_buff *skb, const struct hash_netiface6_elem *data) { u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | (data->wildcard ? 
IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netiface6_data_next(struct hash_netiface6_elem *next, const struct hash_netiface6_elem *d) { } #undef MTYPE #undef HOST_MASK #define MTYPE hash_netiface6 #define HOST_MASK 128 #define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct hash_netiface6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), .elem = 1, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6_netmask(&e.ip, e.cidr); if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) : get_physoutdev_name(skb); if (!eiface) return -EINVAL; STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { STRSCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (e.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } ip6_netmask(&e.ip, e.cidr); nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ); if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) e.wildcard = 1; } ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 
0 : ret; } static struct ip_set_type hash_netiface_type __read_mostly = { .name = "hash:net,iface", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE | IPSET_TYPE_NOMATCH, .dimension = IPSET_DIM_TWO, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_netiface_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_netiface_init(void) { return ip_set_type_register(&hash_netiface_type); } static void __exit hash_netiface_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_netiface_type); } module_init(hash_netiface_init); module_exit(hash_netiface_fini); |
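/*
 * The interface comparison used by hash_netiface[46]_data_equal() above treats
 * the stored name as a prefix when the element was added with the wildcard
 * flag (strncmp over strlen() of the stored name) and as an exact name
 * otherwise. A small standalone illustration of that rule; the helper name is
 * made up.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool iface_match(const char *stored, bool wildcard, const char *pkt_dev)
{
	return wildcard ? strncmp(stored, pkt_dev, strlen(stored)) == 0
			: strcmp(stored, pkt_dev) == 0;
}

int main(void)
{
	printf("%d\n", iface_match("eth", true, "eth0"));	/* 1: prefix match */
	printf("%d\n", iface_match("eth", false, "eth0"));	/* 0: exact only */
	printf("%d\n", iface_match("eth0", false, "eth0"));	/* 1 */
	return 0;
}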
41 41 3 1 1 1 3 1 1 1 1 1 1 1 195 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 | // SPDX-License-Identifier: GPL-2.0 /* * Management Component Transport Protocol (MCTP) - routing * implementation. * * This is currently based on a simple routing table, with no dst cache. The * number of routes should stay fairly small, so the lookup cost is small. * * Copyright (c) 2021 Code Construct * Copyright (c) 2021 Google */ #include <linux/idr.h> #include <linux/mctp.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/mctp.h> #include <net/mctpdevice.h> #include <net/netlink.h> #include <net/sock.h> static int mctp_neigh_add(struct mctp_dev *mdev, mctp_eid_t eid, enum mctp_neigh_source source, size_t lladdr_len, const void *lladdr) { struct net *net = dev_net(mdev->dev); struct mctp_neigh *neigh; int rc; mutex_lock(&net->mctp.neigh_lock); if (mctp_neigh_lookup(mdev, eid, NULL) == 0) { rc = -EEXIST; goto out; } if (lladdr_len > sizeof(neigh->ha)) { rc = -EINVAL; goto out; } neigh = kzalloc(sizeof(*neigh), GFP_KERNEL); if (!neigh) { rc = -ENOMEM; goto out; } INIT_LIST_HEAD(&neigh->list); neigh->dev = mdev; mctp_dev_hold(neigh->dev); neigh->eid = eid; neigh->source = source; memcpy(neigh->ha, lladdr, lladdr_len); list_add_rcu(&neigh->list, &net->mctp.neighbours); rc = 0; out: mutex_unlock(&net->mctp.neigh_lock); return rc; } static void __mctp_neigh_free(struct rcu_head *rcu) { struct mctp_neigh *neigh = container_of(rcu, struct mctp_neigh, rcu); mctp_dev_put(neigh->dev); kfree(neigh); } /* Removes all neighbour entries referring to a device */ void mctp_neigh_remove_dev(struct mctp_dev *mdev) { struct net *net = dev_net(mdev->dev); struct mctp_neigh *neigh, *tmp; mutex_lock(&net->mctp.neigh_lock); list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) { if (neigh->dev == mdev) { list_del_rcu(&neigh->list); /* TODO: immediate RTM_DELNEIGH */ call_rcu(&neigh->rcu, __mctp_neigh_free); } } mutex_unlock(&net->mctp.neigh_lock); } static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid, enum mctp_neigh_source source) { struct net *net = dev_net(mdev->dev); struct mctp_neigh *neigh, *tmp; bool dropped = false; mutex_lock(&net->mctp.neigh_lock); list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) { if 
(neigh->dev == mdev && neigh->eid == eid && neigh->source == source) { list_del_rcu(&neigh->list); /* TODO: immediate RTM_DELNEIGH */ call_rcu(&neigh->rcu, __mctp_neigh_free); dropped = true; } } mutex_unlock(&net->mctp.neigh_lock); return dropped ? 0 : -ENOENT; } static const struct nla_policy nd_mctp_policy[NDA_MAX + 1] = { [NDA_DST] = { .type = NLA_U8 }, [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, }; static int mctp_rtm_newneigh(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_device *dev; struct mctp_dev *mdev; struct ndmsg *ndm; struct nlattr *tb[NDA_MAX + 1]; int rc; mctp_eid_t eid; void *lladdr; int lladdr_len; rc = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nd_mctp_policy, extack); if (rc < 0) { NL_SET_ERR_MSG(extack, "lladdr too large?"); return rc; } if (!tb[NDA_DST]) { NL_SET_ERR_MSG(extack, "Neighbour EID must be specified"); return -EINVAL; } if (!tb[NDA_LLADDR]) { NL_SET_ERR_MSG(extack, "Neighbour lladdr must be specified"); return -EINVAL; } eid = nla_get_u8(tb[NDA_DST]); if (!mctp_address_unicast(eid)) { NL_SET_ERR_MSG(extack, "Invalid neighbour EID"); return -EINVAL; } lladdr = nla_data(tb[NDA_LLADDR]); lladdr_len = nla_len(tb[NDA_LLADDR]); ndm = nlmsg_data(nlh); dev = __dev_get_by_index(net, ndm->ndm_ifindex); if (!dev) return -ENODEV; mdev = mctp_dev_get_rtnl(dev); if (!mdev) return -ENODEV; if (lladdr_len != dev->addr_len) { NL_SET_ERR_MSG(extack, "Wrong lladdr length"); return -EINVAL; } return mctp_neigh_add(mdev, eid, MCTP_NEIGH_STATIC, lladdr_len, lladdr); } static int mctp_rtm_delneigh(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[NDA_MAX + 1]; struct net_device *dev; struct mctp_dev *mdev; struct ndmsg *ndm; int rc; mctp_eid_t eid; rc = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nd_mctp_policy, extack); if (rc < 0) { NL_SET_ERR_MSG(extack, "incorrect format"); return rc; } if (!tb[NDA_DST]) { NL_SET_ERR_MSG(extack, "Neighbour EID must be specified"); return -EINVAL; } eid = nla_get_u8(tb[NDA_DST]); ndm = nlmsg_data(nlh); dev = __dev_get_by_index(net, ndm->ndm_ifindex); if (!dev) return -ENODEV; mdev = mctp_dev_get_rtnl(dev); if (!mdev) return -ENODEV; return mctp_neigh_remove(mdev, eid, MCTP_NEIGH_STATIC); } static int mctp_fill_neigh(struct sk_buff *skb, u32 portid, u32 seq, int event, unsigned int flags, struct mctp_neigh *neigh) { struct net_device *dev = neigh->dev->dev; struct nlmsghdr *nlh; struct ndmsg *hdr; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags); if (!nlh) return -EMSGSIZE; hdr = nlmsg_data(nlh); hdr->ndm_family = AF_MCTP; hdr->ndm_ifindex = dev->ifindex; hdr->ndm_state = 0; // TODO other state bits? if (neigh->source == MCTP_NEIGH_STATIC) hdr->ndm_state |= NUD_PERMANENT; hdr->ndm_flags = 0; hdr->ndm_type = RTN_UNICAST; // TODO: is loopback RTN_LOCAL? 
if (nla_put_u8(skb, NDA_DST, neigh->eid)) goto cancel; if (nla_put(skb, NDA_LLADDR, dev->addr_len, neigh->ha)) goto cancel; nlmsg_end(skb, nlh); return 0; cancel: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int mctp_rtm_getneigh(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int rc, idx, req_ifindex; struct mctp_neigh *neigh; struct ndmsg *ndmsg; struct { int idx; } *cbctx = (void *)cb->ctx; ndmsg = nlmsg_data(cb->nlh); req_ifindex = ndmsg->ndm_ifindex; idx = 0; rcu_read_lock(); list_for_each_entry_rcu(neigh, &net->mctp.neighbours, list) { if (idx < cbctx->idx) goto cont; rc = 0; if (req_ifindex == 0 || req_ifindex == neigh->dev->dev->ifindex) rc = mctp_fill_neigh(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, neigh); if (rc) break; cont: idx++; } rcu_read_unlock(); cbctx->idx = idx; return skb->len; } int mctp_neigh_lookup(struct mctp_dev *mdev, mctp_eid_t eid, void *ret_hwaddr) { struct net *net = dev_net(mdev->dev); struct mctp_neigh *neigh; int rc = -EHOSTUNREACH; // TODO: or ENOENT? rcu_read_lock(); list_for_each_entry_rcu(neigh, &net->mctp.neighbours, list) { if (mdev == neigh->dev && eid == neigh->eid) { if (ret_hwaddr) memcpy(ret_hwaddr, neigh->ha, sizeof(neigh->ha)); rc = 0; break; } } rcu_read_unlock(); return rc; } /* namespace registration */ static int __net_init mctp_neigh_net_init(struct net *net) { struct netns_mctp *ns = &net->mctp; INIT_LIST_HEAD(&ns->neighbours); mutex_init(&ns->neigh_lock); return 0; } static void __net_exit mctp_neigh_net_exit(struct net *net) { struct netns_mctp *ns = &net->mctp; struct mctp_neigh *neigh; list_for_each_entry(neigh, &ns->neighbours, list) call_rcu(&neigh->rcu, __mctp_neigh_free); } /* net namespace implementation */ static struct pernet_operations mctp_net_ops = { .init = mctp_neigh_net_init, .exit = mctp_neigh_net_exit, }; static const struct rtnl_msg_handler mctp_neigh_rtnl_msg_handlers[] = { {THIS_MODULE, PF_MCTP, RTM_NEWNEIGH, mctp_rtm_newneigh, NULL, 0}, {THIS_MODULE, PF_MCTP, RTM_DELNEIGH, mctp_rtm_delneigh, NULL, 0}, {THIS_MODULE, PF_MCTP, RTM_GETNEIGH, NULL, mctp_rtm_getneigh, 0}, }; int __init mctp_neigh_init(void) { int err; err = register_pernet_subsys(&mctp_net_ops); if (err) return err; err = rtnl_register_many(mctp_neigh_rtnl_msg_handlers); if (err) unregister_pernet_subsys(&mctp_net_ops); return err; } void mctp_neigh_exit(void) { rtnl_unregister_many(mctp_neigh_rtnl_msg_handlers); unregister_pernet_subsys(&mctp_net_ops); } |
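A minimal caller sketch, assuming only the declarations above: it shows how code elsewhere in the MCTP stack might resolve a destination EID with mctp_neigh_lookup(). The helper name is invented for the example, and the destination buffer is assumed to be MAX_ADDR_LEN bytes to match the neighbour's stored hardware address.

/* Illustrative only: not part of the original file. */
#include <linux/netdevice.h>	/* MAX_ADDR_LEN */
#include <net/mctp.h>
#include <net/mctpdevice.h>

static int example_resolve_neigh(struct mctp_dev *mdev, mctp_eid_t eid,
				 unsigned char daddr[MAX_ADDR_LEN])
{
	/*
	 * Copies the stored hardware address into daddr on success, or
	 * returns -EHOSTUNREACH when no neighbour entry matches.
	 */
	return mctp_neigh_lookup(mdev, eid, daddr);
}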
234 278 6 3 10 35 39 238 118 67 38 2265 1420 285 456 190 2269 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 | /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. NET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the Ethernet handlers. * * Version: @(#)eth.h 1.0.4 05/13/93 * * Authors: Ross Biro * Fred N. 
van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Relocated to include/linux where it belongs by Alan Cox * <gw4pts@gw4pts.ampr.org> */ #ifndef _LINUX_ETHERDEVICE_H #define _LINUX_ETHERDEVICE_H #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/random.h> #include <linux/crc32.h> #include <linux/unaligned.h> #include <asm/bitsperlong.h> #ifdef __KERNEL__ struct device; struct fwnode_handle; int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); int platform_get_ethdev_address(struct device *dev, struct net_device *netdev); unsigned char *arch_get_platform_mac_address(void); int nvmem_get_mac_address(struct device *dev, void *addrbuf); int device_get_mac_address(struct device *dev, char *addr); int device_get_ethdev_address(struct device *dev, struct net_device *netdev); int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr); u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); __be16 eth_header_parse_protocol(const struct sk_buff *skb); int eth_prepare_mac_addr_change(struct net_device *dev, void *p); void eth_commit_mac_addr_change(struct net_device *dev, void *p); int eth_mac_addr(struct net_device *dev, void *p); int eth_validate_addr(struct net_device *dev); struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, unsigned int rxqs); #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, unsigned int txqs, unsigned int rxqs); #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); int eth_gro_complete(struct sk_buff *skb, int nhoff); /* Reserved Ethernet Addresses per IEEE 802.1Q */ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; #define eth_stp_addr eth_reserved_addr_base static const u8 eth_ipv4_mcast_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; /** * is_link_local_ether_addr - Determine if given Ethernet address is link-local * @addr: Pointer to a six-byte array containing the Ethernet address * * Return: true if address is link local reserved addr (01:80:c2:00:00:0X) per * IEEE 802.1Q 8.6.3 Frame filtering. * * Please note: addr must be aligned to u16. 
*/ static inline bool is_link_local_ether_addr(const u8 *addr) { __be16 *a = (__be16 *)addr; static const __be16 *b = (const __be16 *)eth_reserved_addr_base; static const __be16 m = cpu_to_be16(0xfff0); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return (((*(const u32 *)addr) ^ (*(const u32 *)b)) | (__force int)((a[2] ^ b[2]) & m)) == 0; #else return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; #endif } /** * is_zero_ether_addr - Determine if give Ethernet address is all zeros. * @addr: Pointer to a six-byte array containing the Ethernet address * * Return: true if the address is all zeroes. * * Please note: addr must be aligned to u16. */ static inline bool is_zero_ether_addr(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0; #else return (*(const u16 *)(addr + 0) | *(const u16 *)(addr + 2) | *(const u16 *)(addr + 4)) == 0; #endif } /** * is_multicast_ether_addr - Determine if the Ethernet address is a multicast. * @addr: Pointer to a six-byte array containing the Ethernet address * * Return: true if the address is a multicast address. * By definition the broadcast address is also a multicast address. */ static inline bool is_multicast_ether_addr(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) u32 a = *(const u32 *)addr; #else u16 a = *(const u16 *)addr; #endif #ifdef __BIG_ENDIAN return 0x01 & (a >> ((sizeof(a) * 8) - 8)); #else return 0x01 & a; #endif } static inline bool is_multicast_ether_addr_64bits(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 #ifdef __BIG_ENDIAN return 0x01 & ((*(const u64 *)addr) >> 56); #else return 0x01 & (*(const u64 *)addr); #endif #else return is_multicast_ether_addr(addr); #endif } /** * is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802). * @addr: Pointer to a six-byte array containing the Ethernet address * * Return: true if the address is a local address. */ static inline bool is_local_ether_addr(const u8 *addr) { return 0x02 & addr[0]; } /** * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast * @addr: Pointer to a six-byte array containing the Ethernet address * * Return: true if the address is the broadcast address. * * Please note: addr must be aligned to u16. */ static inline bool is_broadcast_ether_addr(const u8 *addr) { return (*(const u16 *)(addr + 0) & *(const u16 *)(addr + 2) & *(const u16 *)(addr + 4)) == 0xffff; } /** * is_unicast_ether_addr - Determine if the Ethernet address is unicast * @addr: Pointer to a six-byte array containing the Ethernet address * * Return: true if the address is a unicast address. */ static inline bool is_unicast_ether_addr(const u8 *addr) { return !is_multicast_ether_addr(addr); } /** * is_valid_ether_addr - Determine if the given Ethernet address is valid * @addr: Pointer to a six-byte array containing the Ethernet address * * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not * a multicast address, and is not FF:FF:FF:FF:FF:FF. * * Return: true if the address is valid. * * Please note: addr must be aligned to u16. */ static inline bool is_valid_ether_addr(const u8 *addr) { /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to * explicitly check for it here. 
*/ return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); } /** * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol * @proto: Ethertype/length value to be tested * * Check that the value from the Ethertype/length field is a valid Ethertype. * * Return: true if the valid is an 802.3 supported Ethertype. */ static inline bool eth_proto_is_802_3(__be16 proto) { #ifndef __BIG_ENDIAN /* if CPU is little endian mask off bits representing LSB */ proto &= htons(0xFF00); #endif /* cast both to u16 and compare since LSB can be ignored */ return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN); } /** * eth_random_addr - Generate software assigned random Ethernet address * @addr: Pointer to a six-byte array containing the Ethernet address * * Generate a random Ethernet address (MAC) that is not multicast * and has the local assigned bit set. */ static inline void eth_random_addr(u8 *addr) { get_random_bytes(addr, ETH_ALEN); addr[0] &= 0xfe; /* clear multicast bit */ addr[0] |= 0x02; /* set local assignment bit (IEEE802) */ } /** * eth_broadcast_addr - Assign broadcast address * @addr: Pointer to a six-byte array containing the Ethernet address * * Assign the broadcast address to the given address array. */ static inline void eth_broadcast_addr(u8 *addr) { memset(addr, 0xff, ETH_ALEN); } /** * eth_zero_addr - Assign zero address * @addr: Pointer to a six-byte array containing the Ethernet address * * Assign the zero address to the given address array. */ static inline void eth_zero_addr(u8 *addr) { memset(addr, 0x00, ETH_ALEN); } /** * eth_hw_addr_random - Generate software assigned random Ethernet and * set device flag * @dev: pointer to net_device structure * * Generate a random Ethernet address (MAC) to be used by a net device * and set addr_assign_type so the state can be read by sysfs and be * used by userspace. */ static inline void eth_hw_addr_random(struct net_device *dev) { u8 addr[ETH_ALEN]; eth_random_addr(addr); __dev_addr_set(dev, addr, ETH_ALEN); dev->addr_assign_type = NET_ADDR_RANDOM; } /** * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr * @ha: pointer to hardware address * * Calculate CRC from a hardware address as basis for filter hashes. */ static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha) { return ether_crc(ETH_ALEN, ha->addr); } /** * ether_addr_copy - Copy an Ethernet address * @dst: Pointer to a six-byte array Ethernet address destination * @src: Pointer to a six-byte array Ethernet address source * * Please note: dst & src must both be aligned to u16. */ static inline void ether_addr_copy(u8 *dst, const u8 *src) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) *(u32 *)dst = *(const u32 *)src; *(u16 *)(dst + 4) = *(const u16 *)(src + 4); #else u16 *a = (u16 *)dst; const u16 *b = (const u16 *)src; a[0] = b[0]; a[1] = b[1]; a[2] = b[2]; #endif } /** * eth_hw_addr_set - Assign Ethernet address to a net_device * @dev: pointer to net_device structure * @addr: address to assign * * Assign given address to the net_device, addr_assign_type is not changed. */ static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) { __dev_addr_set(dev, addr, ETH_ALEN); } /** * eth_hw_addr_inherit - Copy dev_addr from another net_device * @dst: pointer to net_device to copy dev_addr to * @src: pointer to net_device to copy dev_addr from * * Copy the Ethernet address from one net_device to another along with * the address attributes (addr_assign_type). 
*/ static inline void eth_hw_addr_inherit(struct net_device *dst, struct net_device *src) { dst->addr_assign_type = src->addr_assign_type; eth_hw_addr_set(dst, src->dev_addr); } /** * ether_addr_equal - Compare two Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address * * Compare two Ethernet addresses, returns true if equal * * Please note: addr1 & addr2 must both be aligned to u16. */ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) | ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4))); return fold == 0; #else const u16 *a = (const u16 *)addr1; const u16 *b = (const u16 *)addr2; return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0; #endif } /** * ether_addr_equal_64bits - Compare two Ethernet addresses * @addr1: Pointer to an array of 8 bytes * @addr2: Pointer to an other array of 8 bytes * * Compare two Ethernet addresses, returns true if equal, false otherwise. * * The function doesn't need any conditional branches and possibly uses * word memory accesses on CPU allowing cheap unaligned memory reads. * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 } * * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. */ static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); #ifdef __BIG_ENDIAN return (fold >> 16) == 0; #else return (fold << 16) == 0; #endif #else return ether_addr_equal(addr1, addr2); #endif } /** * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address * * Compare two Ethernet addresses, returns true if equal * * Please note: Use only when any Ethernet address may not be u16 aligned. */ static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return ether_addr_equal(addr1, addr2); #else return memcmp(addr1, addr2, ETH_ALEN) == 0; #endif } /** * ether_addr_equal_masked - Compare two Ethernet addresses with a mask * @addr1: Pointer to a six-byte array containing the 1st Ethernet address * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address * @mask: Pointer to a six-byte array containing the Ethernet address bitmask * * Compare two Ethernet addresses with a mask, returns true if for every bit * set in the bitmask the equivalent bits in the ethernet addresses are equal. * Using a mask with all bits set is a slower ether_addr_equal. 
*/ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, const u8 *mask) { int i; for (i = 0; i < ETH_ALEN; i++) { if ((addr1[i] ^ addr2[i]) & mask[i]) return false; } return true; } static inline bool ether_addr_is_ipv4_mcast(const u8 *addr) { u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; return ether_addr_equal_masked(addr, eth_ipv4_mcast_addr_base, mask); } static inline bool ether_addr_is_ipv6_mcast(const u8 *addr) { u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; return ether_addr_equal_masked(addr, eth_ipv6_mcast_addr_base, mask); } static inline bool ether_addr_is_ip_mcast(const u8 *addr) { return ether_addr_is_ipv4_mcast(addr) || ether_addr_is_ipv6_mcast(addr); } /** * ether_addr_to_u64 - Convert an Ethernet address into a u64 value. * @addr: Pointer to a six-byte array containing the Ethernet address * * Return: a u64 value of the address */ static inline u64 ether_addr_to_u64(const u8 *addr) { u64 u = 0; int i; for (i = 0; i < ETH_ALEN; i++) u = u << 8 | addr[i]; return u; } /** * u64_to_ether_addr - Convert a u64 to an Ethernet address. * @u: u64 to convert to an Ethernet MAC address * @addr: Pointer to a six-byte array to contain the Ethernet address */ static inline void u64_to_ether_addr(u64 u, u8 *addr) { int i; for (i = ETH_ALEN - 1; i >= 0; i--) { addr[i] = u & 0xff; u = u >> 8; } } /** * eth_addr_dec - Decrement the given MAC address * * @addr: Pointer to a six-byte array containing Ethernet address to decrement */ static inline void eth_addr_dec(u8 *addr) { u64 u = ether_addr_to_u64(addr); u--; u64_to_ether_addr(u, addr); } /** * eth_addr_inc() - Increment the given MAC address. * @addr: Pointer to a six-byte array containing Ethernet address to increment. */ static inline void eth_addr_inc(u8 *addr) { u64 u = ether_addr_to_u64(addr); u++; u64_to_ether_addr(u, addr); } /** * eth_addr_add() - Add (or subtract) an offset to/from the given MAC address. * * @offset: Offset to add. * @addr: Pointer to a six-byte array containing Ethernet address to increment. */ static inline void eth_addr_add(u8 *addr, long offset) { u64 u = ether_addr_to_u64(addr); u += offset; u64_to_ether_addr(u, addr); } /** * is_etherdev_addr - Tell if given Ethernet address belongs to the device. * @dev: Pointer to a device structure * @addr: Pointer to a six-byte array containing the Ethernet address * * Compare passed address with all addresses of the device. Return true if the * address if one of the device addresses. * * Note that this function calls ether_addr_equal_64bits() so take care of * the right padding. */ static inline bool is_etherdev_addr(const struct net_device *dev, const u8 addr[6 + 2]) { struct netdev_hw_addr *ha; bool res = false; rcu_read_lock(); for_each_dev_addr(dev, ha) { res = ether_addr_equal_64bits(addr, ha->addr); if (res) break; } rcu_read_unlock(); return res; } #endif /* __KERNEL__ */ /** * compare_ether_header - Compare two Ethernet headers * @a: Pointer to Ethernet header * @b: Pointer to Ethernet header * * Compare two Ethernet headers, returns 0 if equal. * This assumes that the network header (i.e., IP header) is 4-byte * aligned OR the platform can handle unaligned access. This is the * case for all packets coming into netif_receive_skb or similar * entry points. */ static inline unsigned long compare_ether_header(const void *a, const void *b) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 unsigned long fold; /* * We want to compare 14 bytes: * [a0 ... a13] ^ [b0 ... 
b13] * Use two long XOR, ORed together, with an overlap of two bytes. * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] | * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13] * This means the [a6 a7] ^ [b6 b7] part is done two times. */ fold = *(unsigned long *)a ^ *(unsigned long *)b; fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); return fold; #else u32 *a32 = (u32 *)((u8 *)a + 2); u32 *b32 = (u32 *)((u8 *)b + 2); return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); #endif } /** * eth_hw_addr_gen - Generate and assign Ethernet address to a port * @dev: pointer to port's net_device structure * @base_addr: base Ethernet address * @id: offset to add to the base address * * Generate a MAC address using a base address and an offset and assign it * to a net_device. Commonly used by switch drivers which need to compute * addresses for all their ports. addr_assign_type is not changed. */ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr, unsigned int id) { u64 u = ether_addr_to_u64(base_addr); u8 addr[ETH_ALEN]; u += id; u64_to_ether_addr(u, addr); eth_hw_addr_set(dev, addr); } /** * eth_skb_pkt_type - Assign packet type if destination address does not match * @skb: Assigned a packet type if address does not match @dev address * @dev: Network device used to compare packet address against * * If the destination MAC address of the packet does not match the network * device address, assign an appropriate packet type. */ static inline void eth_skb_pkt_type(struct sk_buff *skb, const struct net_device *dev) { const struct ethhdr *eth = eth_hdr(skb); if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) { if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) { if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; } else { skb->pkt_type = PACKET_OTHERHOST; } } } static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb) { struct ethhdr *eth = (struct ethhdr *)skb->data; skb_pull_inline(skb, ETH_HLEN); return eth; } /** * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame * @skb: Buffer to pad * * An Ethernet frame should have a minimum size of 60 bytes. This function * takes short frames and pads them with zeros up to the 60 byte limit. */ static inline int eth_skb_pad(struct sk_buff *skb) { return skb_put_padto(skb, ETH_ZLEN); } #endif /* _LINUX_ETHERDEVICE_H */ |
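A minimal usage sketch, assuming only the helpers declared in this header: deriving a per-port MAC address from a base address and validating the result. The function name and the chosen error code are illustrative, not taken from the header.

/* Illustrative only: not part of the original header. */
#include <linux/errno.h>
#include <linux/etherdevice.h>

static int example_derive_port_addr(const u8 *base, unsigned int port,
				    u8 out[ETH_ALEN])
{
	ether_addr_copy(out, base);	/* both pointers must be u16-aligned */
	eth_addr_add(out, port);	/* base + port, via the u64 round-trip */

	/* reject all-zero and multicast (incl. broadcast) results */
	if (!is_valid_ether_addr(out))
		return -EADDRNOTAVAIL;

	return 0;
}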
4 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGE_COUNTER_H #define _LINUX_PAGE_COUNTER_H #include <linux/atomic.h> #include <linux/cache.h> #include <linux/limits.h> #include <asm/page.h> struct page_counter { /* * Make sure 'usage' does not share cacheline with any other field in * v2. The memcg->memory.usage is a hot member of struct mem_cgroup. */ atomic_long_t usage; unsigned long failcnt; /* v1-only field */ CACHELINE_PADDING(_pad1_); /* effective memory.min and memory.min usage tracking */ unsigned long emin; atomic_long_t min_usage; atomic_long_t children_min_usage; /* effective memory.low and memory.low usage tracking */ unsigned long elow; atomic_long_t low_usage; atomic_long_t children_low_usage; unsigned long watermark; /* Latest cg2 reset watermark */ unsigned long local_watermark; /* Keep all the read most fields in a separete cacheline. */ CACHELINE_PADDING(_pad2_); bool protection_support; bool track_failcnt; unsigned long min; unsigned long low; unsigned long high; unsigned long max; struct page_counter *parent; } ____cacheline_internodealigned_in_smp; #if BITS_PER_LONG == 32 #define PAGE_COUNTER_MAX LONG_MAX #else #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) #endif /* * Protection is supported only for the first counter (with id 0). */ static inline void page_counter_init(struct page_counter *counter, struct page_counter *parent, bool protection_support) { counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0); counter->max = PAGE_COUNTER_MAX; counter->parent = parent; counter->protection_support = protection_support; counter->track_failcnt = false; } static inline unsigned long page_counter_read(struct page_counter *counter) { return atomic_long_read(&counter->usage); } void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); bool page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail); void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); static inline void page_counter_set_high(struct page_counter *counter, unsigned long nr_pages) { WRITE_ONCE(counter->high, nr_pages); } int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages); static inline void page_counter_reset_watermark(struct page_counter *counter) { unsigned long usage = page_counter_read(counter); /* * Update local_watermark first, so it's always <= watermark * (modulo CPU/compiler re-ordering) */ counter->local_watermark = usage; counter->watermark = usage; } #if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM) void page_counter_calculate_protection(struct page_counter *root, struct page_counter *counter, bool recursive_protection); #else static inline void page_counter_calculate_protection(struct page_counter *root, struct page_counter *counter, bool recursive_protection) {} #endif #endif /* 
_LINUX_PAGE_COUNTER_H */
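A minimal sketch of the charge/uncharge pattern built on the API declared above. The counter names, the 1024-page limit and the helper functions are invented for the example; page_counter_try_charge() charges the whole parent chain or none of it and reports the limiting counter through its last argument.

/* Illustrative only: not part of the original header. */
#include <linux/errno.h>
#include <linux/page_counter.h>

static struct page_counter example_parent;
static struct page_counter example_child;

static void example_counters_init(void)
{
	/* no memory.min/memory.low protection tracking in this example */
	page_counter_init(&example_parent, NULL, false);
	page_counter_init(&example_child, &example_parent, false);
	page_counter_set_max(&example_parent, 1024);	/* limit: 1024 pages */
}

static int example_charge(unsigned long nr_pages)
{
	struct page_counter *fail;

	/* charges example_child and example_parent together, or neither */
	if (!page_counter_try_charge(&example_child, nr_pages, &fail))
		return -ENOMEM;	/* 'fail' points at the counter over its limit */

	/* ... use the pages ... */
	page_counter_uncharge(&example_child, nr_pages);
	return 0;
}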
23 19 4 20 3 19 2 2 17 4 13 10 3 11 1 1 13 7 6 6 10 6 5 5 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 | // SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <net/ip.h> #include <net/tcp.h> #include <net/netlink.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_synproxy.h> #include <net/netfilter/nf_synproxy.h> #include <linux/netfilter/nf_tables.h> #include <linux/netfilter/nf_synproxy.h> struct nft_synproxy { struct nf_synproxy_info info; }; static const struct nla_policy nft_synproxy_policy[NFTA_SYNPROXY_MAX + 1] = { [NFTA_SYNPROXY_MSS] = { .type = NLA_U16 }, [NFTA_SYNPROXY_WSCALE] = { .type = NLA_U8 }, [NFTA_SYNPROXY_FLAGS] = { .type = NLA_U32 }, }; static void nft_synproxy_tcp_options(struct synproxy_options *opts, const struct tcphdr *tcp, struct synproxy_net *snet, struct nf_synproxy_info *info, const struct nft_synproxy *priv) { this_cpu_inc(snet->stats->syn_received); if (tcp->ece && tcp->cwr) opts->options |= NF_SYNPROXY_OPT_ECN; opts->options &= priv->info.options; opts->mss_encode = opts->mss_option; opts->mss_option = info->mss; if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP) synproxy_init_timestamp_cookie(info, opts); else opts->options &= ~(NF_SYNPROXY_OPT_WSCALE | NF_SYNPROXY_OPT_SACK_PERM | NF_SYNPROXY_OPT_ECN); } static void nft_synproxy_eval_v4(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt, const struct tcphdr *tcp, struct tcphdr *_tcph, struct synproxy_options *opts) { struct nf_synproxy_info info = priv->info; struct net *net = nft_net(pkt); struct synproxy_net *snet = synproxy_pernet(net); struct sk_buff *skb = pkt->skb; if (tcp->syn) { /* Initial SYN from client */ nft_synproxy_tcp_options(opts, tcp, snet, &info, priv); synproxy_send_client_synack(net, skb, tcp, opts); consume_skb(skb); regs->verdict.code = NF_STOLEN; } else if (tcp->ack) { /* ACK from client */ if (synproxy_recv_client_ack(net, skb, tcp, opts, ntohl(tcp->seq))) { consume_skb(skb); regs->verdict.code = NF_STOLEN; } else { regs->verdict.code = NF_DROP; } } } #if 
IS_ENABLED(CONFIG_NF_TABLES_IPV6) static void nft_synproxy_eval_v6(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt, const struct tcphdr *tcp, struct tcphdr *_tcph, struct synproxy_options *opts) { struct nf_synproxy_info info = priv->info; struct net *net = nft_net(pkt); struct synproxy_net *snet = synproxy_pernet(net); struct sk_buff *skb = pkt->skb; if (tcp->syn) { /* Initial SYN from client */ nft_synproxy_tcp_options(opts, tcp, snet, &info, priv); synproxy_send_client_synack_ipv6(net, skb, tcp, opts); consume_skb(skb); regs->verdict.code = NF_STOLEN; } else if (tcp->ack) { /* ACK from client */ if (synproxy_recv_client_ack_ipv6(net, skb, tcp, opts, ntohl(tcp->seq))) { consume_skb(skb); regs->verdict.code = NF_STOLEN; } else { regs->verdict.code = NF_DROP; } } } #endif /* CONFIG_NF_TABLES_IPV6*/ static void nft_synproxy_do_eval(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct synproxy_options opts = {}; struct sk_buff *skb = pkt->skb; int thoff = nft_thoff(pkt); const struct tcphdr *tcp; struct tcphdr _tcph; if (pkt->tprot != IPPROTO_TCP) { regs->verdict.code = NFT_BREAK; return; } if (nf_ip_checksum(skb, nft_hook(pkt), thoff, IPPROTO_TCP)) { regs->verdict.code = NF_DROP; return; } tcp = skb_header_pointer(skb, thoff, sizeof(struct tcphdr), &_tcph); if (!tcp) { regs->verdict.code = NF_DROP; return; } if (!synproxy_parse_options(skb, thoff, tcp, &opts)) { regs->verdict.code = NF_DROP; return; } switch (skb->protocol) { case htons(ETH_P_IP): nft_synproxy_eval_v4(priv, regs, pkt, tcp, &_tcph, &opts); return; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case htons(ETH_P_IPV6): nft_synproxy_eval_v6(priv, regs, pkt, tcp, &_tcph, &opts); return; #endif } regs->verdict.code = NFT_BREAK; } static int nft_synproxy_do_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_synproxy *priv) { struct synproxy_net *snet = synproxy_pernet(ctx->net); u32 flags; int err; if (tb[NFTA_SYNPROXY_MSS]) priv->info.mss = ntohs(nla_get_be16(tb[NFTA_SYNPROXY_MSS])); if (tb[NFTA_SYNPROXY_WSCALE]) priv->info.wscale = nla_get_u8(tb[NFTA_SYNPROXY_WSCALE]); if (tb[NFTA_SYNPROXY_FLAGS]) { flags = ntohl(nla_get_be32(tb[NFTA_SYNPROXY_FLAGS])); if (flags & ~NF_SYNPROXY_OPT_MASK) return -EOPNOTSUPP; priv->info.options = flags; } err = nf_ct_netns_get(ctx->net, ctx->family); if (err) return err; switch (ctx->family) { case NFPROTO_IPV4: err = nf_synproxy_ipv4_init(snet, ctx->net); if (err) goto nf_ct_failure; break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: err = nf_synproxy_ipv6_init(snet, ctx->net); if (err) goto nf_ct_failure; break; #endif case NFPROTO_INET: err = nf_synproxy_ipv4_init(snet, ctx->net); if (err) goto nf_ct_failure; err = nf_synproxy_ipv6_init(snet, ctx->net); if (err) { nf_synproxy_ipv4_fini(snet, ctx->net); goto nf_ct_failure; } break; } return 0; nf_ct_failure: nf_ct_netns_put(ctx->net, ctx->family); return err; } static void nft_synproxy_do_destroy(const struct nft_ctx *ctx) { struct synproxy_net *snet = synproxy_pernet(ctx->net); switch (ctx->family) { case NFPROTO_IPV4: nf_synproxy_ipv4_fini(snet, ctx->net); break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: nf_synproxy_ipv6_fini(snet, ctx->net); break; #endif case NFPROTO_INET: nf_synproxy_ipv4_fini(snet, ctx->net); nf_synproxy_ipv6_fini(snet, ctx->net); break; } nf_ct_netns_put(ctx->net, ctx->family); } static int nft_synproxy_do_dump(struct sk_buff *skb, struct nft_synproxy *priv) { if (nla_put_be16(skb, 
NFTA_SYNPROXY_MSS, htons(priv->info.mss)) || nla_put_u8(skb, NFTA_SYNPROXY_WSCALE, priv->info.wscale) || nla_put_be32(skb, NFTA_SYNPROXY_FLAGS, htonl(priv->info.options))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static void nft_synproxy_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_synproxy *priv = nft_expr_priv(expr); nft_synproxy_do_eval(priv, regs, pkt); } static int nft_synproxy_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { if (ctx->family != NFPROTO_IPV4 && ctx->family != NFPROTO_IPV6 && ctx->family != NFPROTO_INET) return -EOPNOTSUPP; return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD)); } static int nft_synproxy_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_synproxy *priv = nft_expr_priv(expr); return nft_synproxy_do_init(ctx, tb, priv); } static void nft_synproxy_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { nft_synproxy_do_destroy(ctx); } static int nft_synproxy_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { struct nft_synproxy *priv = nft_expr_priv(expr); return nft_synproxy_do_dump(skb, priv); } static struct nft_expr_type nft_synproxy_type; static const struct nft_expr_ops nft_synproxy_ops = { .eval = nft_synproxy_eval, .size = NFT_EXPR_SIZE(sizeof(struct nft_synproxy)), .init = nft_synproxy_init, .destroy = nft_synproxy_destroy, .dump = nft_synproxy_dump, .type = &nft_synproxy_type, .validate = nft_synproxy_validate, .reduce = NFT_REDUCE_READONLY, }; static struct nft_expr_type nft_synproxy_type __read_mostly = { .ops = &nft_synproxy_ops, .name = "synproxy", .owner = THIS_MODULE, .policy = nft_synproxy_policy, .maxattr = NFTA_SYNPROXY_MAX, }; static int nft_synproxy_obj_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_object *obj) { struct nft_synproxy *priv = nft_obj_data(obj); return nft_synproxy_do_init(ctx, tb, priv); } static void nft_synproxy_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj) { nft_synproxy_do_destroy(ctx); } static int nft_synproxy_obj_dump(struct sk_buff *skb, struct nft_object *obj, bool reset) { struct nft_synproxy *priv = nft_obj_data(obj); return nft_synproxy_do_dump(skb, priv); } static void nft_synproxy_obj_eval(struct nft_object *obj, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_synproxy *priv = nft_obj_data(obj); nft_synproxy_do_eval(priv, regs, pkt); } static void nft_synproxy_obj_update(struct nft_object *obj, struct nft_object *newobj) { struct nft_synproxy *newpriv = nft_obj_data(newobj); struct nft_synproxy *priv = nft_obj_data(obj); priv->info = newpriv->info; } static struct nft_object_type nft_synproxy_obj_type; static const struct nft_object_ops nft_synproxy_obj_ops = { .type = &nft_synproxy_obj_type, .size = sizeof(struct nft_synproxy), .init = nft_synproxy_obj_init, .destroy = nft_synproxy_obj_destroy, .dump = nft_synproxy_obj_dump, .eval = nft_synproxy_obj_eval, .update = nft_synproxy_obj_update, }; static struct nft_object_type nft_synproxy_obj_type __read_mostly = { .type = NFT_OBJECT_SYNPROXY, .ops = &nft_synproxy_obj_ops, .maxattr = NFTA_SYNPROXY_MAX, .policy = nft_synproxy_policy, .owner = THIS_MODULE, }; static int __init nft_synproxy_module_init(void) { int err; err = nft_register_obj(&nft_synproxy_obj_type); if (err < 0) return err; err = nft_register_expr(&nft_synproxy_type); if (err < 0) goto err; return 0; err: 
nft_unregister_obj(&nft_synproxy_obj_type); return err; } static void __exit nft_synproxy_module_exit(void) { nft_unregister_expr(&nft_synproxy_type); nft_unregister_obj(&nft_synproxy_obj_type); } module_init(nft_synproxy_module_init); module_exit(nft_synproxy_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>"); MODULE_ALIAS_NFT_EXPR("synproxy"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY); MODULE_DESCRIPTION("nftables SYNPROXY expression support"); |
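The comment below is an added summary of how the registrations above fit together; it only restates what the listed code does.

/*
 * Summary (added for clarity): the nft_synproxy_do_init/do_eval/do_dump/
 * do_destroy helpers back two registrations of the same logic: the
 * "synproxy" expression type (nft_synproxy_type/nft_synproxy_ops), used as
 * a statement in a rule, and the NFT_OBJECT_SYNPROXY object type
 * (nft_synproxy_obj_type/nft_synproxy_obj_ops), a named stateful object
 * that additionally supports in-place update of its nf_synproxy_info.
 * Module init registers the object type first and the expression type
 * second, unwinding the object registration if the latter fails.
 */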
914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Surface2.0/SUR40/PixelSense input driver * * Copyright (c) 2014 by Florian 'floe' Echtler <floe@butterbrot.org> * * Derived from the USB Skeleton driver 1.1, * Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com) * * and from the Apple USB BCM5974 multitouch driver, * Copyright (c) 2008 Henrik Rydberg (rydberg@euromail.se) * * and from the generic hid-multitouch driver, * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr> * * and from the v4l2-pci-skeleton driver, * Copyright (c) Copyright 2014 Cisco Systems, Inc. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/printk.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/usb/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-sg.h> /* read 512 bytes from endpoint 0x86 -> get header + blobs */ struct sur40_header { __le16 type; /* always 0x0001 */ __le16 count; /* count of blobs (if 0: continue prev. packet) */ __le32 packet_id; /* unique ID for all packets in one frame */ __le32 timestamp; /* milliseconds (inc. by 16 or 17 each frame) */ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */ } __packed; struct sur40_blob { __le16 blob_id; u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ u8 type; /* bitmask (0x01 blob, 0x02 touch, 0x04 tag) */ __le16 bb_pos_x; /* upper left corner of bounding box */ __le16 bb_pos_y; __le16 bb_size_x; /* size of bounding box */ __le16 bb_size_y; __le16 pos_x; /* finger tip position */ __le16 pos_y; __le16 ctr_x; /* centroid position */ __le16 ctr_y; __le16 axis_x; /* somehow related to major/minor axis, mostly: */ __le16 axis_y; /* axis_x == bb_size_y && axis_y == bb_size_x */ __le32 angle; /* orientation in radians relative to x axis - actually an IEEE754 float, don't use in kernel */ __le32 area; /* size in pixels/pressure (?) 
*/ u8 padding[24]; __le32 tag_id; /* valid when type == 0x04 (SUR40_TAG) */ __le32 unknown; } __packed; /* combined header/blob data */ struct sur40_data { struct sur40_header header; struct sur40_blob blobs[]; } __packed; /* read 512 bytes from endpoint 0x82 -> get header below * continue reading 16k blocks until header.size bytes read */ struct sur40_image_header { __le32 magic; /* "SUBF" */ __le32 packet_id; __le32 size; /* always 0x0007e900 = 960x540 */ __le32 timestamp; /* milliseconds (increases by 16 or 17 each frame) */ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */ } __packed; /* version information */ #define DRIVER_SHORT "sur40" #define DRIVER_LONG "Samsung SUR40" #define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>" #define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver" /* vendor and device IDs */ #define ID_MICROSOFT 0x045e #define ID_SUR40 0x0775 /* sensor resolution */ #define SENSOR_RES_X 1920 #define SENSOR_RES_Y 1080 /* touch data endpoint */ #define TOUCH_ENDPOINT 0x86 /* video data endpoint */ #define VIDEO_ENDPOINT 0x82 /* video header fields */ #define VIDEO_HEADER_MAGIC 0x46425553 #define VIDEO_PACKET_SIZE 16384 /* polling interval (ms) */ #define POLL_INTERVAL 1 /* maximum number of contacts FIXME: this is a guess? */ #define MAX_CONTACTS 64 /* control commands */ #define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ #define SUR40_ACCEL_CAPS 0xb3 /* 5 bytes */ #define SUR40_SENSOR_CAPS 0xc1 /* 24 bytes */ #define SUR40_POKE 0xc5 /* poke register byte */ #define SUR40_PEEK 0xc4 /* 48 bytes registers */ #define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */ #define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ #define SUR40_BLOB 0x01 #define SUR40_TOUCH 0x02 #define SUR40_TAG 0x04 /* video controls */ #define SUR40_BRIGHTNESS_MAX 0xff #define SUR40_BRIGHTNESS_MIN 0x00 #define SUR40_BRIGHTNESS_DEF 0xff #define SUR40_CONTRAST_MAX 0x0f #define SUR40_CONTRAST_MIN 0x00 #define SUR40_CONTRAST_DEF 0x0a #define SUR40_GAIN_MAX 0x09 #define SUR40_GAIN_MIN 0x00 #define SUR40_GAIN_DEF 0x08 #define SUR40_BACKLIGHT_MAX 0x01 #define SUR40_BACKLIGHT_MIN 0x00 #define SUR40_BACKLIGHT_DEF 0x01 #define sur40_str(s) #s #define SUR40_PARAM_RANGE(lo, hi) " (range " sur40_str(lo) "-" sur40_str(hi) ")" /* module parameters */ static uint brightness = SUR40_BRIGHTNESS_DEF; module_param(brightness, uint, 0644); MODULE_PARM_DESC(brightness, "set initial brightness" SUR40_PARAM_RANGE(SUR40_BRIGHTNESS_MIN, SUR40_BRIGHTNESS_MAX)); static uint contrast = SUR40_CONTRAST_DEF; module_param(contrast, uint, 0644); MODULE_PARM_DESC(contrast, "set initial contrast" SUR40_PARAM_RANGE(SUR40_CONTRAST_MIN, SUR40_CONTRAST_MAX)); static uint gain = SUR40_GAIN_DEF; module_param(gain, uint, 0644); MODULE_PARM_DESC(gain, "set initial gain" SUR40_PARAM_RANGE(SUR40_GAIN_MIN, SUR40_GAIN_MAX)); static const struct v4l2_pix_format sur40_pix_format[] = { { .pixelformat = V4L2_TCH_FMT_TU08, .width = SENSOR_RES_X / 2, .height = SENSOR_RES_Y / 2, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_RAW, .bytesperline = SENSOR_RES_X / 2, .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2), }, { .pixelformat = V4L2_PIX_FMT_GREY, .width = SENSOR_RES_X / 2, .height = SENSOR_RES_Y / 2, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_RAW, .bytesperline = SENSOR_RES_X / 2, .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2), } }; /* master device state */ struct sur40_state { struct usb_device *usbdev; struct device *dev; struct input_dev *input; struct v4l2_device v4l2; struct video_device vdev; 
struct mutex lock; struct v4l2_pix_format pix_fmt; struct v4l2_ctrl_handler hdl; struct vb2_queue queue; struct list_head buf_list; spinlock_t qlock; int sequence; struct sur40_data *bulk_in_buffer; size_t bulk_in_size; u8 bulk_in_epaddr; u8 vsvideo; char phys[64]; }; struct sur40_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; /* forward declarations */ static const struct video_device sur40_video_device; static const struct vb2_queue sur40_queue; static void sur40_process_video(struct sur40_state *sur40); static int sur40_s_ctrl(struct v4l2_ctrl *ctrl); static const struct v4l2_ctrl_ops sur40_ctrl_ops = { .s_ctrl = sur40_s_ctrl, }; /* * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT * here by mistake which is very likely to have corrupted the firmware EEPROM * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug. * Should you ever run into a similar problem, the background story to this * incident and instructions on how to fix the corrupted EEPROM are available * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html */ /* command wrapper */ static int sur40_command(struct sur40_state *dev, u8 command, u16 index, void *buffer, u16 size) { return usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0), command, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x00, index, buffer, size, 1000); } /* poke a byte in the panel register space */ static int sur40_poke(struct sur40_state *dev, u8 offset, u8 value) { int result; u8 index = 0x96; // 0xae for permanent write result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x32, index, NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x72, offset, NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0xb2, value, NULL, 0, 1000); if (result < 0) goto error; msleep(5); error: return result; } static int sur40_set_preprocessor(struct sur40_state *dev, u8 value) { u8 setting_07[2] = { 0x01, 0x00 }; u8 setting_17[2] = { 0x85, 0x80 }; int result; if (value > 1) return -ERANGE; result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x07, setting_07[value], NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x17, setting_17[value], NULL, 0, 1000); if (result < 0) goto error; msleep(5); error: return result; } static void sur40_set_vsvideo(struct sur40_state *handle, u8 value) { int i; for (i = 0; i < 4; i++) sur40_poke(handle, 0x1c+i, value); handle->vsvideo = value; } static void sur40_set_irlevel(struct sur40_state *handle, u8 value) { int i; for (i = 0; i < 8; i++) sur40_poke(handle, 0x08+(2*i), value); } /* Initialization routine, called from sur40_open */ static int sur40_init(struct sur40_state *dev) { int result; u8 *buffer; buffer = kmalloc(24, GFP_KERNEL); if (!buffer) { result = -ENOMEM; goto error; } /* stupidly replay the original MS driver init sequence */ result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x01, 
buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_SENSOR_CAPS, 0x00, buffer, 24); if (result < 0) goto error; result = sur40_command(dev, SUR40_ACCEL_CAPS, 0x00, buffer, 5); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12); if (result < 0) goto error; result = 0; /* * Discard the result buffer - no known data inside except * some version strings, maybe extract these sometime... */ error: kfree(buffer); return result; } /* * Callback routines from input_dev */ /* Enable the device, polling will now start. */ static int sur40_open(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); dev_dbg(sur40->dev, "open\n"); return sur40_init(sur40); } /* Disable device, polling has stopped. */ static void sur40_close(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); dev_dbg(sur40->dev, "close\n"); /* * There is no known way to stop the device, so we simply * stop polling. */ } /* * This function is called when a whole contact has been processed, * so that it can assign it to a slot and store the data there. */ static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) { int wide, major, minor; int bb_size_x, bb_size_y, pos_x, pos_y, ctr_x, ctr_y, slotnum; if (blob->type != SUR40_TOUCH) return; slotnum = input_mt_get_slot_by_key(input, le16_to_cpu(blob->blob_id)); if (slotnum < 0 || slotnum >= MAX_CONTACTS) return; bb_size_x = le16_to_cpu(blob->bb_size_x); bb_size_y = le16_to_cpu(blob->bb_size_y); pos_x = le16_to_cpu(blob->pos_x); pos_y = le16_to_cpu(blob->pos_y); ctr_x = le16_to_cpu(blob->ctr_x); ctr_y = le16_to_cpu(blob->ctr_y); input_mt_slot(input, slotnum); input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); wide = (bb_size_x > bb_size_y); major = max(bb_size_x, bb_size_y); minor = min(bb_size_x, bb_size_y); input_report_abs(input, ABS_MT_POSITION_X, pos_x); input_report_abs(input, ABS_MT_POSITION_Y, pos_y); input_report_abs(input, ABS_MT_TOOL_X, ctr_x); input_report_abs(input, ABS_MT_TOOL_Y, ctr_y); /* TODO: use a better orientation measure */ input_report_abs(input, ABS_MT_ORIENTATION, wide); input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); } /* core function: poll for new input data */ static void sur40_poll(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); int result, bulk_read, need_blobs, packet_blobs, i; struct sur40_header *header = &sur40->bulk_in_buffer->header; struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0]; dev_dbg(sur40->dev, "poll\n"); need_blobs = -1; do { /* perform a blocking bulk read to get data from the device */ result = usb_bulk_msg(sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, sur40->bulk_in_epaddr), sur40->bulk_in_buffer, sur40->bulk_in_size, &bulk_read, 1000); dev_dbg(sur40->dev, "received %d bytes\n", bulk_read); if (result < 0) { dev_err(sur40->dev, "error in usb_bulk_read\n"); return; } result = bulk_read - sizeof(struct sur40_header); if (result % sizeof(struct sur40_blob) != 0) { dev_err(sur40->dev, "transfer size mismatch\n"); return; } /* first packet? */ if (need_blobs == -1) { need_blobs = le16_to_cpu(header->count); dev_dbg(sur40->dev, "need %d blobs\n", need_blobs); /* packet_id = le32_to_cpu(header->packet_id); */ } /* * Sanity check. 
when video data is also being retrieved, the * packet ID will usually increase in the middle of a series * instead of at the end. However, the data is still consistent, * so the packet ID is probably just valid for the first packet * in a series. if (packet_id != le32_to_cpu(header->packet_id)) dev_dbg(sur40->dev, "packet ID mismatch\n"); */ packet_blobs = result / sizeof(struct sur40_blob); dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs); /* packets always contain at least 4 blobs, even if empty */ if (packet_blobs > need_blobs) packet_blobs = need_blobs; for (i = 0; i < packet_blobs; i++) { need_blobs--; dev_dbg(sur40->dev, "processing blob\n"); sur40_report_blob(&(inblob[i]), input); } } while (need_blobs > 0); input_mt_sync_frame(input); input_sync(input); sur40_process_video(sur40); } /* deal with video data */ static void sur40_process_video(struct sur40_state *sur40) { struct sur40_image_header *img = (void *)(sur40->bulk_in_buffer); struct sur40_buffer *new_buf; struct usb_sg_request sgr; struct sg_table *sgt; int result, bulk_read; if (!vb2_start_streaming_called(&sur40->queue)) return; /* get a new buffer from the list */ spin_lock(&sur40->qlock); if (list_empty(&sur40->buf_list)) { dev_dbg(sur40->dev, "buffer queue empty\n"); spin_unlock(&sur40->qlock); return; } new_buf = list_entry(sur40->buf_list.next, struct sur40_buffer, list); list_del(&new_buf->list); spin_unlock(&sur40->qlock); dev_dbg(sur40->dev, "buffer acquired\n"); /* retrieve data via bulk read */ result = usb_bulk_msg(sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), sur40->bulk_in_buffer, sur40->bulk_in_size, &bulk_read, 1000); if (result < 0) { dev_err(sur40->dev, "error in usb_bulk_read\n"); goto err_poll; } if (bulk_read != sizeof(struct sur40_image_header)) { dev_err(sur40->dev, "received %d bytes (%zd expected)\n", bulk_read, sizeof(struct sur40_image_header)); goto err_poll; } if (le32_to_cpu(img->magic) != VIDEO_HEADER_MAGIC) { dev_err(sur40->dev, "image magic mismatch\n"); goto err_poll; } if (le32_to_cpu(img->size) != sur40->pix_fmt.sizeimage) { dev_err(sur40->dev, "image size mismatch\n"); goto err_poll; } dev_dbg(sur40->dev, "header acquired\n"); sgt = vb2_dma_sg_plane_desc(&new_buf->vb.vb2_buf, 0); result = usb_sg_init(&sgr, sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0, sgt->sgl, sgt->nents, sur40->pix_fmt.sizeimage, 0); if (result < 0) { dev_err(sur40->dev, "error %d in usb_sg_init\n", result); goto err_poll; } usb_sg_wait(&sgr); if (sgr.status < 0) { dev_err(sur40->dev, "error %d in usb_sg_wait\n", sgr.status); goto err_poll; } dev_dbg(sur40->dev, "image acquired\n"); /* return error if streaming was stopped in the meantime */ if (sur40->sequence == -1) return; /* mark as finished */ new_buf->vb.vb2_buf.timestamp = ktime_get_ns(); new_buf->vb.sequence = sur40->sequence++; new_buf->vb.field = V4L2_FIELD_NONE; vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); dev_dbg(sur40->dev, "buffer marked done\n"); return; err_poll: vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } /* Initialize input device parameters. 
*/ static int sur40_input_setup_events(struct input_dev *input_dev) { int error; input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, SENSOR_RES_Y, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOOL_X, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOOL_Y, 0, SENSOR_RES_Y, 0, 0); /* max value unknown, but major/minor axis * can never be larger than screen */ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, SENSOR_RES_Y, 0, 0); input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); error = input_mt_init_slots(input_dev, MAX_CONTACTS, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { dev_err(input_dev->dev.parent, "failed to set up slots\n"); return error; } return 0; } /* Check candidate USB interface. */ static int sur40_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(interface); struct sur40_state *sur40; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct input_dev *input; int error; /* Check if we really have the right interface. */ iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; if (iface_desc->desc.bNumEndpoints < 5) return -ENODEV; /* Use endpoint #4 (0x86). */ endpoint = &iface_desc->endpoint[4].desc; if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) return -ENODEV; /* Allocate memory for our device state and initialize it. */ sur40 = kzalloc(sizeof(*sur40), GFP_KERNEL); if (!sur40) return -ENOMEM; input = input_allocate_device(); if (!input) { error = -ENOMEM; goto err_free_dev; } /* initialize locks/lists */ INIT_LIST_HEAD(&sur40->buf_list); spin_lock_init(&sur40->qlock); mutex_init(&sur40->lock); /* Set up regular input device structure */ input->name = DRIVER_LONG; usb_to_input_id(usbdev, &input->id); usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys)); strlcat(sur40->phys, "/input0", sizeof(sur40->phys)); input->phys = sur40->phys; input->dev.parent = &interface->dev; input->open = sur40_open; input->close = sur40_close; error = sur40_input_setup_events(input); if (error) goto err_free_input; input_set_drvdata(input, sur40); error = input_setup_polling(input, sur40_poll); if (error) { dev_err(&interface->dev, "failed to set up polling"); goto err_free_input; } input_set_poll_interval(input, POLL_INTERVAL); sur40->usbdev = usbdev; sur40->dev = &interface->dev; sur40->input = input; /* use the bulk-in endpoint tested above */ sur40->bulk_in_size = usb_endpoint_maxp(endpoint); sur40->bulk_in_epaddr = endpoint->bEndpointAddress; sur40->bulk_in_buffer = kmalloc(sur40->bulk_in_size, GFP_KERNEL); if (!sur40->bulk_in_buffer) { dev_err(&interface->dev, "Unable to allocate input buffer."); error = -ENOMEM; goto err_free_input; } /* register the polled input device */ error = input_register_device(input); if (error) { dev_err(&interface->dev, "Unable to register polled input device."); goto err_free_buffer; } /* register the video master device */ snprintf(sur40->v4l2.name, sizeof(sur40->v4l2.name), "%s", DRIVER_LONG); error = v4l2_device_register(sur40->dev, &sur40->v4l2); if (error) { dev_err(&interface->dev, "Unable to register video master device."); goto err_unreg_v4l2; } /* initialize the lock and subdevice */ sur40->queue = sur40_queue; sur40->queue.drv_priv = sur40; sur40->queue.lock = &sur40->lock; sur40->queue.dev = sur40->dev; /* 
initialize the queue */ error = vb2_queue_init(&sur40->queue); if (error) goto err_unreg_v4l2; sur40->pix_fmt = sur40_pix_format[0]; sur40->vdev = sur40_video_device; sur40->vdev.v4l2_dev = &sur40->v4l2; sur40->vdev.lock = &sur40->lock; sur40->vdev.queue = &sur40->queue; video_set_drvdata(&sur40->vdev, sur40); /* initialize the control handler for 4 controls */ v4l2_ctrl_handler_init(&sur40->hdl, 4); sur40->v4l2.ctrl_handler = &sur40->hdl; sur40->vsvideo = (SUR40_CONTRAST_DEF << 4) | SUR40_GAIN_DEF; v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_BRIGHTNESS, SUR40_BRIGHTNESS_MIN, SUR40_BRIGHTNESS_MAX, 1, clamp(brightness, (uint)SUR40_BRIGHTNESS_MIN, (uint)SUR40_BRIGHTNESS_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_CONTRAST, SUR40_CONTRAST_MIN, SUR40_CONTRAST_MAX, 1, clamp(contrast, (uint)SUR40_CONTRAST_MIN, (uint)SUR40_CONTRAST_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_GAIN, SUR40_GAIN_MIN, SUR40_GAIN_MAX, 1, clamp(gain, (uint)SUR40_GAIN_MIN, (uint)SUR40_GAIN_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, SUR40_BACKLIGHT_MIN, SUR40_BACKLIGHT_MAX, 1, SUR40_BACKLIGHT_DEF); v4l2_ctrl_handler_setup(&sur40->hdl); if (sur40->hdl.error) { dev_err(&interface->dev, "Unable to register video controls."); v4l2_ctrl_handler_free(&sur40->hdl); error = sur40->hdl.error; goto err_unreg_v4l2; } error = video_register_device(&sur40->vdev, VFL_TYPE_TOUCH, -1); if (error) { dev_err(&interface->dev, "Unable to register video subdevice."); goto err_unreg_video; } /* we can register the device now, as it is ready */ usb_set_intfdata(interface, sur40); dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC); return 0; err_unreg_video: video_unregister_device(&sur40->vdev); err_unreg_v4l2: v4l2_device_unregister(&sur40->v4l2); err_free_buffer: kfree(sur40->bulk_in_buffer); err_free_input: input_free_device(input); err_free_dev: kfree(sur40); return error; } /* Unregister device & clean up. */ static void sur40_disconnect(struct usb_interface *interface) { struct sur40_state *sur40 = usb_get_intfdata(interface); v4l2_ctrl_handler_free(&sur40->hdl); video_unregister_device(&sur40->vdev); v4l2_device_unregister(&sur40->v4l2); input_unregister_device(sur40->input); kfree(sur40->bulk_in_buffer); kfree(sur40); usb_set_intfdata(interface, NULL); dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC); } /* * Setup the constraints of the queue: besides setting the number of planes * per buffer and the size and allocation context of each plane, it also * checks if sufficient buffers have been allocated. Usually 3 is a good * minimum number: many DMA engines need a minimum of 2 buffers in the * queue and you need to have another available for userspace processing. */ static int sur40_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct sur40_state *sur40 = vb2_get_drv_priv(q); unsigned int q_num_bufs = vb2_get_num_buffers(q); if (q_num_bufs + *nbuffers < 3) *nbuffers = 3 - q_num_bufs; if (*nplanes) return sizes[0] < sur40->pix_fmt.sizeimage ? -EINVAL : 0; *nplanes = 1; sizes[0] = sur40->pix_fmt.sizeimage; return 0; } /* * Prepare the buffer for queueing to the DMA engine: check and set the * payload size. 
*/ static int sur40_buffer_prepare(struct vb2_buffer *vb) { struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue); unsigned long size = sur40->pix_fmt.sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(&sur40->usbdev->dev, "buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); return 0; } /* * Queue this buffer to the DMA engine. */ static void sur40_buffer_queue(struct vb2_buffer *vb) { struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue); struct sur40_buffer *buf = (struct sur40_buffer *)vb; spin_lock(&sur40->qlock); list_add_tail(&buf->list, &sur40->buf_list); spin_unlock(&sur40->qlock); } static void return_all_buffers(struct sur40_state *sur40, enum vb2_buffer_state state) { struct sur40_buffer *buf, *node; spin_lock(&sur40->qlock); list_for_each_entry_safe(buf, node, &sur40->buf_list, list) { vb2_buffer_done(&buf->vb.vb2_buf, state); list_del(&buf->list); } spin_unlock(&sur40->qlock); } /* * Start streaming. First check if the minimum number of buffers have been * queued. If not, then return -ENOBUFS and the vb2 framework will call * this function again the next time a buffer has been queued until enough * buffers are available to actually start the DMA engine. */ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count) { struct sur40_state *sur40 = vb2_get_drv_priv(vq); sur40->sequence = 0; return 0; } /* * Stop the DMA engine. Any remaining buffers in the DMA queue are dequeued * and passed on to the vb2 framework marked as STATE_ERROR. */ static void sur40_stop_streaming(struct vb2_queue *vq) { struct sur40_state *sur40 = vb2_get_drv_priv(vq); vb2_wait_for_all_buffers(vq); sur40->sequence = -1; /* Release all active buffers */ return_all_buffers(sur40, VB2_BUF_STATE_ERROR); } /* V4L ioctl */ static int sur40_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct sur40_state *sur40 = video_drvdata(file); strscpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver)); strscpy(cap->card, DRIVER_LONG, sizeof(cap->card)); usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int sur40_vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; i->type = V4L2_INPUT_TYPE_TOUCH; i->std = V4L2_STD_UNKNOWN; strscpy(i->name, "In-Cell Sensor", sizeof(i->name)); i->capabilities = 0; return 0; } static int sur40_vidioc_s_input(struct file *file, void *priv, unsigned int i) { return (i == 0) ? 
0 : -EINVAL; } static int sur40_vidioc_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int sur40_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_GREY: f->fmt.pix = sur40_pix_format[1]; break; default: f->fmt.pix = sur40_pix_format[0]; break; } return 0; } static int sur40_vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct sur40_state *sur40 = video_drvdata(file); switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_GREY: sur40->pix_fmt = sur40_pix_format[1]; break; default: sur40->pix_fmt = sur40_pix_format[0]; break; } f->fmt.pix = sur40->pix_fmt; return 0; } static int sur40_vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct sur40_state *sur40 = video_drvdata(file); f->fmt.pix = sur40->pix_fmt; return 0; } static int sur40_s_ctrl(struct v4l2_ctrl *ctrl) { struct sur40_state *sur40 = container_of(ctrl->handler, struct sur40_state, hdl); u8 value = sur40->vsvideo; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: sur40_set_irlevel(sur40, ctrl->val); break; case V4L2_CID_CONTRAST: value = (value & 0x0f) | (ctrl->val << 4); sur40_set_vsvideo(sur40, value); break; case V4L2_CID_GAIN: value = (value & 0xf0) | (ctrl->val); sur40_set_vsvideo(sur40, value); break; case V4L2_CID_BACKLIGHT_COMPENSATION: sur40_set_preprocessor(sur40, ctrl->val); break; } return 0; } static int sur40_ioctl_parm(struct file *file, void *priv, struct v4l2_streamparm *p) { if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; p->parm.capture.timeperframe.numerator = 1; p->parm.capture.timeperframe.denominator = 60; p->parm.capture.readbuffers = 3; return 0; } static int sur40_vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index >= ARRAY_SIZE(sur40_pix_format)) return -EINVAL; f->pixelformat = sur40_pix_format[f->index].pixelformat; f->flags = 0; return 0; } static int sur40_vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *f) { struct sur40_state *sur40 = video_drvdata(file); if ((f->index != 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08) && (f->pixel_format != V4L2_PIX_FMT_GREY))) return -EINVAL; f->type = V4L2_FRMSIZE_TYPE_DISCRETE; f->discrete.width = sur40->pix_fmt.width; f->discrete.height = sur40->pix_fmt.height; return 0; } static int sur40_vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *f) { struct sur40_state *sur40 = video_drvdata(file); if ((f->index > 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08) && (f->pixel_format != V4L2_PIX_FMT_GREY)) || (f->width != sur40->pix_fmt.width) || (f->height != sur40->pix_fmt.height)) return -EINVAL; f->type = V4L2_FRMIVAL_TYPE_DISCRETE; f->discrete.denominator = 60; f->discrete.numerator = 1; return 0; } static const struct usb_device_id sur40_table[] = { { USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */ { } /* terminating null entry */ }; MODULE_DEVICE_TABLE(usb, sur40_table); /* V4L2 structures */ static const struct vb2_ops sur40_queue_ops = { .queue_setup = sur40_queue_setup, .buf_prepare = sur40_buffer_prepare, .buf_queue = sur40_buffer_queue, .start_streaming = sur40_start_streaming, .stop_streaming = sur40_stop_streaming, }; static const struct vb2_queue sur40_queue = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, /* * VB2_USERPTR in currently not enabled: passing a user pointer to * dma-sg will result in segment sizes that are not a multiple of * 512 bytes, 
which is required by the host controller. */ .io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF, .buf_struct_size = sizeof(struct sur40_buffer), .ops = &sur40_queue_ops, .mem_ops = &vb2_dma_sg_memops, .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, .min_queued_buffers = 3, }; static const struct v4l2_file_operations sur40_video_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .unlocked_ioctl = video_ioctl2, .read = vb2_fop_read, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll, }; static const struct v4l2_ioctl_ops sur40_video_ioctl_ops = { .vidioc_querycap = sur40_vidioc_querycap, .vidioc_enum_fmt_vid_cap = sur40_vidioc_enum_fmt, .vidioc_try_fmt_vid_cap = sur40_vidioc_try_fmt, .vidioc_s_fmt_vid_cap = sur40_vidioc_s_fmt, .vidioc_g_fmt_vid_cap = sur40_vidioc_g_fmt, .vidioc_enum_framesizes = sur40_vidioc_enum_framesizes, .vidioc_enum_frameintervals = sur40_vidioc_enum_frameintervals, .vidioc_g_parm = sur40_ioctl_parm, .vidioc_s_parm = sur40_ioctl_parm, .vidioc_enum_input = sur40_vidioc_enum_input, .vidioc_g_input = sur40_vidioc_g_input, .vidioc_s_input = sur40_vidioc_s_input, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, }; static const struct video_device sur40_video_device = { .name = DRIVER_LONG, .fops = &sur40_video_fops, .ioctl_ops = &sur40_video_ioctl_ops, .release = video_device_release_empty, .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING, }; /* USB-specific object needed to register this driver with the USB subsystem. */ static struct usb_driver sur40_driver = { .name = DRIVER_SHORT, .probe = sur40_probe, .disconnect = sur40_disconnect, .id_table = sur40_table, }; module_usb_driver(sur40_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
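/*
 * Illustrative sketch (not part of the original driver source): the
 * nibble packing used by the V4L2 controls handled in sur40_s_ctrl().
 * The SUR40 "vsvideo" register byte carries the contrast value in its
 * high nibble and the gain value in its low nibble, matching the default
 * (SUR40_CONTRAST_DEF << 4) | SUR40_GAIN_DEF set up in sur40_probe().
 * The helper name below is hypothetical and only demonstrates the
 * arithmetic.
 */
static inline u8 sur40_pack_vsvideo(u8 contrast, u8 gain)
{
	/* contrast 0x00..0x0f -> bits 7:4, gain 0x00..0x09 -> bits 3:0 */
	return ((contrast & 0x0f) << 4) | (gain & 0x0f);
}
/*
 * With the driver defaults (contrast 0x0a, gain 0x08) this yields 0xa8,
 * the byte that sur40_set_vsvideo() pokes into registers 0x1c..0x1f. The
 * sizeimage check in sur40_process_video() likewise follows from the
 * half-resolution format: (1920/2) * (1080/2) = 960 * 540 = 518400
 * = 0x0007e900, the constant noted next to sur40_image_header above.
 */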
/* * videobuf2-core.c - video buffer 2 core framework * * Copyright (C) 2010 Samsung Electronics * * Author: Pawel Osciak <pawel@osciak.com> * Marek Szyprowski <m.szyprowski@samsung.com> * * The vb2_thread implementation was based on code from videobuf-dvb.c: * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <media/videobuf2-core.h> #include <media/v4l2-mc.h> #include <trace/events/vb2.h> #define PLANE_INDEX_BITS 3 #define PLANE_INDEX_SHIFT (PAGE_SHIFT + PLANE_INDEX_BITS) #define PLANE_INDEX_MASK (BIT_MASK(PLANE_INDEX_BITS) - 1) #define MAX_BUFFER_INDEX BIT_MASK(30 - PLANE_INDEX_SHIFT) #define BUFFER_INDEX_MASK (MAX_BUFFER_INDEX - 1) #if BIT(PLANE_INDEX_BITS) != VIDEO_MAX_PLANES #error PLANE_INDEX_BITS order must be equal to VIDEO_MAX_PLANES #endif static int debug; module_param(debug, int, 0644); #define dprintk(q, level, fmt, arg...) \ do { \ if (debug >= level) \ pr_info("[%s] %s: " fmt, (q)->name, __func__, \ ## arg); \ } while (0) #ifdef CONFIG_VIDEO_ADV_DEBUG /* * If advanced debugging is on, then count how often each op is called * successfully, which can either be per-buffer or per-queue. * * This makes it easy to check that the 'init' and 'cleanup' * (and variations thereof) stay balanced. */ #define log_memop(vb, op) \ dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \ (vb)->index, #op, \ (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") #define call_memop(vb, op, args...) \ ({ \ struct vb2_queue *_q = (vb)->vb2_queue; \ int err; \ \ log_memop(vb, op); \ err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \ if (!err) \ (vb)->cnt_mem_ ## op++; \ err; \ }) #define call_ptr_memop(op, vb, args...) \ ({ \ struct vb2_queue *_q = (vb)->vb2_queue; \ void *ptr; \ \ log_memop(vb, op); \ ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \ if (!IS_ERR_OR_NULL(ptr)) \ (vb)->cnt_mem_ ## op++; \ ptr; \ }) #define call_void_memop(vb, op, args...) \ ({ \ struct vb2_queue *_q = (vb)->vb2_queue; \ \ log_memop(vb, op); \ if (_q->mem_ops->op) \ _q->mem_ops->op(args); \ (vb)->cnt_mem_ ## op++; \ }) #define log_qop(q, op) \ dprintk(q, 2, "call_qop(%s)%s\n", #op, \ (q)->ops->op ? "" : " (nop)") #define call_qop(q, op, args...) \ ({ \ int err; \ \ log_qop(q, op); \ err = (q)->ops->op ? (q)->ops->op(args) : 0; \ if (!err) \ (q)->cnt_ ## op++; \ err; \ }) #define call_void_qop(q, op, args...) \ ({ \ log_qop(q, op); \ if ((q)->ops->op) \ (q)->ops->op(args); \ (q)->cnt_ ## op++; \ }) #define log_vb_qop(vb, op, args...) \ dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \ (vb)->index, #op, \ (vb)->vb2_queue->ops->op ? "" : " (nop)") #define call_vb_qop(vb, op, args...) \ ({ \ int err; \ \ log_vb_qop(vb, op); \ err = (vb)->vb2_queue->ops->op ? \ (vb)->vb2_queue->ops->op(args) : 0; \ if (!err) \ (vb)->cnt_ ## op++; \ err; \ }) #define call_void_vb_qop(vb, op, args...) \ ({ \ log_vb_qop(vb, op); \ if ((vb)->vb2_queue->ops->op) \ (vb)->vb2_queue->ops->op(args); \ (vb)->cnt_ ## op++; \ }) #else #define call_memop(vb, op, args...) \ ((vb)->vb2_queue->mem_ops->op ? \ (vb)->vb2_queue->mem_ops->op(args) : 0) #define call_ptr_memop(op, vb, args...) \ ((vb)->vb2_queue->mem_ops->op ? \ (vb)->vb2_queue->mem_ops->op(vb, args) : NULL) #define call_void_memop(vb, op, args...) \ do { \ if ((vb)->vb2_queue->mem_ops->op) \ (vb)->vb2_queue->mem_ops->op(args); \ } while (0) #define call_qop(q, op, args...) \ ((q)->ops->op ? (q)->ops->op(args) : 0) #define call_void_qop(q, op, args...) \ do { \ if ((q)->ops->op) \ (q)->ops->op(args); \ } while (0) #define call_vb_qop(vb, op, args...) \ ((vb)->vb2_queue->ops->op ? 
(vb)->vb2_queue->ops->op(args) : 0) #define call_void_vb_qop(vb, op, args...) \ do { \ if ((vb)->vb2_queue->ops->op) \ (vb)->vb2_queue->ops->op(args); \ } while (0) #endif #define call_bufop(q, op, args...) \ ({ \ int ret = 0; \ if (q && q->buf_ops && q->buf_ops->op) \ ret = q->buf_ops->op(args); \ ret; \ }) #define call_void_bufop(q, op, args...) \ ({ \ if (q && q->buf_ops && q->buf_ops->op) \ q->buf_ops->op(args); \ }) static void __vb2_queue_cancel(struct vb2_queue *q); static const char *vb2_state_name(enum vb2_buffer_state s) { static const char * const state_names[] = { [VB2_BUF_STATE_DEQUEUED] = "dequeued", [VB2_BUF_STATE_IN_REQUEST] = "in request", [VB2_BUF_STATE_PREPARING] = "preparing", [VB2_BUF_STATE_QUEUED] = "queued", [VB2_BUF_STATE_ACTIVE] = "active", [VB2_BUF_STATE_DONE] = "done", [VB2_BUF_STATE_ERROR] = "error", }; if ((unsigned int)(s) < ARRAY_SIZE(state_names)) return state_names[s]; return "unknown"; } /* * __vb2_buf_mem_alloc() - allocate video memory for the given buffer */ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; void *mem_priv; int plane; int ret = -ENOMEM; /* * Allocate memory for all planes in this buffer * NOTE: mmapped areas should be page aligned */ for (plane = 0; plane < vb->num_planes; ++plane) { /* Memops alloc requires size to be page aligned. */ unsigned long size = PAGE_ALIGN(vb->planes[plane].length); /* Did it wrap around? */ if (size < vb->planes[plane].length) goto free; mem_priv = call_ptr_memop(alloc, vb, q->alloc_devs[plane] ? : q->dev, size); if (IS_ERR_OR_NULL(mem_priv)) { if (mem_priv) ret = PTR_ERR(mem_priv); goto free; } /* Associate allocator private data with this plane */ vb->planes[plane].mem_priv = mem_priv; } return 0; free: /* Free already allocated memory if one of the allocations failed */ for (; plane > 0; --plane) { call_void_memop(vb, put, vb->planes[plane - 1].mem_priv); vb->planes[plane - 1].mem_priv = NULL; } return ret; } /* * __vb2_buf_mem_free() - free memory of the given buffer */ static void __vb2_buf_mem_free(struct vb2_buffer *vb) { unsigned int plane; for (plane = 0; plane < vb->num_planes; ++plane) { call_void_memop(vb, put, vb->planes[plane].mem_priv); vb->planes[plane].mem_priv = NULL; dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n", plane, vb->index); } } /* * __vb2_buf_userptr_put() - release userspace memory associated with * a USERPTR buffer */ static void __vb2_buf_userptr_put(struct vb2_buffer *vb) { unsigned int plane; for (plane = 0; plane < vb->num_planes; ++plane) { if (vb->planes[plane].mem_priv) call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); vb->planes[plane].mem_priv = NULL; } } /* * __vb2_plane_dmabuf_put() - release memory associated with * a DMABUF shared plane */ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p) { if (!p->mem_priv) return; if (!p->dbuf_duplicated) { if (p->dbuf_mapped) call_void_memop(vb, unmap_dmabuf, p->mem_priv); call_void_memop(vb, detach_dmabuf, p->mem_priv); } dma_buf_put(p->dbuf); p->mem_priv = NULL; p->dbuf = NULL; p->dbuf_mapped = 0; p->bytesused = 0; p->length = 0; p->m.fd = 0; p->data_offset = 0; p->dbuf_duplicated = false; } /* * __vb2_buf_dmabuf_put() - release memory associated with * a DMABUF shared buffer */ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) { int plane; /* * When multiple planes share the same DMA buffer attachment, the plane * with the lowest index owns the mem_priv. 
* Put planes in the reversed order so that we don't leave invalid * mem_priv behind. */ for (plane = vb->num_planes - 1; plane >= 0; --plane) __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); } /* * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory * to sync caches */ static void __vb2_buf_mem_prepare(struct vb2_buffer *vb) { unsigned int plane; if (vb->synced) return; vb->synced = 1; for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, prepare, vb->planes[plane].mem_priv); } /* * __vb2_buf_mem_finish() - call ->finish on buffer's private memory * to sync caches */ static void __vb2_buf_mem_finish(struct vb2_buffer *vb) { unsigned int plane; if (!vb->synced) return; vb->synced = 0; for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, finish, vb->planes[plane].mem_priv); } /* * __setup_offsets() - setup unique offsets ("cookies") for every plane in * the buffer. */ static void __setup_offsets(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; unsigned int plane; unsigned long offset = 0; /* * The offset "cookie" value has the following constraints: * - a buffer can have up to 8 planes. * - v4l2 mem2mem uses bit 30 to distinguish between * OUTPUT (aka "source", bit 30 is 0) and * CAPTURE (aka "destination", bit 30 is 1) buffers. * - must be page aligned * That led to this bit mapping when PAGE_SHIFT = 12: * |30 |29 15|14 12|11 0| * |DST_QUEUE_OFF_BASE|buffer index|plane index| 0 | * where there are 15 bits to store the buffer index. * Depending on PAGE_SHIFT value we can have fewer bits * to store the buffer index. */ offset = vb->index << PLANE_INDEX_SHIFT; for (plane = 0; plane < vb->num_planes; ++plane) { vb->planes[plane].m.offset = offset + (plane << PAGE_SHIFT); dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n", vb->index, plane, offset); } } static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb) { /* * DMA exporter should take care of cache syncs, so we can avoid * explicit ->prepare()/->finish() syncs. For other ->memory types * we always need ->prepare() or/and ->finish() cache sync. */ if (q->memory == VB2_MEMORY_DMABUF) { vb->skip_cache_sync_on_finish = 1; vb->skip_cache_sync_on_prepare = 1; return; } /* * ->finish() cache sync can be avoided when queue direction is * TO_DEVICE. */ if (q->dma_dir == DMA_TO_DEVICE) vb->skip_cache_sync_on_finish = 1; } /** * vb2_queue_add_buffer() - add a buffer to a queue * @q: pointer to &struct vb2_queue with videobuf2 queue. * @vb: pointer to &struct vb2_buffer to be added to the queue. * @index: index where add vb2_buffer in the queue */ static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index) { WARN_ON(index >= q->max_num_buffers || test_bit(index, q->bufs_bitmap) || vb->vb2_queue); q->bufs[index] = vb; vb->index = index; vb->vb2_queue = q; set_bit(index, q->bufs_bitmap); } /** * vb2_queue_remove_buffer() - remove a buffer from a queue * @vb: pointer to &struct vb2_buffer to be removed from the queue. 
*/ static void vb2_queue_remove_buffer(struct vb2_buffer *vb) { clear_bit(vb->index, vb->vb2_queue->bufs_bitmap); vb->vb2_queue->bufs[vb->index] = NULL; vb->vb2_queue = NULL; } /* * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type) * video buffer memory for all buffers/planes on the queue and initializes the * queue * @first_index: index of the first created buffer, all newly allocated buffers * have indices in the range [first_index..first_index+count-1] * * Returns the number of buffers successfully allocated. */ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, unsigned int num_buffers, unsigned int num_planes, const unsigned int plane_sizes[VB2_MAX_PLANES], unsigned int *first_index) { unsigned int buffer, plane; struct vb2_buffer *vb; unsigned long index = q->max_num_buffers; int ret; /* * Ensure that the number of already queue + the number of buffers already * in the queue is below q->max_num_buffers */ num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers - vb2_get_num_buffers(q)); while (num_buffers) { index = bitmap_find_next_zero_area(q->bufs_bitmap, q->max_num_buffers, 0, num_buffers, 0); if (index < q->max_num_buffers) break; /* Try to find free space for less buffers */ num_buffers--; } /* If there is no space left to allocate buffers return 0 to indicate the error */ if (!num_buffers) { *first_index = 0; return 0; } *first_index = index; for (buffer = 0; buffer < num_buffers; ++buffer) { /* Allocate vb2 buffer structures */ vb = kzalloc(q->buf_struct_size, GFP_KERNEL); if (!vb) { dprintk(q, 1, "memory alloc for buffer struct failed\n"); break; } vb->state = VB2_BUF_STATE_DEQUEUED; vb->num_planes = num_planes; vb->type = q->type; vb->memory = memory; init_buffer_cache_hints(q, vb); for (plane = 0; plane < num_planes; ++plane) { vb->planes[plane].length = plane_sizes[plane]; vb->planes[plane].min_length = plane_sizes[plane]; } vb2_queue_add_buffer(q, vb, index++); call_void_bufop(q, init_buffer, vb); /* Allocate video buffer memory for the MMAP type */ if (memory == VB2_MEMORY_MMAP) { ret = __vb2_buf_mem_alloc(vb); if (ret) { dprintk(q, 1, "failed allocating memory for buffer %d\n", buffer); vb2_queue_remove_buffer(vb); kfree(vb); break; } __setup_offsets(vb); /* * Call the driver-provided buffer initialization * callback, if given. An error in initialization * results in queue setup failure. */ ret = call_vb_qop(vb, buf_init, vb); if (ret) { dprintk(q, 1, "buffer %d %p initialization failed\n", buffer, vb); __vb2_buf_mem_free(vb); vb2_queue_remove_buffer(vb); kfree(vb); break; } } } dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n", buffer, num_planes); return buffer; } /* * __vb2_free_mem() - release video buffer memory for a given range of * buffers in a given queue */ static void __vb2_free_mem(struct vb2_queue *q, unsigned int start, unsigned int count) { unsigned int i; struct vb2_buffer *vb; for (i = start; i < start + count; i++) { vb = vb2_get_buffer(q, i); if (!vb) continue; /* Free MMAP buffers or release USERPTR buffers */ if (q->memory == VB2_MEMORY_MMAP) __vb2_buf_mem_free(vb); else if (q->memory == VB2_MEMORY_DMABUF) __vb2_buf_dmabuf_put(vb); else __vb2_buf_userptr_put(vb); } } /* * __vb2_queue_free() - free @count buffers from @start index of the queue - video memory and * related information, if no buffers are left return the queue to an * uninitialized state. Might be called even if the queue has already been freed. 
*/ static void __vb2_queue_free(struct vb2_queue *q, unsigned int start, unsigned int count) { unsigned int i; lockdep_assert_held(&q->mmap_lock); /* Call driver-provided cleanup function for each buffer, if provided */ for (i = start; i < start + count; i++) { struct vb2_buffer *vb = vb2_get_buffer(q, i); if (vb && vb->planes[0].mem_priv) call_void_vb_qop(vb, buf_cleanup, vb); } /* Release video buffer memory */ __vb2_free_mem(q, start, count); #ifdef CONFIG_VIDEO_ADV_DEBUG /* * Check that all the calls were balanced during the life-time of this * queue. If not then dump the counters to the kernel log. */ if (vb2_get_num_buffers(q)) { bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming || q->cnt_prepare_streaming != q->cnt_unprepare_streaming || q->cnt_wait_prepare != q->cnt_wait_finish; if (unbalanced) { pr_info("unbalanced counters for queue %p:\n", q); if (q->cnt_start_streaming != q->cnt_stop_streaming) pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n", q->cnt_queue_setup, q->cnt_start_streaming, q->cnt_stop_streaming); if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming) pr_info(" prepare_streaming: %u unprepare_streaming: %u\n", q->cnt_prepare_streaming, q->cnt_unprepare_streaming); if (q->cnt_wait_prepare != q->cnt_wait_finish) pr_info(" wait_prepare: %u wait_finish: %u\n", q->cnt_wait_prepare, q->cnt_wait_finish); } q->cnt_queue_setup = 0; q->cnt_wait_prepare = 0; q->cnt_wait_finish = 0; q->cnt_prepare_streaming = 0; q->cnt_start_streaming = 0; q->cnt_stop_streaming = 0; q->cnt_unprepare_streaming = 0; } for (i = start; i < start + count; i++) { struct vb2_buffer *vb = vb2_get_buffer(q, i); bool unbalanced; if (!vb) continue; unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put || vb->cnt_mem_prepare != vb->cnt_mem_finish || vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr || vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf || vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf || vb->cnt_buf_queue != vb->cnt_buf_done || vb->cnt_buf_prepare != vb->cnt_buf_finish || vb->cnt_buf_init != vb->cnt_buf_cleanup; if (unbalanced) { pr_info("unbalanced counters for queue %p, buffer %d:\n", q, i); if (vb->cnt_buf_init != vb->cnt_buf_cleanup) pr_info(" buf_init: %u buf_cleanup: %u\n", vb->cnt_buf_init, vb->cnt_buf_cleanup); if (vb->cnt_buf_prepare != vb->cnt_buf_finish) pr_info(" buf_prepare: %u buf_finish: %u\n", vb->cnt_buf_prepare, vb->cnt_buf_finish); if (vb->cnt_buf_queue != vb->cnt_buf_done) pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n", vb->cnt_buf_out_validate, vb->cnt_buf_queue, vb->cnt_buf_done, vb->cnt_buf_request_complete); if (vb->cnt_mem_alloc != vb->cnt_mem_put) pr_info(" alloc: %u put: %u\n", vb->cnt_mem_alloc, vb->cnt_mem_put); if (vb->cnt_mem_prepare != vb->cnt_mem_finish) pr_info(" prepare: %u finish: %u\n", vb->cnt_mem_prepare, vb->cnt_mem_finish); if (vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr) pr_info(" get_userptr: %u put_userptr: %u\n", vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr); if (vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf) pr_info(" attach_dmabuf: %u detach_dmabuf: %u\n", vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf); if (vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf) pr_info(" map_dmabuf: %u unmap_dmabuf: %u\n", vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf); pr_info(" get_dmabuf: %u num_users: %u\n", vb->cnt_mem_get_dmabuf, vb->cnt_mem_num_users); } } #endif /* Free vb2 buffers */ for (i = start; i < start + count; i++) { struct vb2_buffer *vb = 
vb2_get_buffer(q, i); if (!vb) continue; vb2_queue_remove_buffer(vb); kfree(vb); } if (!vb2_get_num_buffers(q)) { q->memory = VB2_MEMORY_UNKNOWN; INIT_LIST_HEAD(&q->queued_list); } } bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) { unsigned int plane; for (plane = 0; plane < vb->num_planes; ++plane) { void *mem_priv = vb->planes[plane].mem_priv; /* * If num_users() has not been provided, call_memop * will return 0, apparently nobody cares about this * case anyway. If num_users() returns more than 1, * we are not the only user of the plane's memory. */ if (mem_priv && call_memop(vb, num_users, mem_priv) > 1) return true; } return false; } EXPORT_SYMBOL(vb2_buffer_in_use); /* * __buffers_in_use() - return true if any buffers on the queue are in use and * the queue cannot be freed (by the means of REQBUFS(0)) call */ static bool __buffers_in_use(struct vb2_queue *q) { unsigned int buffer; for (buffer = 0; buffer < q->max_num_buffers; ++buffer) { struct vb2_buffer *vb = vb2_get_buffer(q, buffer); if (!vb) continue; if (vb2_buffer_in_use(q, vb)) return true; } return false; } void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb) { call_void_bufop(q, fill_user_buffer, vb, pb); } EXPORT_SYMBOL_GPL(vb2_core_querybuf); /* * __verify_userptr_ops() - verify that all memory operations required for * USERPTR queue type have been provided */ static int __verify_userptr_ops(struct vb2_queue *q) { if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || !q->mem_ops->put_userptr) return -EINVAL; return 0; } /* * __verify_mmap_ops() - verify that all memory operations required for * MMAP queue type have been provided */ static int __verify_mmap_ops(struct vb2_queue *q) { if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || !q->mem_ops->put || !q->mem_ops->mmap) return -EINVAL; return 0; } /* * __verify_dmabuf_ops() - verify that all memory operations required for * DMABUF queue type have been provided */ static int __verify_dmabuf_ops(struct vb2_queue *q) { if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || !q->mem_ops->unmap_dmabuf) return -EINVAL; return 0; } int vb2_verify_memory_type(struct vb2_queue *q, enum vb2_memory memory, unsigned int type) { if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR && memory != VB2_MEMORY_DMABUF) { dprintk(q, 1, "unsupported memory type\n"); return -EINVAL; } if (type != q->type) { dprintk(q, 1, "requested type is incorrect\n"); return -EINVAL; } /* * Make sure all the required memory ops for given memory type * are available. */ if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { dprintk(q, 1, "MMAP for current setup unsupported\n"); return -EINVAL; } if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { dprintk(q, 1, "USERPTR for current setup unsupported\n"); return -EINVAL; } if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { dprintk(q, 1, "DMABUF for current setup unsupported\n"); return -EINVAL; } /* * Place the busy tests at the end: -EBUSY can be ignored when * create_bufs is called with count == 0, but count == 0 should still * do the memory and type validation. 
*/ if (vb2_fileio_is_active(q)) { dprintk(q, 1, "file io in progress\n"); return -EBUSY; } return 0; } EXPORT_SYMBOL(vb2_verify_memory_type); static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem) { q->non_coherent_mem = 0; if (!vb2_queue_allows_cache_hints(q)) return; q->non_coherent_mem = non_coherent_mem; } static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem) { if (non_coherent_mem != q->non_coherent_mem) { dprintk(q, 1, "memory coherency model mismatch\n"); return false; } return true; } static int vb2_core_allocated_buffers_storage(struct vb2_queue *q) { if (!q->bufs) q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL); if (!q->bufs) return -ENOMEM; if (!q->bufs_bitmap) q->bufs_bitmap = bitmap_zalloc(q->max_num_buffers, GFP_KERNEL); if (!q->bufs_bitmap) { kfree(q->bufs); q->bufs = NULL; return -ENOMEM; } return 0; } static void vb2_core_free_buffers_storage(struct vb2_queue *q) { kfree(q->bufs); q->bufs = NULL; bitmap_free(q->bufs_bitmap); q->bufs_bitmap = NULL; } int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int flags, unsigned int *count) { unsigned int num_buffers, allocated_buffers, num_planes = 0; unsigned int q_num_bufs = vb2_get_num_buffers(q); unsigned plane_sizes[VB2_MAX_PLANES] = { }; bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; unsigned int i, first_index; int ret = 0; if (q->streaming) { dprintk(q, 1, "streaming active\n"); return -EBUSY; } if (q->waiting_in_dqbuf && *count) { dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } if (*count == 0 || q_num_bufs != 0 || (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || !verify_coherency_flags(q, non_coherent_mem)) { /* * We already have buffers allocated, so first check if they * are not in use and can be freed. */ mutex_lock(&q->mmap_lock); if (debug && q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) dprintk(q, 1, "memory in use, orphaning buffers\n"); /* * Call queue_cancel to clean up any buffers in the * QUEUED state which is possible if buffers were prepared or * queued without ever calling STREAMON. */ __vb2_queue_cancel(q); __vb2_queue_free(q, 0, q->max_num_buffers); mutex_unlock(&q->mmap_lock); q->is_busy = 0; /* * In case of REQBUFS(0) return immediately without calling * driver's queue_setup() callback and allocating resources. */ if (*count == 0) return 0; } /* * Make sure the requested values and current defaults are sane. */ num_buffers = max_t(unsigned int, *count, q->min_reqbufs_allocation); num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers); memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); /* * Set this now to ensure that drivers see the correct q->memory value * in the queue_setup op. */ mutex_lock(&q->mmap_lock); ret = vb2_core_allocated_buffers_storage(q); q->memory = memory; mutex_unlock(&q->mmap_lock); if (ret) return ret; set_queue_coherency(q, non_coherent_mem); /* * Ask the driver how many buffers and planes per buffer it requires. * Driver also sets the size and allocator context for each plane. 
*/ ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (ret) goto error; /* Check that driver has set sane values */ if (WARN_ON(!num_planes)) { ret = -EINVAL; goto error; } for (i = 0; i < num_planes; i++) if (WARN_ON(!plane_sizes[i])) { ret = -EINVAL; goto error; } /* Finally, allocate buffers and video memory */ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes, &first_index); if (allocated_buffers == 0) { /* There shouldn't be any buffers allocated, so first_index == 0 */ WARN_ON(first_index); dprintk(q, 1, "memory allocation failed\n"); ret = -ENOMEM; goto error; } /* * There is no point in continuing if we can't allocate the minimum * number of buffers needed by this vb2_queue. */ if (allocated_buffers < q->min_reqbufs_allocation) ret = -ENOMEM; /* * Check if driver can handle the allocated number of buffers. */ if (!ret && allocated_buffers < num_buffers) { num_buffers = allocated_buffers; /* * num_planes is set by the previous queue_setup(), but since it * signals to queue_setup() whether it is called from create_bufs() * vs reqbufs() we zero it here to signal that queue_setup() is * called for the reqbufs() case. */ num_planes = 0; ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (!ret && allocated_buffers < num_buffers) ret = -ENOMEM; /* * Either the driver has accepted a smaller number of buffers, * or .queue_setup() returned an error */ } mutex_lock(&q->mmap_lock); if (ret < 0) { /* * Note: __vb2_queue_free() will subtract 'allocated_buffers' * from already queued buffers and it will reset q->memory to * VB2_MEMORY_UNKNOWN. */ __vb2_queue_free(q, first_index, allocated_buffers); mutex_unlock(&q->mmap_lock); return ret; } mutex_unlock(&q->mmap_lock); /* * Return the number of successfully allocated buffers * to the userspace. */ *count = allocated_buffers; q->waiting_for_buffers = !q->is_output; q->is_busy = 1; return 0; error: mutex_lock(&q->mmap_lock); q->memory = VB2_MEMORY_UNKNOWN; mutex_unlock(&q->mmap_lock); vb2_core_free_buffers_storage(q); return ret; } EXPORT_SYMBOL_GPL(vb2_core_reqbufs); int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int flags, unsigned int *count, unsigned int requested_planes, const unsigned int requested_sizes[], unsigned int *first_index) { unsigned int num_planes = 0, num_buffers, allocated_buffers; unsigned plane_sizes[VB2_MAX_PLANES] = { }; bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; unsigned int q_num_bufs = vb2_get_num_buffers(q); bool no_previous_buffers = !q_num_bufs; int ret = 0; if (q_num_bufs == q->max_num_buffers) { dprintk(q, 1, "maximum number of buffers already allocated\n"); return -ENOBUFS; } if (no_previous_buffers) { if (q->waiting_in_dqbuf && *count) { dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); /* * Set this now to ensure that drivers see the correct q->memory * value in the queue_setup op. 
*/ mutex_lock(&q->mmap_lock); ret = vb2_core_allocated_buffers_storage(q); q->memory = memory; mutex_unlock(&q->mmap_lock); if (ret) return ret; q->waiting_for_buffers = !q->is_output; set_queue_coherency(q, non_coherent_mem); } else { if (q->memory != memory) { dprintk(q, 1, "memory model mismatch\n"); return -EINVAL; } if (!verify_coherency_flags(q, non_coherent_mem)) return -EINVAL; } num_buffers = min(*count, q->max_num_buffers - q_num_bufs); if (requested_planes && requested_sizes) { num_planes = requested_planes; memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes)); } /* * Ask the driver, whether the requested number of buffers, planes per * buffer and their sizes are acceptable */ ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (ret) goto error; /* Finally, allocate buffers and video memory */ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes, first_index); if (allocated_buffers == 0) { dprintk(q, 1, "memory allocation failed\n"); ret = -ENOMEM; goto error; } /* * Check if driver can handle the so far allocated number of buffers. */ if (allocated_buffers < num_buffers) { num_buffers = allocated_buffers; /* * num_buffers contains the total number of buffers, that the * queue driver has set up */ ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (!ret && allocated_buffers < num_buffers) ret = -ENOMEM; /* * Either the driver has accepted a smaller number of buffers, * or .queue_setup() returned an error */ } mutex_lock(&q->mmap_lock); if (ret < 0) { /* * Note: __vb2_queue_free() will subtract 'allocated_buffers' * from already queued buffers and it will reset q->memory to * VB2_MEMORY_UNKNOWN. */ __vb2_queue_free(q, *first_index, allocated_buffers); mutex_unlock(&q->mmap_lock); return -ENOMEM; } mutex_unlock(&q->mmap_lock); /* * Return the number of successfully allocated buffers * to the userspace. */ *count = allocated_buffers; q->is_busy = 1; return 0; error: if (no_previous_buffers) { mutex_lock(&q->mmap_lock); q->memory = VB2_MEMORY_UNKNOWN; mutex_unlock(&q->mmap_lock); } return ret; } EXPORT_SYMBOL_GPL(vb2_core_create_bufs); void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) { if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv); } EXPORT_SYMBOL_GPL(vb2_plane_vaddr); void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) { if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv); } EXPORT_SYMBOL_GPL(vb2_plane_cookie); void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) { struct vb2_queue *q = vb->vb2_queue; unsigned long flags; if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) return; if (WARN_ON(state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR && state != VB2_BUF_STATE_QUEUED)) state = VB2_BUF_STATE_ERROR; #ifdef CONFIG_VIDEO_ADV_DEBUG /* * Although this is not a callback, it still does have to balance * with the buf_queue op. So update this counter manually. 
*/ vb->cnt_buf_done++; #endif dprintk(q, 4, "done processing on buffer %d, state: %s\n", vb->index, vb2_state_name(state)); if (state != VB2_BUF_STATE_QUEUED) __vb2_buf_mem_finish(vb); spin_lock_irqsave(&q->done_lock, flags); if (state == VB2_BUF_STATE_QUEUED) { vb->state = VB2_BUF_STATE_QUEUED; } else { /* Add the buffer to the done buffers list */ list_add_tail(&vb->done_entry, &q->done_list); vb->state = state; } atomic_dec(&q->owned_by_drv_count); if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) { media_request_object_unbind(&vb->req_obj); media_request_object_put(&vb->req_obj); } spin_unlock_irqrestore(&q->done_lock, flags); trace_vb2_buf_done(q, vb); switch (state) { case VB2_BUF_STATE_QUEUED: return; default: /* Inform any processes that may be waiting for buffers */ wake_up(&q->done_wq); break; } } EXPORT_SYMBOL_GPL(vb2_buffer_done); void vb2_discard_done(struct vb2_queue *q) { struct vb2_buffer *vb; unsigned long flags; spin_lock_irqsave(&q->done_lock, flags); list_for_each_entry(vb, &q->done_list, done_entry) vb->state = VB2_BUF_STATE_ERROR; spin_unlock_irqrestore(&q->done_lock, flags); } EXPORT_SYMBOL_GPL(vb2_discard_done); /* * __prepare_mmap() - prepare an MMAP buffer */ static int __prepare_mmap(struct vb2_buffer *vb) { int ret = 0; ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, vb->planes); return ret ? ret : call_vb_qop(vb, buf_prepare, vb); } /* * __prepare_userptr() - prepare a USERPTR buffer */ static int __prepare_userptr(struct vb2_buffer *vb) { struct vb2_plane planes[VB2_MAX_PLANES]; struct vb2_queue *q = vb->vb2_queue; void *mem_priv; unsigned int plane; int ret = 0; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); /* Copy relevant information provided by the userspace */ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, planes); if (ret) return ret; for (plane = 0; plane < vb->num_planes; ++plane) { /* Skip the plane if already verified */ if (vb->planes[plane].m.userptr && vb->planes[plane].m.userptr == planes[plane].m.userptr && vb->planes[plane].length == planes[plane].length) continue; dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n", plane); /* Check if the provided plane buffer is large enough */ if (planes[plane].length < vb->planes[plane].min_length) { dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n", planes[plane].length, vb->planes[plane].min_length, plane); ret = -EINVAL; goto err; } /* Release previously acquired memory if present */ if (vb->planes[plane].mem_priv) { if (!reacquired) { reacquired = true; vb->copied_timestamp = 0; call_void_vb_qop(vb, buf_cleanup, vb); } call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); } vb->planes[plane].mem_priv = NULL; vb->planes[plane].bytesused = 0; vb->planes[plane].length = 0; vb->planes[plane].m.userptr = 0; vb->planes[plane].data_offset = 0; /* Acquire each plane's memory */ mem_priv = call_ptr_memop(get_userptr, vb, q->alloc_devs[plane] ? : q->dev, planes[plane].m.userptr, planes[plane].length); if (IS_ERR(mem_priv)) { dprintk(q, 1, "failed acquiring userspace memory for plane %d\n", plane); ret = PTR_ERR(mem_priv); goto err; } vb->planes[plane].mem_priv = mem_priv; } /* * Now that everything is in order, copy relevant information * provided by userspace. 
*/ for (plane = 0; plane < vb->num_planes; ++plane) { vb->planes[plane].bytesused = planes[plane].bytesused; vb->planes[plane].length = planes[plane].length; vb->planes[plane].m.userptr = planes[plane].m.userptr; vb->planes[plane].data_offset = planes[plane].data_offset; } if (reacquired) { /* * One or more planes changed, so we must call buf_init to do * the driver-specific initialization on the newly acquired * buffer, if provided. */ ret = call_vb_qop(vb, buf_init, vb); if (ret) { dprintk(q, 1, "buffer initialization failed\n"); goto err; } } ret = call_vb_qop(vb, buf_prepare, vb); if (ret) { dprintk(q, 1, "buffer preparation failed\n"); call_void_vb_qop(vb, buf_cleanup, vb); goto err; } return 0; err: /* In case of errors, release planes that were already acquired */ for (plane = 0; plane < vb->num_planes; ++plane) { if (vb->planes[plane].mem_priv) call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); vb->planes[plane].mem_priv = NULL; vb->planes[plane].m.userptr = 0; vb->planes[plane].length = 0; } return ret; } /* * __prepare_dmabuf() - prepare a DMABUF buffer */ static int __prepare_dmabuf(struct vb2_buffer *vb) { struct vb2_plane planes[VB2_MAX_PLANES]; struct vb2_queue *q = vb->vb2_queue; void *mem_priv; unsigned int plane, i; int ret = 0; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); /* Copy relevant information provided by the userspace */ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, planes); if (ret) return ret; for (plane = 0; plane < vb->num_planes; ++plane) { struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); planes[plane].dbuf = dbuf; if (IS_ERR_OR_NULL(dbuf)) { dprintk(q, 1, "invalid dmabuf fd for plane %d\n", plane); ret = -EINVAL; goto err_put_planes; } /* use DMABUF size if length is not provided */ if (planes[plane].length == 0) planes[plane].length = dbuf->size; if (planes[plane].length < vb->planes[plane].min_length) { dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", planes[plane].length, plane, vb->planes[plane].min_length); ret = -EINVAL; goto err_put_planes; } /* Skip the plane if already verified */ if (dbuf == vb->planes[plane].dbuf && vb->planes[plane].length == planes[plane].length) continue; dprintk(q, 3, "buffer for plane %d changed\n", plane); reacquired = true; } if (reacquired) { if (vb->planes[0].mem_priv) { vb->copied_timestamp = 0; call_void_vb_qop(vb, buf_cleanup, vb); __vb2_buf_dmabuf_put(vb); } for (plane = 0; plane < vb->num_planes; ++plane) { /* * This is an optimization to reduce dma_buf attachment/mapping. * When the same dma_buf is used for multiple planes, there is no need * to create duplicated attachments. */ for (i = 0; i < plane; ++i) { if (planes[plane].dbuf == vb->planes[i].dbuf && q->alloc_devs[plane] == q->alloc_devs[i]) { vb->planes[plane].dbuf_duplicated = true; vb->planes[plane].dbuf = vb->planes[i].dbuf; vb->planes[plane].mem_priv = vb->planes[i].mem_priv; break; } } if (vb->planes[plane].dbuf_duplicated) continue; /* Acquire each plane's memory */ mem_priv = call_ptr_memop(attach_dmabuf, vb, q->alloc_devs[plane] ? : q->dev, planes[plane].dbuf, planes[plane].length); if (IS_ERR(mem_priv)) { dprintk(q, 1, "failed to attach dmabuf\n"); ret = PTR_ERR(mem_priv); goto err_put_vb2_buf; } vb->planes[plane].dbuf = planes[plane].dbuf; vb->planes[plane].mem_priv = mem_priv; /* * This pins the buffer(s) with dma_buf_map_attachment()). 
It's done * here instead just before the DMA, while queueing the buffer(s) so * userspace knows sooner rather than later if the dma-buf map fails. */ ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); if (ret) { dprintk(q, 1, "failed to map dmabuf for plane %d\n", plane); goto err_put_vb2_buf; } vb->planes[plane].dbuf_mapped = 1; } } else { for (plane = 0; plane < vb->num_planes; ++plane) dma_buf_put(planes[plane].dbuf); } /* * Now that everything is in order, copy relevant information * provided by userspace. */ for (plane = 0; plane < vb->num_planes; ++plane) { vb->planes[plane].bytesused = planes[plane].bytesused; vb->planes[plane].length = planes[plane].length; vb->planes[plane].m.fd = planes[plane].m.fd; vb->planes[plane].data_offset = planes[plane].data_offset; } if (reacquired) { /* * Call driver-specific initialization on the newly acquired buffer, * if provided. */ ret = call_vb_qop(vb, buf_init, vb); if (ret) { dprintk(q, 1, "buffer initialization failed\n"); goto err_put_vb2_buf; } } ret = call_vb_qop(vb, buf_prepare, vb); if (ret) { dprintk(q, 1, "buffer preparation failed\n"); call_void_vb_qop(vb, buf_cleanup, vb); goto err_put_vb2_buf; } return 0; err_put_planes: for (plane = 0; plane < vb->num_planes; ++plane) { if (!IS_ERR_OR_NULL(planes[plane].dbuf)) dma_buf_put(planes[plane].dbuf); } err_put_vb2_buf: /* In case of errors, release planes that were already acquired */ __vb2_buf_dmabuf_put(vb); return ret; } /* * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing */ static void __enqueue_in_driver(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; vb->state = VB2_BUF_STATE_ACTIVE; atomic_inc(&q->owned_by_drv_count); trace_vb2_buf_queue(q, vb); call_void_vb_qop(vb, buf_queue, vb); } static int __buf_prepare(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; enum vb2_buffer_state orig_state = vb->state; int ret; if (q->error) { dprintk(q, 1, "fatal error occurred on queue\n"); return -EIO; } if (vb->prepared) return 0; WARN_ON(vb->synced); if (q->is_output) { ret = call_vb_qop(vb, buf_out_validate, vb); if (ret) { dprintk(q, 1, "buffer validation failed\n"); return ret; } } vb->state = VB2_BUF_STATE_PREPARING; switch (q->memory) { case VB2_MEMORY_MMAP: ret = __prepare_mmap(vb); break; case VB2_MEMORY_USERPTR: ret = __prepare_userptr(vb); break; case VB2_MEMORY_DMABUF: ret = __prepare_dmabuf(vb); break; default: WARN(1, "Invalid queue type\n"); ret = -EINVAL; break; } if (ret) { dprintk(q, 1, "buffer preparation failed: %d\n", ret); vb->state = orig_state; return ret; } __vb2_buf_mem_prepare(vb); vb->prepared = 1; vb->state = orig_state; return 0; } static int vb2_req_prepare(struct media_request_object *obj) { struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); int ret; if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) return -EINVAL; mutex_lock(vb->vb2_queue->lock); ret = __buf_prepare(vb); mutex_unlock(vb->vb2_queue->lock); return ret; } static void __vb2_dqbuf(struct vb2_buffer *vb); static void vb2_req_unprepare(struct media_request_object *obj) { struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); mutex_lock(vb->vb2_queue->lock); __vb2_dqbuf(vb); vb->state = VB2_BUF_STATE_IN_REQUEST; mutex_unlock(vb->vb2_queue->lock); WARN_ON(!vb->req_obj.req); } static void vb2_req_queue(struct media_request_object *obj) { struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); int err; mutex_lock(vb->vb2_queue->lock); /* * There is no method to propagate an error from 
vb2_core_qbuf(), * so if this returns a non-0 value, then WARN. * * The only exception is -EIO which is returned if q->error is * set. We just ignore that, and expect this will be caught the * next time vb2_req_prepare() is called. */ err = vb2_core_qbuf(vb->vb2_queue, vb, NULL, NULL); WARN_ON_ONCE(err && err != -EIO); mutex_unlock(vb->vb2_queue->lock); } static void vb2_req_unbind(struct media_request_object *obj) { struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); if (vb->state == VB2_BUF_STATE_IN_REQUEST) call_void_bufop(vb->vb2_queue, init_buffer, vb); } static void vb2_req_release(struct media_request_object *obj) { struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); if (vb->state == VB2_BUF_STATE_IN_REQUEST) { vb->state = VB2_BUF_STATE_DEQUEUED; if (vb->request) media_request_put(vb->request); vb->request = NULL; } } static const struct media_request_object_ops vb2_core_req_ops = { .prepare = vb2_req_prepare, .unprepare = vb2_req_unprepare, .queue = vb2_req_queue, .unbind = vb2_req_unbind, .release = vb2_req_release, }; bool vb2_request_object_is_buffer(struct media_request_object *obj) { return obj->ops == &vb2_core_req_ops; } EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer); unsigned int vb2_request_buffer_cnt(struct media_request *req) { struct media_request_object *obj; unsigned long flags; unsigned int buffer_cnt = 0; spin_lock_irqsave(&req->lock, flags); list_for_each_entry(obj, &req->objects, list) if (vb2_request_object_is_buffer(obj)) buffer_cnt++; spin_unlock_irqrestore(&req->lock, flags); return buffer_cnt; } EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt); int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb) { int ret; if (vb->state != VB2_BUF_STATE_DEQUEUED) { dprintk(q, 1, "invalid buffer state %s\n", vb2_state_name(vb->state)); return -EINVAL; } if (vb->prepared) { dprintk(q, 1, "buffer already prepared\n"); return -EINVAL; } ret = __buf_prepare(vb); if (ret) return ret; /* Fill buffer information for the userspace */ call_void_bufop(q, fill_user_buffer, vb, pb); dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index); return 0; } EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count) { unsigned int i, ret = 0; unsigned int q_num_bufs = vb2_get_num_buffers(q); if (count == 0) return 0; if (count > q_num_bufs) return -EINVAL; if (start > q->max_num_buffers - count) return -EINVAL; mutex_lock(&q->mmap_lock); /* Check that all buffers in the range exist */ for (i = start; i < start + count; i++) { struct vb2_buffer *vb = vb2_get_buffer(q, i); if (!vb) { ret = -EINVAL; goto unlock; } if (vb->state != VB2_BUF_STATE_DEQUEUED) { ret = -EBUSY; goto unlock; } } __vb2_queue_free(q, start, count); dprintk(q, 2, "%u buffers removed\n", count); unlock: mutex_unlock(&q->mmap_lock); return ret; } EXPORT_SYMBOL_GPL(vb2_core_remove_bufs); /* * vb2_start_streaming() - Attempt to start streaming. * @q: videobuf2 queue * * Attempt to start streaming. When this function is called there must be * at least q->min_queued_buffers queued up (i.e. the minimum * number of buffers required for the DMA engine to function). If the * @start_streaming op fails it is supposed to return all the driver-owned * buffers back to vb2 in state QUEUED. Check if that happened and if * not warn and reclaim them forcefully. 
*/ static int vb2_start_streaming(struct vb2_queue *q) { struct vb2_buffer *vb; int ret; /* * If any buffers were queued before streamon, * we can now pass them to driver for processing. */ list_for_each_entry(vb, &q->queued_list, queued_entry) __enqueue_in_driver(vb); /* Tell the driver to start streaming */ q->start_streaming_called = 1; ret = call_qop(q, start_streaming, q, atomic_read(&q->owned_by_drv_count)); if (!ret) return 0; q->start_streaming_called = 0; dprintk(q, 1, "driver refused to start streaming\n"); /* * If you see this warning, then the driver isn't cleaning up properly * after a failed start_streaming(). See the start_streaming() * documentation in videobuf2-core.h for more information how buffers * should be returned to vb2 in start_streaming(). */ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { unsigned i; /* * Forcefully reclaim buffers if the driver did not * correctly return them to vb2. */ for (i = 0; i < q->max_num_buffers; ++i) { vb = vb2_get_buffer(q, i); if (!vb) continue; if (vb->state == VB2_BUF_STATE_ACTIVE) vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); } /* Must be zero now */ WARN_ON(atomic_read(&q->owned_by_drv_count)); } /* * If done_list is not empty, then start_streaming() didn't call * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or * STATE_DONE. */ WARN_ON(!list_empty(&q->done_list)); return ret; } int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb, struct media_request *req) { enum vb2_buffer_state orig_state; int ret; if (q->error) { dprintk(q, 1, "fatal error occurred on queue\n"); return -EIO; } if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && q->requires_requests) { dprintk(q, 1, "qbuf requires a request\n"); return -EBADR; } if ((req && q->uses_qbuf) || (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && q->uses_requests)) { dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n"); return -EBUSY; } if (req) { int ret; q->uses_requests = 1; if (vb->state != VB2_BUF_STATE_DEQUEUED) { dprintk(q, 1, "buffer %d not in dequeued state\n", vb->index); return -EINVAL; } if (q->is_output && !vb->prepared) { ret = call_vb_qop(vb, buf_out_validate, vb); if (ret) { dprintk(q, 1, "buffer validation failed\n"); return ret; } } media_request_object_init(&vb->req_obj); /* Make sure the request is in a safe state for updating. */ ret = media_request_lock_for_update(req); if (ret) return ret; ret = media_request_object_bind(req, &vb2_core_req_ops, q, true, &vb->req_obj); media_request_unlock_for_update(req); if (ret) return ret; vb->state = VB2_BUF_STATE_IN_REQUEST; /* * Increment the refcount and store the request. * The request refcount is decremented again when the * buffer is dequeued. This is to prevent vb2_buffer_done() * from freeing the request from interrupt context, which can * happen if the application closed the request fd after * queueing the request. 
*/ media_request_get(req); vb->request = req; /* Fill buffer information for the userspace */ if (pb) { call_void_bufop(q, copy_timestamp, vb, pb); call_void_bufop(q, fill_user_buffer, vb, pb); } dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); return 0; } if (vb->state != VB2_BUF_STATE_IN_REQUEST) q->uses_qbuf = 1; switch (vb->state) { case VB2_BUF_STATE_DEQUEUED: case VB2_BUF_STATE_IN_REQUEST: if (!vb->prepared) { ret = __buf_prepare(vb); if (ret) return ret; } break; case VB2_BUF_STATE_PREPARING: dprintk(q, 1, "buffer still being prepared\n"); return -EINVAL; default: dprintk(q, 1, "invalid buffer state %s\n", vb2_state_name(vb->state)); return -EINVAL; } /* * Add to the queued buffers list, a buffer will stay on it until * dequeued in dqbuf. */ orig_state = vb->state; list_add_tail(&vb->queued_entry, &q->queued_list); q->queued_count++; q->waiting_for_buffers = false; vb->state = VB2_BUF_STATE_QUEUED; if (pb) call_void_bufop(q, copy_timestamp, vb, pb); trace_vb2_qbuf(q, vb); /* * If already streaming, give the buffer to driver for processing. * If not, the buffer will be given to driver on next streamon. */ if (q->start_streaming_called) __enqueue_in_driver(vb); /* Fill buffer information for the userspace */ if (pb) call_void_bufop(q, fill_user_buffer, vb, pb); /* * If streamon has been called, and we haven't yet called * start_streaming() since not enough buffers were queued, and * we now have reached the minimum number of queued buffers, * then we can finally call start_streaming(). */ if (q->streaming && !q->start_streaming_called && q->queued_count >= q->min_queued_buffers) { ret = vb2_start_streaming(q); if (ret) { /* * Since vb2_core_qbuf will return with an error, * we should return it to state DEQUEUED since * the error indicates that the buffer wasn't queued. */ list_del(&vb->queued_entry); q->queued_count--; vb->state = orig_state; return ret; } } dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); return 0; } EXPORT_SYMBOL_GPL(vb2_core_qbuf); /* * __vb2_wait_for_done_vb() - wait for a buffer to become available * for dequeuing * * Will sleep if required for nonblocking == false. */ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) { /* * All operations on vb_done_list are performed under done_lock * spinlock protection. However, buffers may be removed from * it and returned to userspace only while holding both driver's * lock and the done_lock spinlock. Thus we can be sure that as * long as we hold the driver's lock, the list will remain not * empty if list_empty() check succeeds. */ for (;;) { int ret; if (q->waiting_in_dqbuf) { dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } if (!q->streaming) { dprintk(q, 1, "streaming off, will not wait for buffers\n"); return -EINVAL; } if (q->error) { dprintk(q, 1, "Queue in error state, will not wait for buffers\n"); return -EIO; } if (q->last_buffer_dequeued) { dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n"); return -EPIPE; } if (!list_empty(&q->done_list)) { /* * Found a buffer that we were waiting for. */ break; } if (nonblocking) { dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n"); return -EAGAIN; } q->waiting_in_dqbuf = 1; /* * We are streaming and blocking, wait for another buffer to * become ready or for streamoff. Driver's lock is released to * allow streamoff or qbuf to be called while waiting. 
*/ if (q->ops->wait_prepare) call_void_qop(q, wait_prepare, q); else if (q->lock) mutex_unlock(q->lock); /* * All locks have been released, it is safe to sleep now. */ dprintk(q, 3, "will sleep waiting for buffers\n"); ret = wait_event_interruptible(q->done_wq, !list_empty(&q->done_list) || !q->streaming || q->error); if (q->ops->wait_finish) call_void_qop(q, wait_finish, q); else if (q->lock) mutex_lock(q->lock); q->waiting_in_dqbuf = 0; /* * We need to reevaluate both conditions again after reacquiring * the locks or return an error if one occurred. */ if (ret) { dprintk(q, 1, "sleep was interrupted\n"); return ret; } } return 0; } /* * __vb2_get_done_vb() - get a buffer ready for dequeuing * * Will sleep if required for nonblocking == false. */ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, void *pb, int nonblocking) { unsigned long flags; int ret = 0; /* * Wait for at least one buffer to become available on the done_list. */ ret = __vb2_wait_for_done_vb(q, nonblocking); if (ret) return ret; /* * Driver's lock has been held since we last verified that done_list * is not empty, so no need for another list_empty(done_list) check. */ spin_lock_irqsave(&q->done_lock, flags); *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); /* * Only remove the buffer from done_list if all planes can be * handled. Some cases such as V4L2 file I/O and DVB have pb * == NULL; skip the check then as there's nothing to verify. */ if (pb) ret = call_bufop(q, verify_planes_array, *vb, pb); if (!ret) list_del(&(*vb)->done_entry); spin_unlock_irqrestore(&q->done_lock, flags); return ret; } int vb2_wait_for_all_buffers(struct vb2_queue *q) { if (!q->streaming) { dprintk(q, 1, "streaming off, will not wait for buffers\n"); return -EINVAL; } if (q->start_streaming_called) wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count)); return 0; } EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); /* * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state */ static void __vb2_dqbuf(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; /* nothing to do if the buffer is already dequeued */ if (vb->state == VB2_BUF_STATE_DEQUEUED) return; vb->state = VB2_BUF_STATE_DEQUEUED; call_void_bufop(q, init_buffer, vb); } int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, bool nonblocking) { struct vb2_buffer *vb = NULL; int ret; ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); if (ret < 0) return ret; switch (vb->state) { case VB2_BUF_STATE_DONE: dprintk(q, 3, "returning done buffer\n"); break; case VB2_BUF_STATE_ERROR: dprintk(q, 3, "returning done buffer with errors\n"); break; default: dprintk(q, 1, "invalid buffer state %s\n", vb2_state_name(vb->state)); return -EINVAL; } call_void_vb_qop(vb, buf_finish, vb); vb->prepared = 0; if (pindex) *pindex = vb->index; /* Fill buffer information for the userspace */ if (pb) call_void_bufop(q, fill_user_buffer, vb, pb); /* Remove from vb2 queue */ list_del(&vb->queued_entry); q->queued_count--; trace_vb2_dqbuf(q, vb); /* go back to dequeued state */ __vb2_dqbuf(vb); if (WARN_ON(vb->req_obj.req)) { media_request_object_unbind(&vb->req_obj); media_request_object_put(&vb->req_obj); } if (vb->request) media_request_put(vb->request); vb->request = NULL; dprintk(q, 2, "dqbuf of buffer %d, state: %s\n", vb->index, vb2_state_name(vb->state)); return 0; } EXPORT_SYMBOL_GPL(vb2_core_dqbuf); /* * __vb2_queue_cancel() - cancel and stop (pause) streaming * * Removes all queued buffers from driver's queue and all buffers queued 
by * userspace from vb2's queue. Returns to state after reqbufs. */ static void __vb2_queue_cancel(struct vb2_queue *q) { unsigned int i; /* * Tell driver to stop all transactions and release all queued * buffers. */ if (q->start_streaming_called) call_void_qop(q, stop_streaming, q); if (q->streaming) call_void_qop(q, unprepare_streaming, q); /* * If you see this warning, then the driver isn't cleaning up properly * in stop_streaming(). See the stop_streaming() documentation in * videobuf2-core.h for more information how buffers should be returned * to vb2 in stop_streaming(). */ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { for (i = 0; i < q->max_num_buffers; i++) { struct vb2_buffer *vb = vb2_get_buffer(q, i); if (!vb) continue; if (vb->state == VB2_BUF_STATE_ACTIVE) { pr_warn("driver bug: stop_streaming operation is leaving buffer %u in active state\n", vb->index); vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); } } /* Must be zero now */ WARN_ON(atomic_read(&q->owned_by_drv_count)); } q->streaming = 0; q->start_streaming_called = 0; q->queued_count = 0; q->error = 0; q->uses_requests = 0; q->uses_qbuf = 0; /* * Remove all buffers from vb2's list... */ INIT_LIST_HEAD(&q->queued_list); /* * ...and done list; userspace will not receive any buffers it * has not already dequeued before initiating cancel. */ INIT_LIST_HEAD(&q->done_list); atomic_set(&q->owned_by_drv_count, 0); wake_up_all(&q->done_wq); /* * Reinitialize all buffers for next use. * Make sure to call buf_finish for any queued buffers. Normally * that's done in dqbuf, but that's not going to happen when we * cancel the whole queue. Note: this code belongs here, not in * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical * call to __fill_user_buffer() after buf_finish(). That order can't * be changed, so we can't move the buf_finish() to __vb2_dqbuf(). */ for (i = 0; i < q->max_num_buffers; i++) { struct vb2_buffer *vb; struct media_request *req; vb = vb2_get_buffer(q, i); if (!vb) continue; req = vb->req_obj.req; /* * If a request is associated with this buffer, then * call buf_request_cancel() to give the driver to complete() * related request objects. Otherwise those objects would * never complete. */ if (req) { enum media_request_state state; unsigned long flags; spin_lock_irqsave(&req->lock, flags); state = req->state; spin_unlock_irqrestore(&req->lock, flags); if (state == MEDIA_REQUEST_STATE_QUEUED) call_void_vb_qop(vb, buf_request_complete, vb); } __vb2_buf_mem_finish(vb); if (vb->prepared) { call_void_vb_qop(vb, buf_finish, vb); vb->prepared = 0; } __vb2_dqbuf(vb); if (vb->req_obj.req) { media_request_object_unbind(&vb->req_obj); media_request_object_put(&vb->req_obj); } if (vb->request) media_request_put(vb->request); vb->request = NULL; vb->copied_timestamp = 0; } } int vb2_core_streamon(struct vb2_queue *q, unsigned int type) { unsigned int q_num_bufs = vb2_get_num_buffers(q); int ret; if (type != q->type) { dprintk(q, 1, "invalid stream type\n"); return -EINVAL; } if (q->streaming) { dprintk(q, 3, "already streaming\n"); return 0; } if (!q_num_bufs) { dprintk(q, 1, "no buffers have been allocated\n"); return -EINVAL; } if (q_num_bufs < q->min_queued_buffers) { dprintk(q, 1, "need at least %u allocated buffers\n", q->min_queued_buffers); return -EINVAL; } ret = call_qop(q, prepare_streaming, q); if (ret) return ret; /* * Tell driver to start streaming provided sufficient buffers * are available. 
*/ if (q->queued_count >= q->min_queued_buffers) { ret = vb2_start_streaming(q); if (ret) goto unprepare; } q->streaming = 1; dprintk(q, 3, "successful\n"); return 0; unprepare: call_void_qop(q, unprepare_streaming, q); return ret; } EXPORT_SYMBOL_GPL(vb2_core_streamon); void vb2_queue_error(struct vb2_queue *q) { q->error = 1; wake_up_all(&q->done_wq); } EXPORT_SYMBOL_GPL(vb2_queue_error); int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) { if (type != q->type) { dprintk(q, 1, "invalid stream type\n"); return -EINVAL; } /* * Cancel will pause streaming and remove all buffers from the driver * and vb2, effectively returning control over them to userspace. * * Note that we do this even if q->streaming == 0: if you prepare or * queue buffers, and then call streamoff without ever having called * streamon, you would still expect those buffers to be returned to * their normal dequeued state. */ __vb2_queue_cancel(q); q->waiting_for_buffers = !q->is_output; q->last_buffer_dequeued = false; dprintk(q, 3, "successful\n"); return 0; } EXPORT_SYMBOL_GPL(vb2_core_streamoff); /* * __find_plane_by_offset() - find plane associated with the given offset */ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long offset, struct vb2_buffer **vb, unsigned int *plane) { unsigned int buffer; /* * Sanity checks to ensure the lock is held, MEMORY_MMAP is * used and fileio isn't active. */ lockdep_assert_held(&q->mmap_lock); if (q->memory != VB2_MEMORY_MMAP) { dprintk(q, 1, "queue is not currently set up for mmap\n"); return -EINVAL; } if (vb2_fileio_is_active(q)) { dprintk(q, 1, "file io in progress\n"); return -EBUSY; } /* Get buffer and plane from the offset */ buffer = (offset >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK; *plane = (offset >> PAGE_SHIFT) & PLANE_INDEX_MASK; *vb = vb2_get_buffer(q, buffer); if (!*vb) return -EINVAL; if (*plane >= (*vb)->num_planes) return -EINVAL; return 0; } int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, struct vb2_buffer *vb, unsigned int plane, unsigned int flags) { struct vb2_plane *vb_plane; int ret; struct dma_buf *dbuf; if (q->memory != VB2_MEMORY_MMAP) { dprintk(q, 1, "queue is not currently set up for mmap\n"); return -EINVAL; } if (!q->mem_ops->get_dmabuf) { dprintk(q, 1, "queue does not support DMA buffer exporting\n"); return -EINVAL; } if (flags & ~(O_CLOEXEC | O_ACCMODE)) { dprintk(q, 1, "queue does support only O_CLOEXEC and access mode flags\n"); return -EINVAL; } if (type != q->type) { dprintk(q, 1, "invalid buffer type\n"); return -EINVAL; } if (plane >= vb->num_planes) { dprintk(q, 1, "buffer plane out of range\n"); return -EINVAL; } if (vb2_fileio_is_active(q)) { dprintk(q, 1, "expbuf: file io in progress\n"); return -EBUSY; } vb_plane = &vb->planes[plane]; dbuf = call_ptr_memop(get_dmabuf, vb, vb_plane->mem_priv, flags & O_ACCMODE); if (IS_ERR_OR_NULL(dbuf)) { dprintk(q, 1, "failed to export buffer %d, plane %d\n", vb->index, plane); return -EINVAL; } ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE); if (ret < 0) { dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n", vb->index, plane, ret); dma_buf_put(dbuf); return ret; } dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n", vb->index, plane, ret); *fd = ret; return 0; } EXPORT_SYMBOL_GPL(vb2_core_expbuf); int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) { unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; struct vb2_buffer *vb; unsigned int plane = 0; int ret; unsigned long length; /* * Check memory area access mode. 
*/ if (!(vma->vm_flags & VM_SHARED)) { dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n"); return -EINVAL; } if (q->is_output) { if (!(vma->vm_flags & VM_WRITE)) { dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n"); return -EINVAL; } } else { if (!(vma->vm_flags & VM_READ)) { dprintk(q, 1, "invalid vma flags, VM_READ needed\n"); return -EINVAL; } } mutex_lock(&q->mmap_lock); /* * Find the plane corresponding to the offset passed by userspace. This * will return an error if not MEMORY_MMAP or file I/O is in progress. */ ret = __find_plane_by_offset(q, offset, &vb, &plane); if (ret) goto unlock; /* * MMAP requires page_aligned buffers. * The buffer length was page_aligned at __vb2_buf_mem_alloc(), * so, we need to do the same here. */ length = PAGE_ALIGN(vb->planes[plane].length); if (length < (vma->vm_end - vma->vm_start)) { dprintk(q, 1, "MMAP invalid, as it would overflow buffer length\n"); ret = -EINVAL; goto unlock; } /* * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer, * not as a in-buffer offset. We always want to mmap a whole buffer * from its beginning. */ vma->vm_pgoff = 0; ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); unlock: mutex_unlock(&q->mmap_lock); if (ret) return ret; dprintk(q, 3, "buffer %u, plane %d successfully mapped\n", vb->index, plane); return 0; } EXPORT_SYMBOL_GPL(vb2_mmap); #ifndef CONFIG_MMU unsigned long vb2_get_unmapped_area(struct vb2_queue *q, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long offset = pgoff << PAGE_SHIFT; struct vb2_buffer *vb; unsigned int plane; void *vaddr; int ret; mutex_lock(&q->mmap_lock); /* * Find the plane corresponding to the offset passed by userspace. This * will return an error if not MEMORY_MMAP or file I/O is in progress. */ ret = __find_plane_by_offset(q, offset, &vb, &plane); if (ret) goto unlock; vaddr = vb2_plane_vaddr(vb, plane); mutex_unlock(&q->mmap_lock); return vaddr ? (unsigned long)vaddr : -EINVAL; unlock: mutex_unlock(&q->mmap_lock); return ret; } EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); #endif int vb2_core_queue_init(struct vb2_queue *q) { /* * Sanity check */ /* * For drivers who don't support max_num_buffers ensure * a backward compatibility. */ if (!q->max_num_buffers) q->max_num_buffers = VB2_MAX_FRAME; /* The maximum is limited by offset cookie encoding pattern */ q->max_num_buffers = min_t(unsigned int, q->max_num_buffers, MAX_BUFFER_INDEX); if (WARN_ON(!q) || WARN_ON(!q->ops) || WARN_ON(!q->mem_ops) || WARN_ON(!q->type) || WARN_ON(!q->io_modes) || WARN_ON(!q->ops->queue_setup) || WARN_ON(!q->ops->buf_queue)) return -EINVAL; if (WARN_ON(q->max_num_buffers < VB2_MAX_FRAME) || WARN_ON(q->min_queued_buffers > q->max_num_buffers)) return -EINVAL; if (WARN_ON(q->requires_requests && !q->supports_requests)) return -EINVAL; /* * This combination is not allowed since a non-zero value of * q->min_queued_buffers can cause vb2_core_qbuf() to fail if * it has to call start_streaming(), and the Request API expects * that queueing a request (and thus queueing a buffer contained * in that request) will always succeed. There is no method of * propagating an error back to userspace. */ if (WARN_ON(q->supports_requests && q->min_queued_buffers)) return -EINVAL; /* * If the driver needs 'min_queued_buffers' in the queue before * calling start_streaming() then the minimum requirement is * 'min_queued_buffers + 1' to keep at least one buffer available * for userspace. 
*/ if (q->min_reqbufs_allocation < q->min_queued_buffers + 1) q->min_reqbufs_allocation = q->min_queued_buffers + 1; if (WARN_ON(q->min_reqbufs_allocation > q->max_num_buffers)) return -EINVAL; /* Either both or none are set */ if (WARN_ON(!q->ops->wait_prepare ^ !q->ops->wait_finish)) return -EINVAL; /* Warn if q->lock is NULL and no custom wait_prepare is provided */ if (WARN_ON(!q->lock && !q->ops->wait_prepare)) return -EINVAL; INIT_LIST_HEAD(&q->queued_list); INIT_LIST_HEAD(&q->done_list); spin_lock_init(&q->done_lock); mutex_init(&q->mmap_lock); init_waitqueue_head(&q->done_wq); q->memory = VB2_MEMORY_UNKNOWN; if (q->buf_struct_size == 0) q->buf_struct_size = sizeof(struct vb2_buffer); if (q->bidirectional) q->dma_dir = DMA_BIDIRECTIONAL; else q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; if (q->name[0] == '\0') snprintf(q->name, sizeof(q->name), "%s-%p", q->is_output ? "out" : "cap", q); return 0; } EXPORT_SYMBOL_GPL(vb2_core_queue_init); static int __vb2_init_fileio(struct vb2_queue *q, int read); static int __vb2_cleanup_fileio(struct vb2_queue *q); void vb2_core_queue_release(struct vb2_queue *q) { __vb2_cleanup_fileio(q); __vb2_queue_cancel(q); mutex_lock(&q->mmap_lock); __vb2_queue_free(q, 0, q->max_num_buffers); vb2_core_free_buffers_storage(q); q->is_busy = 0; mutex_unlock(&q->mmap_lock); } EXPORT_SYMBOL_GPL(vb2_core_queue_release); __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, poll_table *wait) { __poll_t req_events = poll_requested_events(wait); struct vb2_buffer *vb = NULL; unsigned long flags; /* * poll_wait() MUST be called on the first invocation on all the * potential queues of interest, even if we are not interested in their * events during this first call. Failure to do so will result in * queue's events to be ignored because the poll_table won't be capable * of adding new wait queues thereafter. */ poll_wait(file, &q->done_wq, wait); if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM))) return 0; if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) return 0; /* * Start file I/O emulator only if streaming API has not been used yet. */ if (vb2_get_num_buffers(q) == 0 && !vb2_fileio_is_active(q)) { if (!q->is_output && (q->io_modes & VB2_READ) && (req_events & (EPOLLIN | EPOLLRDNORM))) { if (__vb2_init_fileio(q, 1)) return EPOLLERR; } if (q->is_output && (q->io_modes & VB2_WRITE) && (req_events & (EPOLLOUT | EPOLLWRNORM))) { if (__vb2_init_fileio(q, 0)) return EPOLLERR; /* * Write to OUTPUT queue can be done immediately. */ return EPOLLOUT | EPOLLWRNORM; } } /* * There is nothing to wait for if the queue isn't streaming, or if the * error flag is set. */ if (!vb2_is_streaming(q) || q->error) return EPOLLERR; /* * If this quirk is set and QBUF hasn't been called yet then * return EPOLLERR as well. This only affects capture queues, output * queues will always initialize waiting_for_buffers to false. * This quirk is set by V4L2 for backwards compatibility reasons. */ if (q->quirk_poll_must_check_waiting_for_buffers && q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM))) return EPOLLERR; /* * For output streams you can call write() as long as there are fewer * buffers queued than there are buffers available. */ if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q)) return EPOLLOUT | EPOLLWRNORM; if (list_empty(&q->done_list)) { /* * If the last buffer was dequeued from a capture queue, * return immediately. DQBUF will return -EPIPE. 
*/ if (q->last_buffer_dequeued) return EPOLLIN | EPOLLRDNORM; } /* * Take first buffer available for dequeuing. */ spin_lock_irqsave(&q->done_lock, flags); if (!list_empty(&q->done_list)) vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); spin_unlock_irqrestore(&q->done_lock, flags); if (vb && (vb->state == VB2_BUF_STATE_DONE || vb->state == VB2_BUF_STATE_ERROR)) { return (q->is_output) ? EPOLLOUT | EPOLLWRNORM : EPOLLIN | EPOLLRDNORM; } return 0; } EXPORT_SYMBOL_GPL(vb2_core_poll); /* * struct vb2_fileio_buf - buffer context used by file io emulator * * vb2 provides a compatibility layer and emulator of file io (read and * write) calls on top of streaming API. This structure is used for * tracking context related to the buffers. */ struct vb2_fileio_buf { void *vaddr; unsigned int size; unsigned int pos; unsigned int queued:1; }; /* * struct vb2_fileio_data - queue context used by file io emulator * * @cur_index: the index of the buffer currently being read from or * written to. If equal to number of buffers in the vb2_queue * then a new buffer must be dequeued. * @initial_index: in the read() case all buffers are queued up immediately * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles * buffers. However, in the write() case no buffers are initially * queued, instead whenever a buffer is full it is queued up by * __vb2_perform_fileio(). Only once all available buffers have * been queued up will __vb2_perform_fileio() start to dequeue * buffers. This means that initially __vb2_perform_fileio() * needs to know what buffer index to use when it is queuing up * the buffers for the first time. That initial index is stored * in this field. Once it is equal to number of buffers in the * vb2_queue all available buffers have been queued and * __vb2_perform_fileio() should start the normal dequeue/queue cycle. * * vb2 provides a compatibility layer and emulator of file io (read and * write) calls on top of streaming API. For proper operation it required * this structure to save the driver state between each call of the read * or write function. */ struct vb2_fileio_data { unsigned int count; unsigned int type; unsigned int memory; struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; unsigned int cur_index; unsigned int initial_index; unsigned int q_count; unsigned int dq_count; unsigned read_once:1; unsigned write_immediately:1; }; /* * __vb2_init_fileio() - initialize file io emulator * @q: videobuf2 queue * @read: mode selector (1 means read, 0 means write) */ static int __vb2_init_fileio(struct vb2_queue *q, int read) { struct vb2_fileio_data *fileio; struct vb2_buffer *vb; int i, ret; /* * Sanity check */ if (WARN_ON((read && !(q->io_modes & VB2_READ)) || (!read && !(q->io_modes & VB2_WRITE)))) return -EINVAL; /* * Check if device supports mapping buffers to kernel virtual space. */ if (!q->mem_ops->vaddr) return -EBUSY; /* * Check if streaming api has not been already activated. */ if (q->streaming || vb2_get_num_buffers(q) > 0) return -EBUSY; dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", (read) ? "read" : "write", q->min_reqbufs_allocation, q->fileio_read_once, q->fileio_write_immediately); fileio = kzalloc(sizeof(*fileio), GFP_KERNEL); if (fileio == NULL) return -ENOMEM; fileio->read_once = q->fileio_read_once; fileio->write_immediately = q->fileio_write_immediately; /* * Request buffers and use MMAP type to force driver * to allocate buffers by itself. 
*/ fileio->count = q->min_reqbufs_allocation; fileio->memory = VB2_MEMORY_MMAP; fileio->type = q->type; q->fileio = fileio; ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); if (ret) goto err_kfree; /* vb2_fileio_data supports max VB2_MAX_FRAME buffers */ if (fileio->count > VB2_MAX_FRAME) { dprintk(q, 1, "fileio: more than VB2_MAX_FRAME buffers requested\n"); ret = -ENOSPC; goto err_reqbufs; } /* * Userspace can never add or delete buffers later, so there * will never be holes. It is safe to assume that vb2_get_buffer(q, 0) * will always return a valid vb pointer */ vb = vb2_get_buffer(q, 0); /* * Check if plane_count is correct * (multiplane buffers are not supported). */ if (vb->num_planes != 1) { ret = -EBUSY; goto err_reqbufs; } /* * Get kernel address of each buffer. */ for (i = 0; i < vb2_get_num_buffers(q); i++) { /* vb can never be NULL when using fileio. */ vb = vb2_get_buffer(q, i); fileio->bufs[i].vaddr = vb2_plane_vaddr(vb, 0); if (fileio->bufs[i].vaddr == NULL) { ret = -EINVAL; goto err_reqbufs; } fileio->bufs[i].size = vb2_plane_size(vb, 0); } /* * Read mode requires pre queuing of all buffers. */ if (read) { /* * Queue all buffers. */ for (i = 0; i < vb2_get_num_buffers(q); i++) { struct vb2_buffer *vb2 = vb2_get_buffer(q, i); if (!vb2) continue; ret = vb2_core_qbuf(q, vb2, NULL, NULL); if (ret) goto err_reqbufs; fileio->bufs[i].queued = 1; } /* * All buffers have been queued, so mark that by setting * initial_index to the number of buffers in the vb2_queue */ fileio->initial_index = vb2_get_num_buffers(q); fileio->cur_index = fileio->initial_index; } /* * Start streaming. */ ret = vb2_core_streamon(q, q->type); if (ret) goto err_reqbufs; return ret; err_reqbufs: fileio->count = 0; vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); err_kfree: q->fileio = NULL; kfree(fileio); return ret; } /* * __vb2_cleanup_fileio() - free resourced used by file io emulator * @q: videobuf2 queue */ static int __vb2_cleanup_fileio(struct vb2_queue *q) { struct vb2_fileio_data *fileio = q->fileio; if (fileio) { vb2_core_streamoff(q, q->type); q->fileio = NULL; fileio->count = 0; vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); kfree(fileio); dprintk(q, 3, "file io emulator closed\n"); } return 0; } /* * __vb2_perform_fileio() - perform a single file io (read or write) operation * @q: videobuf2 queue * @data: pointed to target userspace buffer * @count: number of bytes to read or write * @ppos: file handle position tracking pointer * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) * @read: access mode selector (1 means read, 0 means write) */ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, loff_t *ppos, int nonblock, int read) { struct vb2_fileio_data *fileio; struct vb2_fileio_buf *buf; bool is_multiplanar = q->is_multiplanar; /* * When using write() to write data to an output video node the vb2 core * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody * else is able to provide this information with the write() operation. */ bool copy_timestamp = !read && q->copy_timestamp; unsigned index; int ret; dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n", read ? "read" : "write", (long)*ppos, count, nonblock ? "non" : ""); if (!data) return -EINVAL; if (q->waiting_in_dqbuf) { dprintk(q, 3, "another dup()ped fd is %s\n", read ? "reading" : "writing"); return -EBUSY; } /* * Initialize emulator on first call. 
*/ if (!vb2_fileio_is_active(q)) { ret = __vb2_init_fileio(q, read); dprintk(q, 3, "vb2_init_fileio result: %d\n", ret); if (ret) return ret; } fileio = q->fileio; /* * Check if we need to dequeue the buffer. */ index = fileio->cur_index; if (index >= vb2_get_num_buffers(q)) { struct vb2_buffer *b; /* * Call vb2_dqbuf to get buffer back. */ ret = vb2_core_dqbuf(q, &index, NULL, nonblock); dprintk(q, 5, "vb2_dqbuf result: %d\n", ret); if (ret) return ret; fileio->dq_count += 1; fileio->cur_index = index; buf = &fileio->bufs[index]; /* b can never be NULL when using fileio. */ b = vb2_get_buffer(q, index); /* * Get number of bytes filled by the driver */ buf->pos = 0; buf->queued = 0; buf->size = read ? vb2_get_plane_payload(b, 0) : vb2_plane_size(b, 0); /* Compensate for data_offset on read in the multiplanar case. */ if (is_multiplanar && read && b->planes[0].data_offset < buf->size) { buf->pos = b->planes[0].data_offset; buf->size -= buf->pos; } } else { buf = &fileio->bufs[index]; } /* * Limit count on last few bytes of the buffer. */ if (buf->pos + count > buf->size) { count = buf->size - buf->pos; dprintk(q, 5, "reducing read count: %zd\n", count); } /* * Transfer data to userspace. */ dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n", count, index, buf->pos); if (read) ret = copy_to_user(data, buf->vaddr + buf->pos, count); else ret = copy_from_user(buf->vaddr + buf->pos, data, count); if (ret) { dprintk(q, 3, "error copying data\n"); return -EFAULT; } /* * Update counters. */ buf->pos += count; *ppos += count; /* * Queue next buffer if required. */ if (buf->pos == buf->size || (!read && fileio->write_immediately)) { /* b can never be NULL when using fileio. */ struct vb2_buffer *b = vb2_get_buffer(q, index); /* * Check if this is the last buffer to read. */ if (read && fileio->read_once && fileio->dq_count == 1) { dprintk(q, 3, "read limit reached\n"); return __vb2_cleanup_fileio(q); } /* * Call vb2_qbuf and give buffer to the driver. */ b->planes[0].bytesused = buf->pos; if (copy_timestamp) b->timestamp = ktime_get_ns(); ret = vb2_core_qbuf(q, b, NULL, NULL); dprintk(q, 5, "vb2_qbuf result: %d\n", ret); if (ret) return ret; /* * Buffer has been queued, update the status */ buf->pos = 0; buf->queued = 1; buf->size = vb2_plane_size(b, 0); fileio->q_count += 1; /* * If we are queuing up buffers for the first time, then * increase initial_index by one. */ if (fileio->initial_index < vb2_get_num_buffers(q)) fileio->initial_index++; /* * The next buffer to use is either a buffer that's going to be * queued for the first time (initial_index < number of buffers in the vb2_queue) * or it is equal to the number of buffers in the vb2_queue, * meaning that the next time we need to dequeue a buffer since * we've now queued up all the 'first time' buffers. */ fileio->cur_index = fileio->initial_index; } /* * Return proper number of bytes processed. 
*/ if (ret == 0) ret = count; return ret; } size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, loff_t *ppos, int nonblocking) { return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); } EXPORT_SYMBOL_GPL(vb2_read); size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, loff_t *ppos, int nonblocking) { return __vb2_perform_fileio(q, (char __user *) data, count, ppos, nonblocking, 0); } EXPORT_SYMBOL_GPL(vb2_write); struct vb2_threadio_data { struct task_struct *thread; vb2_thread_fnc fnc; void *priv; bool stop; }; static int vb2_thread(void *data) { struct vb2_queue *q = data; struct vb2_threadio_data *threadio = q->threadio; bool copy_timestamp = false; unsigned prequeue = 0; unsigned index = 0; int ret = 0; if (q->is_output) { prequeue = vb2_get_num_buffers(q); copy_timestamp = q->copy_timestamp; } set_freezable(); for (;;) { struct vb2_buffer *vb; /* * Call vb2_dqbuf to get buffer back. */ if (prequeue) { vb = vb2_get_buffer(q, index++); if (!vb) continue; prequeue--; } else { if (!threadio->stop) { if (q->ops->wait_finish) call_void_qop(q, wait_finish, q); else if (q->lock) mutex_lock(q->lock); ret = vb2_core_dqbuf(q, &index, NULL, 0); if (q->ops->wait_prepare) call_void_qop(q, wait_prepare, q); else if (q->lock) mutex_unlock(q->lock); } dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret); if (!ret) vb = vb2_get_buffer(q, index); } if (ret || threadio->stop) break; try_to_freeze(); if (vb->state != VB2_BUF_STATE_ERROR) if (threadio->fnc(vb, threadio->priv)) break; if (copy_timestamp) vb->timestamp = ktime_get_ns(); if (!threadio->stop) { if (q->ops->wait_finish) call_void_qop(q, wait_finish, q); else if (q->lock) mutex_lock(q->lock); ret = vb2_core_qbuf(q, vb, NULL, NULL); if (q->ops->wait_prepare) call_void_qop(q, wait_prepare, q); else if (q->lock) mutex_unlock(q->lock); } if (ret || threadio->stop) break; } /* Hmm, linux becomes *very* unhappy without this ... */ while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); schedule(); } return 0; } /* * This function should not be used for anything else but the videobuf2-dvb * support. If you think you have another good use-case for this, then please * contact the linux-media mailinglist first. 
*/ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, const char *thread_name) { struct vb2_threadio_data *threadio; int ret = 0; if (q->threadio) return -EBUSY; if (vb2_is_busy(q)) return -EBUSY; if (WARN_ON(q->fileio)) return -EBUSY; threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); if (threadio == NULL) return -ENOMEM; threadio->fnc = fnc; threadio->priv = priv; ret = __vb2_init_fileio(q, !q->is_output); dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret); if (ret) goto nomem; q->threadio = threadio; threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); if (IS_ERR(threadio->thread)) { ret = PTR_ERR(threadio->thread); threadio->thread = NULL; goto nothread; } return 0; nothread: __vb2_cleanup_fileio(q); nomem: kfree(threadio); return ret; } EXPORT_SYMBOL_GPL(vb2_thread_start); int vb2_thread_stop(struct vb2_queue *q) { struct vb2_threadio_data *threadio = q->threadio; int err; if (threadio == NULL) return 0; threadio->stop = true; /* Wake up all pending sleeps in the thread */ vb2_queue_error(q); err = kthread_stop(threadio->thread); __vb2_cleanup_fileio(q); threadio->thread = NULL; kfree(threadio); q->threadio = NULL; return err; } EXPORT_SYMBOL_GPL(vb2_thread_stop); MODULE_DESCRIPTION("Media buffer core framework"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("DMA_BUF"); |
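/*
 * Illustrative sketch only (not part of the original file): one way a
 * hypothetical capture driver could forward its read() file operation to
 * the vb2_read() helper exported above. "my_capture_dev" and
 * "my_capture_read" are made-up names for this example; vb2_read() and
 * struct vb2_queue are the real interfaces provided by this framework.
 */
#include <linux/fs.h>
#include <media/videobuf2-core.h>

struct my_capture_dev {
	struct vb2_queue queue;	/* assumed to be set up elsewhere via vb2_core_queue_init() */
};

static ssize_t __maybe_unused my_capture_read(struct file *file,
					      char __user *buf, size_t count,
					      loff_t *ppos)
{
	struct my_capture_dev *dev = file->private_data;

	/* Let the fileio emulation above dequeue, copy out and requeue buffers. */
	return vb2_read(&dev->queue, buf, count, ppos,
			file->f_flags & O_NONBLOCK);
}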
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Basic Transport Functions exploiting Infiniband API * * Copyright IBM Corp.
2016 * * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com> */ #include <linux/socket.h> #include <linux/if_vlan.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/wait.h> #include <linux/reboot.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/smc.h> #include <net/tcp.h> #include <net/sock.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include "smc.h" #include "smc_clc.h" #include "smc_core.h" #include "smc_ib.h" #include "smc_wr.h" #include "smc_llc.h" #include "smc_cdc.h" #include "smc_close.h" #include "smc_ism.h" #include "smc_netlink.h" #include "smc_stats.h" #include "smc_tracepoint.h" #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY_SERV (600 * HZ) #define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ) struct smc_lgr_list smc_lgr_list = { /* established link groups */ .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock), .list = LIST_HEAD_INIT(smc_lgr_list.list), .num = 0, }; static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */ static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted); static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc); static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft); static void smc_link_down_work(struct work_struct *work); /* return head of link group list and its lock for a given link group */ static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr, spinlock_t **lgr_lock) { if (lgr->is_smcd) { *lgr_lock = &lgr->smcd->lgr_lock; return &lgr->smcd->lgr_list; } *lgr_lock = &smc_lgr_list.lock; return &smc_lgr_list.list; } static void smc_ibdev_cnt_inc(struct smc_link *lnk) { atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]); } static void smc_ibdev_cnt_dec(struct smc_link *lnk) { atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]); } static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) { /* client link group creation always follows the server link group * creation. For client use a somewhat higher removal delay time, * otherwise there is a risk of out-of-sync link groups. */ if (!lgr->freeing) { mod_delayed_work(system_wq, &lgr->free_work, (!lgr->is_smcd && lgr->role == SMC_CLNT) ? SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV); } } /* Register connection's alert token in our lookup structure. * To use rbtrees we have to implement our own insert core. * Requires @conns_lock * @smc connection to register * Returns 0 on success, != otherwise. */ static void smc_lgr_add_alert_token(struct smc_connection *conn) { struct rb_node **link, *parent = NULL; u32 token = conn->alert_token_local; link = &conn->lgr->conns_all.rb_node; while (*link) { struct smc_connection *cur = rb_entry(*link, struct smc_connection, alert_node); parent = *link; if (cur->alert_token_local > token) link = &parent->rb_left; else link = &parent->rb_right; } /* Put the new node there */ rb_link_node(&conn->alert_node, parent, link); rb_insert_color(&conn->alert_node, &conn->lgr->conns_all); } /* assign an SMC-R link to the connection */ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first) { enum smc_link_state expected = first ? 
SMC_LNK_ACTIVATING : SMC_LNK_ACTIVE; int i, j; /* do link balancing */ conn->lnk = NULL; /* reset conn->lnk first */ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &conn->lgr->lnk[i]; if (lnk->state != expected || lnk->link_is_asym) continue; if (conn->lgr->role == SMC_CLNT) { conn->lnk = lnk; /* temporary, SMC server assigns link*/ break; } if (conn->lgr->conns_num % 2) { for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) { struct smc_link *lnk2; lnk2 = &conn->lgr->lnk[j]; if (lnk2->state == expected && !lnk2->link_is_asym) { conn->lnk = lnk2; break; } } } if (!conn->lnk) conn->lnk = lnk; break; } if (!conn->lnk) return SMC_CLC_DECL_NOACTLINK; atomic_inc(&conn->lnk->conn_cnt); return 0; } /* Register connection in link group by assigning an alert token * registered in a search tree. * Requires @conns_lock * Note that '0' is a reserved value and not assigned. */ static int smc_lgr_register_conn(struct smc_connection *conn, bool first) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); static atomic_t nexttoken = ATOMIC_INIT(0); int rc; if (!conn->lgr->is_smcd) { rc = smcr_lgr_conn_assign_link(conn, first); if (rc) { conn->lgr = NULL; return rc; } } /* find a new alert_token_local value not yet used by some connection * in this link group */ sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */ while (!conn->alert_token_local) { conn->alert_token_local = atomic_inc_return(&nexttoken); if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr)) conn->alert_token_local = 0; } smc_lgr_add_alert_token(conn); conn->lgr->conns_num++; return 0; } /* Unregister connection and reset the alert token of the given connection< */ static void __smc_lgr_unregister_conn(struct smc_connection *conn) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); struct smc_link_group *lgr = conn->lgr; rb_erase(&conn->alert_node, &lgr->conns_all); if (conn->lnk) atomic_dec(&conn->lnk->conn_cnt); lgr->conns_num--; conn->alert_token_local = 0; sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */ } /* Unregister connection from lgr */ static void smc_lgr_unregister_conn(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; if (!smc_conn_lgr_valid(conn)) return; write_lock_bh(&lgr->conns_lock); if (conn->alert_token_local) { __smc_lgr_unregister_conn(conn); } write_unlock_bh(&lgr->conns_lock); } static void smc_lgr_buf_list_add(struct smc_link_group *lgr, bool is_rmb, struct list_head *buf_list, struct smc_buf_desc *buf_desc) { list_add(&buf_desc->list, buf_list); if (is_rmb) { lgr->alloc_rmbs += buf_desc->len; lgr->alloc_rmbs += lgr->is_smcd ? sizeof(struct smcd_cdc_msg) : 0; } else { lgr->alloc_sndbufs += buf_desc->len; } } static void smc_lgr_buf_list_del(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc) { list_del(&buf_desc->list); if (is_rmb) { lgr->alloc_rmbs -= buf_desc->len; lgr->alloc_rmbs -= lgr->is_smcd ? 
sizeof(struct smcd_cdc_msg) : 0; } else { lgr->alloc_sndbufs -= buf_desc->len; } } int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); char hostname[SMC_MAX_HOSTNAME_LEN + 1]; char smc_seid[SMC_MAX_EID_LEN + 1]; struct nlattr *attrs; u8 *seid = NULL; u8 *host = NULL; void *nlh; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_SYS_INFO); if (!nlh) goto errmsg; if (cb_ctx->pos[0]) goto errout; attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO); if (!attrs) goto errout; if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2)) goto errattr; if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE)) goto errattr; if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable())) goto errattr; if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true)) goto errattr; smc_clc_get_hostname(&host); if (host) { memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN); hostname[SMC_MAX_HOSTNAME_LEN] = 0; if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname)) goto errattr; } if (smc_ism_is_v2_capable()) { smc_ism_get_system_eid(&seid); memcpy(smc_seid, seid, SMC_MAX_EID_LEN); smc_seid[SMC_MAX_EID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid)) goto errattr; } nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); cb_ctx->pos[0] = 1; return skb->len; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return skb->len; } /* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */ static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb, struct nlattr *v2_attrs) { char smc_host[SMC_MAX_HOSTNAME_LEN + 1]; char smc_eid[SMC_MAX_EID_LEN + 1]; if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os)) goto errv2attr; memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN); smc_host[SMC_MAX_HOSTNAME_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host)) goto errv2attr; memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN); smc_eid[SMC_MAX_EID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid)) goto errv2attr; nla_nest_end(skb, v2_attrs); return 0; errv2attr: nla_nest_cancel(skb, v2_attrs); return -EMSGSIZE; } static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *v2_attrs; v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2); if (!v2_attrs) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_CONNS, lgr->max_conns)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_LINKS, lgr->max_links)) goto errv2attr; nla_nest_end(skb, v2_attrs); return 0; errv2attr: nla_nest_cancel(skb, v2_attrs); errattr: return -EMSGSIZE; } static int smc_nl_fill_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { char smc_target[SMC_MAX_PNETID_LEN + 1]; struct nlattr *attrs, *v2_attrs; attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR); if (!attrs) goto errout; if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id))) goto errattr; if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type)) goto errattr; if 
(nla_put_u8(skb, SMC_NLA_LGR_R_BUF_TYPE, lgr->buf_type)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE, lgr->net->net_cookie, SMC_NLA_LGR_R_PAD)) goto errattr; memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN); smc_target[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target)) goto errattr; if (nla_put_uint(skb, SMC_NLA_LGR_R_SNDBUF_ALLOC, lgr->alloc_sndbufs)) goto errattr; if (nla_put_uint(skb, SMC_NLA_LGR_R_RMB_ALLOC, lgr->alloc_rmbs)) goto errattr; if (lgr->smc_version > SMC_V1) { v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON); if (!v2_attrs) goto errattr; if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs)) goto errattr; if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb)) goto errattr; } nla_nest_end(skb, attrs); return 0; errattr: nla_nest_cancel(skb, attrs); errout: return -EMSGSIZE; } static int smc_nl_fill_lgr_link(struct smc_link_group *lgr, struct smc_link *link, struct sk_buff *skb, struct netlink_callback *cb) { char smc_ibname[IB_DEVICE_NAME_MAX]; u8 smc_gid_target[41]; struct nlattr *attrs; u32 link_uid = 0; void *nlh; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_LINK_SMCR); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR); if (!attrs) goto errout; if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT, atomic_read(&link->conn_cnt))) goto errattr; if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx)) goto errattr; snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname); if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname)) goto errattr; memcpy(&link_uid, link->link_uid, sizeof(link_uid)); if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid)) goto errattr; memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid)); if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid)) goto errattr; memset(smc_gid_target, 0, sizeof(smc_gid_target)); smc_gid_be16_convert(smc_gid_target, link->gid); if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target)) goto errattr; memset(smc_gid_target, 0, sizeof(smc_gid_target)); smc_gid_be16_convert(smc_gid_target, link->peer_gid); if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target)) goto errattr; nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static int smc_nl_handle_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb, bool list_links) { void *nlh; int i; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_LGR_SMCR); if (!nlh) goto errmsg; if (smc_nl_fill_lgr(lgr, skb, cb)) goto errout; genlmsg_end(skb, nlh); if (!list_links) goto out; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_usable(&lgr->lnk[i])) continue; if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb)) goto errout; } out: return 0; errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr, struct sk_buff *skb, struct netlink_callback *cb, bool list_links) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smc_link_group *lgr; int snum = cb_ctx->pos[0]; int num = 0; 
spin_lock_bh(&smc_lgr->lock); list_for_each_entry(lgr, &smc_lgr->list, list) { if (num < snum) goto next; if (smc_nl_handle_lgr(lgr, skb, cb, list_links)) goto errout; next: num++; } errout: spin_unlock_bh(&smc_lgr->lock); cb_ctx->pos[0] = num; } static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { char smc_pnet[SMC_MAX_PNETID_LEN + 1]; struct smcd_dev *smcd = lgr->smcd; struct smcd_gid smcd_gid; struct nlattr *attrs; void *nlh; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_LGR_SMCD); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD); if (!attrs) goto errout; if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id))) goto errattr; smcd->ops->get_local_gid(smcd, &smcd_gid); if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID, smcd_gid.gid, SMC_NLA_LGR_D_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_EXT_GID, smcd_gid.gid_ext, SMC_NLA_LGR_D_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid.gid, SMC_NLA_LGR_D_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_EXT_GID, lgr->peer_gid.gid_ext, SMC_NLA_LGR_D_PAD)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd))) goto errattr; if (nla_put_uint(skb, SMC_NLA_LGR_D_SNDBUF_ALLOC, lgr->alloc_sndbufs)) goto errattr; if (nla_put_uint(skb, SMC_NLA_LGR_D_DMB_ALLOC, lgr->alloc_rmbs)) goto errattr; memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN); smc_pnet[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet)) goto errattr; if (lgr->smc_version > SMC_V1) { struct nlattr *v2_attrs; v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON); if (!v2_attrs) goto errattr; if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs)) goto errattr; } nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smc_link_group *lgr; int snum = cb_ctx->pos[1]; int rc = 0, num = 0; spin_lock_bh(&dev->lgr_lock); list_for_each_entry(lgr, &dev->lgr_list, list) { if (!lgr->is_smcd) continue; if (num < snum) goto next; rc = smc_nl_fill_smcd_lgr(lgr, skb, cb); if (rc) goto errout; next: num++; } errout: spin_unlock_bh(&dev->lgr_lock); cb_ctx->pos[1] = num; return rc; } static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smcd_dev *smcd_dev; int snum = cb_ctx->pos[0]; int rc = 0, num = 0; mutex_lock(&dev_list->mutex); list_for_each_entry(smcd_dev, &dev_list->list, list) { if (list_empty(&smcd_dev->lgr_list)) continue; if (num < snum) goto next; rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb); if (rc) goto errout; next: num++; } errout: mutex_unlock(&dev_list->mutex); cb_ctx->pos[0] = num; return rc; } int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb) { bool list_links = false; smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links); return skb->len; } int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb) { bool list_links = true; 
smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links); return skb->len; } int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb) { smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb); return skb->len; } void smc_lgr_cleanup_early(struct smc_link_group *lgr) { spinlock_t *lgr_lock; if (!lgr) return; smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); /* do not use this link group for new connections */ if (!list_empty(&lgr->list)) list_del_init(&lgr->list); spin_unlock_bh(lgr_lock); __smc_lgr_terminate(lgr, true); } static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr) { int i; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &lgr->lnk[i]; if (smc_link_sendable(lnk)) lnk->state = SMC_LNK_INACTIVE; } wake_up_all(&lgr->llc_msg_waiter); wake_up_all(&lgr->llc_flow_waiter); } static void smc_lgr_free(struct smc_link_group *lgr); static void smc_lgr_free_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(to_delayed_work(work), struct smc_link_group, free_work); spinlock_t *lgr_lock; bool conns; smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); if (lgr->freeing) { spin_unlock_bh(lgr_lock); return; } read_lock_bh(&lgr->conns_lock); conns = RB_EMPTY_ROOT(&lgr->conns_all); read_unlock_bh(&lgr->conns_lock); if (!conns) { /* number of lgr connections is no longer zero */ spin_unlock_bh(lgr_lock); return; } list_del_init(&lgr->list); /* remove from smc_lgr_list */ lgr->freeing = 1; /* this instance does the freeing, no new schedule */ spin_unlock_bh(lgr_lock); cancel_delayed_work(&lgr->free_work); if (!lgr->is_smcd && !lgr->terminating) smc_llc_send_link_delete_all(lgr, true, SMC_LLC_DEL_PROG_INIT_TERM); if (lgr->is_smcd && !lgr->terminating) smc_ism_signal_shutdown(lgr); if (!lgr->is_smcd) smcr_lgr_link_deactivate_all(lgr); smc_lgr_free(lgr); } static void smc_lgr_terminate_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(work, struct smc_link_group, terminate_work); __smc_lgr_terminate(lgr, true); } /* return next unique link id for the lgr */ static u8 smcr_next_link_id(struct smc_link_group *lgr) { u8 link_id; int i; while (1) { again: link_id = ++lgr->next_link_id; if (!link_id) /* skip zero as link_id */ link_id = ++lgr->next_link_id; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (smc_link_usable(&lgr->lnk[i]) && lgr->lnk[i].link_id == link_id) goto again; } break; } return link_id; } static void smcr_copy_dev_info_to_link(struct smc_link *link) { struct smc_ib_device *smcibdev = link->smcibdev; snprintf(link->ibname, sizeof(link->ibname), "%s", smcibdev->ibdev->name); link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1]; } int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, u8 link_idx, struct smc_init_info *ini) { struct smc_ib_device *smcibdev; u8 rndvec[3]; int rc; if (lgr->smc_version == SMC_V2) { lnk->smcibdev = ini->smcrv2.ib_dev_v2; lnk->ibport = ini->smcrv2.ib_port_v2; lnk->wr_rx_sge_cnt = lnk->smcibdev->ibdev->attrs.max_recv_sge < 2 ? 1 : 2; lnk->wr_rx_buflen = smc_link_shared_v2_rxbuf(lnk) ? 
SMC_WR_BUF_SIZE : SMC_WR_BUF_V2_SIZE; } else { lnk->smcibdev = ini->ib_dev; lnk->ibport = ini->ib_port; lnk->wr_rx_sge_cnt = 1; lnk->wr_rx_buflen = SMC_WR_BUF_SIZE; } get_device(&lnk->smcibdev->ibdev->dev); atomic_inc(&lnk->smcibdev->lnk_cnt); refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */ lnk->clearing = 0; lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu; lnk->link_id = smcr_next_link_id(lgr); lnk->lgr = lgr; smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */ lnk->link_idx = link_idx; lnk->wr_rx_id_compl = 0; smc_ibdev_cnt_inc(lnk); smcr_copy_dev_info_to_link(lnk); atomic_set(&lnk->conn_cnt, 0); smc_llc_link_set_uid(lnk); INIT_WORK(&lnk->link_down_wrk, smc_link_down_work); if (!lnk->smcibdev->initialized) { rc = (int)smc_ib_setup_per_ibdev(lnk->smcibdev); if (rc) goto out; } get_random_bytes(rndvec, sizeof(rndvec)); lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport, ini->vlan_id, lnk->gid, &lnk->sgid_index, lgr->smc_version == SMC_V2 ? &ini->smcrv2 : NULL); if (rc) goto out; rc = smc_llc_link_init(lnk); if (rc) goto out; rc = smc_wr_alloc_link_mem(lnk); if (rc) goto clear_llc_lnk; rc = smc_ib_create_protection_domain(lnk); if (rc) goto free_link_mem; rc = smc_ib_create_queue_pair(lnk); if (rc) goto dealloc_pd; rc = smc_wr_create_link(lnk); if (rc) goto destroy_qp; lnk->state = SMC_LNK_ACTIVATING; return 0; destroy_qp: smc_ib_destroy_queue_pair(lnk); dealloc_pd: smc_ib_dealloc_protection_domain(lnk); free_link_mem: smc_wr_free_link_mem(lnk); clear_llc_lnk: smc_llc_link_clear(lnk, false); out: smc_ibdev_cnt_dec(lnk); put_device(&lnk->smcibdev->ibdev->dev); smcibdev = lnk->smcibdev; memset(lnk, 0, sizeof(struct smc_link)); lnk->state = SMC_LNK_UNUSED; if (!atomic_dec_return(&smcibdev->lnk_cnt)) wake_up(&smcibdev->lnks_deleted); smc_lgr_put(lgr); /* lgr_hold above */ return rc; } /* create a new SMC link group */ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) { struct smc_link_group *lgr; struct list_head *lgr_list; struct smcd_dev *smcd; struct smc_link *lnk; spinlock_t *lgr_lock; u8 link_idx; int rc = 0; int i; if (ini->is_smcd && ini->vlan_id) { if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id)) { rc = SMC_CLC_DECL_ISMVLANERR; goto out; } } lgr = kzalloc(sizeof(*lgr), GFP_KERNEL); if (!lgr) { rc = SMC_CLC_DECL_MEM; goto ism_put_vlan; } lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0, SMC_LGR_ID_SIZE, &lgr->id); if (!lgr->tx_wq) { rc = -ENOMEM; goto free_lgr; } lgr->is_smcd = ini->is_smcd; lgr->sync_err = 0; lgr->terminating = 0; lgr->freeing = 0; lgr->vlan_id = ini->vlan_id; refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */ init_rwsem(&lgr->sndbufs_lock); init_rwsem(&lgr->rmbs_lock); rwlock_init(&lgr->conns_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { INIT_LIST_HEAD(&lgr->sndbufs[i]); INIT_LIST_HEAD(&lgr->rmbs[i]); } lgr->next_link_id = 0; smc_lgr_list.num += SMC_LGR_NUM_INCR; memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE); INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work); INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work); lgr->conns_all = RB_ROOT; if (ini->is_smcd) { /* SMC-D specific settings */ smcd = ini->ism_dev[ini->ism_selected]; get_device(smcd->ops->get_dev(smcd)); lgr->peer_gid.gid = ini->ism_peer_gid[ini->ism_selected].gid; lgr->peer_gid.gid_ext = ini->ism_peer_gid[ini->ism_selected].gid_ext; lgr->smcd = ini->ism_dev[ini->ism_selected]; lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list; 
lgr_lock = &lgr->smcd->lgr_lock; lgr->smc_version = ini->smcd_version; lgr->peer_shutdown = 0; atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt); } else { /* SMC-R specific settings */ struct smc_ib_device *ibdev; int ibport; lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT; lgr->smc_version = ini->smcr_version; memcpy(lgr->peer_systemid, ini->peer_systemid, SMC_SYSTEMID_LEN); if (lgr->smc_version == SMC_V2) { ibdev = ini->smcrv2.ib_dev_v2; ibport = ini->smcrv2.ib_port_v2; lgr->saddr = ini->smcrv2.saddr; lgr->uses_gateway = ini->smcrv2.uses_gateway; memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac, ETH_ALEN); lgr->max_conns = ini->max_conns; lgr->max_links = ini->max_links; } else { ibdev = ini->ib_dev; ibport = ini->ib_port; lgr->max_conns = SMC_CONN_PER_LGR_MAX; lgr->max_links = SMC_LINKS_ADD_LNK_MAX; } memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1], SMC_MAX_PNETID_LEN); rc = smc_wr_alloc_lgr_mem(lgr); if (rc) goto free_wq; smc_llc_lgr_init(lgr, smc); link_idx = SMC_SINGLE_LINK; lnk = &lgr->lnk[link_idx]; rc = smcr_link_init(lgr, lnk, link_idx, ini); if (rc) { smc_wr_free_lgr_mem(lgr); goto free_wq; } lgr->net = smc_ib_net(lnk->smcibdev); lgr_list = &smc_lgr_list.list; lgr_lock = &smc_lgr_list.lock; lgr->buf_type = lgr->net->smc.sysctl_smcr_buf_type; atomic_inc(&lgr_cnt); } smc->conn.lgr = lgr; spin_lock_bh(lgr_lock); list_add_tail(&lgr->list, lgr_list); spin_unlock_bh(lgr_lock); return 0; free_wq: destroy_workqueue(lgr->tx_wq); free_lgr: kfree(lgr); ism_put_vlan: if (ini->is_smcd && ini->vlan_id) smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id); out: if (rc < 0) { if (rc == -ENOMEM) rc = SMC_CLC_DECL_MEM; else rc = SMC_CLC_DECL_INTERR; } return rc; } static int smc_write_space(struct smc_connection *conn) { int buffer_len = conn->peer_rmbe_size; union smc_host_cursor prod; union smc_host_cursor cons; int space; smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn); smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn); /* determine rx_buf space */ space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod); return space; } static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend, struct smc_wr_buf *wr_buf) { struct smc_connection *conn = &smc->conn; union smc_host_cursor cons, fin; int rc = 0; int diff; smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn); smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn); /* set prod cursor to old state, enforce tx_rdma_writes() */ smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn); smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn); if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) { /* cons cursor advanced more than fin, and prod was set * fin above, so now prod is smaller than cons. Fix that. 
*/ diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons); smc_curs_add(conn->sndbuf_desc->len, &conn->tx_curs_sent, diff); smc_curs_add(conn->sndbuf_desc->len, &conn->tx_curs_fin, diff); smp_mb__before_atomic(); atomic_add(diff, &conn->sndbuf_space); smp_mb__after_atomic(); smc_curs_add(conn->peer_rmbe_size, &conn->local_tx_ctrl.prod, diff); smc_curs_add(conn->peer_rmbe_size, &conn->local_tx_ctrl_fin, diff); } /* recalculate, value is used by tx_rdma_writes() */ atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn)); if (smc->sk.sk_state != SMC_INIT && smc->sk.sk_state != SMC_CLOSED) { rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf); if (!rc) { queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0); smc->sk.sk_data_ready(&smc->sk); } } else { smc_wr_tx_put_slot(conn->lnk, (struct smc_wr_tx_pend_priv *)pend); } return rc; } void smc_switch_link_and_count(struct smc_connection *conn, struct smc_link *to_lnk) { atomic_dec(&conn->lnk->conn_cnt); /* link_hold in smc_conn_create() */ smcr_link_put(conn->lnk); conn->lnk = to_lnk; atomic_inc(&conn->lnk->conn_cnt); /* link_put in smc_conn_free() */ smcr_link_hold(conn->lnk); } struct smc_link *smc_switch_conns(struct smc_link_group *lgr, struct smc_link *from_lnk, bool is_dev_err) { struct smc_link *to_lnk = NULL; struct smc_cdc_tx_pend *pend; struct smc_connection *conn; struct smc_wr_buf *wr_buf; struct smc_sock *smc; struct rb_node *node; int i, rc = 0; /* link is inactive, wake up tx waiters */ smc_wr_wakeup_tx_wait(from_lnk); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx) continue; if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev && from_lnk->ibport == lgr->lnk[i].ibport) { continue; } to_lnk = &lgr->lnk[i]; break; } if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) { smc_lgr_terminate_sched(lgr); return NULL; } again: read_lock_bh(&lgr->conns_lock); for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) { conn = rb_entry(node, struct smc_connection, alert_node); if (conn->lnk != from_lnk) continue; smc = container_of(conn, struct smc_sock, conn); /* conn->lnk not yet set in SMC_INIT state */ if (smc->sk.sk_state == SMC_INIT) continue; if (smc->sk.sk_state == SMC_CLOSED || smc->sk.sk_state == SMC_PEERCLOSEWAIT1 || smc->sk.sk_state == SMC_PEERCLOSEWAIT2 || smc->sk.sk_state == SMC_APPFINCLOSEWAIT || smc->sk.sk_state == SMC_APPCLOSEWAIT1 || smc->sk.sk_state == SMC_APPCLOSEWAIT2 || smc->sk.sk_state == SMC_PEERFINCLOSEWAIT || smc->sk.sk_state == SMC_PEERABORTWAIT || smc->sk.sk_state == SMC_PROCESSABORT) { spin_lock_bh(&conn->send_lock); smc_switch_link_and_count(conn, to_lnk); spin_unlock_bh(&conn->send_lock); continue; } sock_hold(&smc->sk); read_unlock_bh(&lgr->conns_lock); /* pre-fetch buffer outside of send_lock, might sleep */ rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend); if (rc) goto err_out; /* avoid race with smcr_tx_sndbuf_nonempty() */ spin_lock_bh(&conn->send_lock); smc_switch_link_and_count(conn, to_lnk); rc = smc_switch_cursor(smc, pend, wr_buf); spin_unlock_bh(&conn->send_lock); sock_put(&smc->sk); if (rc) goto err_out; goto again; } read_unlock_bh(&lgr->conns_lock); smc_wr_tx_link_put(to_lnk); return to_lnk; err_out: smcr_link_down_cond_sched(to_lnk); smc_wr_tx_link_put(to_lnk); return NULL; } static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb, struct smc_link_group *lgr) { struct rw_semaphore *lock; /* lock buffer list */ int rc; if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) { /* 
unregister rmb with peer */ rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY); if (!rc) { /* protect against smc_llc_cli_rkey_exchange() */ down_read(&lgr->llc_conf_mutex); smc_llc_do_delete_rkey(lgr, buf_desc); buf_desc->is_conf_rkey = false; up_read(&lgr->llc_conf_mutex); smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } } if (buf_desc->is_reg_err) { /* buf registration failed, reuse not possible */ lock = is_rmb ? &lgr->rmbs_lock : &lgr->sndbufs_lock; down_write(lock); smc_lgr_buf_list_del(lgr, is_rmb, buf_desc); up_write(lock); smc_buf_free(lgr, is_rmb, buf_desc); } else { /* memzero_explicit provides potential memory barrier semantics */ memzero_explicit(buf_desc->cpu_addr, buf_desc->len); WRITE_ONCE(buf_desc->used, 0); } } static void smcd_buf_detach(struct smc_connection *conn) { struct smcd_dev *smcd = conn->lgr->smcd; u64 peer_token = conn->peer_token; if (!conn->sndbuf_desc) return; smc_ism_detach_dmb(smcd, peer_token); kfree(conn->sndbuf_desc); conn->sndbuf_desc = NULL; } static void smc_buf_unuse(struct smc_connection *conn, struct smc_link_group *lgr) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); bool is_smcd = lgr->is_smcd; int bufsize; if (conn->sndbuf_desc) { bufsize = conn->sndbuf_desc->len; if (!is_smcd && conn->sndbuf_desc->is_vm) { smcr_buf_unuse(conn->sndbuf_desc, false, lgr); } else { memzero_explicit(conn->sndbuf_desc->cpu_addr, bufsize); WRITE_ONCE(conn->sndbuf_desc->used, 0); } SMC_STAT_RMB_SIZE(smc, is_smcd, false, false, bufsize); } if (conn->rmb_desc) { bufsize = conn->rmb_desc->len; if (!is_smcd) { smcr_buf_unuse(conn->rmb_desc, true, lgr); } else { bufsize += sizeof(struct smcd_cdc_msg); memzero_explicit(conn->rmb_desc->cpu_addr, bufsize); WRITE_ONCE(conn->rmb_desc->used, 0); } SMC_STAT_RMB_SIZE(smc, is_smcd, true, false, bufsize); } } /* remove a finished connection from its link group */ void smc_conn_free(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; if (!lgr || conn->freed) /* Connection has never been registered in a * link group, or has already been freed. */ return; conn->freed = 1; if (!smc_conn_lgr_valid(conn)) /* Connection has already unregistered from * link group. 
*/ goto lgr_put; if (lgr->is_smcd) { if (!list_empty(&lgr->list)) smc_ism_unset_conn(conn); if (smc_ism_support_dmb_nocopy(lgr->smcd)) smcd_buf_detach(conn); tasklet_kill(&conn->rx_tsklet); } else { smc_cdc_wait_pend_tx_wr(conn); if (current_work() != &conn->abort_work) cancel_work_sync(&conn->abort_work); } if (!list_empty(&lgr->list)) { smc_buf_unuse(conn, lgr); /* allow buffer reuse */ smc_lgr_unregister_conn(conn); } if (!lgr->conns_num) smc_lgr_schedule_free_work(lgr); lgr_put: if (!lgr->is_smcd) smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */ smc_lgr_put(lgr); /* lgr_hold in smc_conn_create() */ } /* unregister a link from a buf_desc */ static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb, struct smc_link *lnk) { if (is_rmb || buf_desc->is_vm) buf_desc->is_reg_mr[lnk->link_idx] = false; if (!buf_desc->is_map_ib[lnk->link_idx]) return; if ((is_rmb || buf_desc->is_vm) && buf_desc->mr[lnk->link_idx]) { smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]); buf_desc->mr[lnk->link_idx] = NULL; } if (is_rmb) smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE); else smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE); sg_free_table(&buf_desc->sgt[lnk->link_idx]); buf_desc->is_map_ib[lnk->link_idx] = false; } /* unmap all buffers of lgr for a deleted link */ static void smcr_buf_unmap_lgr(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_buf_desc *buf_desc, *bf; int i; for (i = 0; i < SMC_RMBE_SIZES; i++) { down_write(&lgr->rmbs_lock); list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) smcr_buf_unmap_link(buf_desc, true, lnk); up_write(&lgr->rmbs_lock); down_write(&lgr->sndbufs_lock); list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) smcr_buf_unmap_link(buf_desc, false, lnk); up_write(&lgr->sndbufs_lock); } } static void smcr_rtoken_clear_link(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; int i; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { lgr->rtokens[i][lnk->link_idx].rkey = 0; lgr->rtokens[i][lnk->link_idx].dma_addr = 0; } } static void __smcr_link_clear(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_ib_device *smcibdev; smc_wr_free_link_mem(lnk); smc_ibdev_cnt_dec(lnk); put_device(&lnk->smcibdev->ibdev->dev); smcibdev = lnk->smcibdev; memset(lnk, 0, sizeof(struct smc_link)); lnk->state = SMC_LNK_UNUSED; if (!atomic_dec_return(&smcibdev->lnk_cnt)) wake_up(&smcibdev->lnks_deleted); smc_lgr_put(lgr); /* lgr_hold in smcr_link_init() */ } /* must be called under lgr->llc_conf_mutex lock */ void smcr_link_clear(struct smc_link *lnk, bool log) { if (!lnk->lgr || lnk->clearing || lnk->state == SMC_LNK_UNUSED) return; lnk->clearing = 1; lnk->peer_qpn = 0; smc_llc_link_clear(lnk, log); smcr_buf_unmap_lgr(lnk); smcr_rtoken_clear_link(lnk); smc_ib_modify_qp_error(lnk); smc_wr_free_link(lnk); smc_ib_destroy_queue_pair(lnk); smc_ib_dealloc_protection_domain(lnk); smcr_link_put(lnk); /* theoretically last link_put */ } void smcr_link_hold(struct smc_link *lnk) { refcount_inc(&lnk->refcnt); } void smcr_link_put(struct smc_link *lnk) { if (refcount_dec_and_test(&lnk->refcnt)) __smcr_link_clear(lnk); } static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc) { int i; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]); if (!buf_desc->is_vm && buf_desc->pages) __free_pages(buf_desc->pages, buf_desc->order); else if (buf_desc->is_vm && buf_desc->cpu_addr) vfree(buf_desc->cpu_addr); kfree(buf_desc); } 
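/*
 * Illustrative sketch only (not part of the original file): the reference
 * pattern behind smcr_link_hold()/smcr_link_put() above. Code that hands a
 * link to a deferred context pins it with an extra reference and drops that
 * reference when it is done, so __smcr_link_clear() runs exactly once, after
 * the last user has let go. "my_deferred_link_use" is a made-up name for
 * this example.
 */
static void __maybe_unused my_deferred_link_use(struct smc_link *lnk)
{
	smcr_link_hold(lnk);	/* keep lnk alive across this context */

	/* ... work with lnk->lgr, lnk->link_idx, lnk->state here ... */

	smcr_link_put(lnk);	/* last put triggers __smcr_link_clear() */
}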
static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb, struct smc_buf_desc *buf_desc) { if (is_dmb) { /* restore original buf len */ buf_desc->len += sizeof(struct smcd_cdc_msg); smc_ism_unregister_dmb(lgr->smcd, buf_desc); } else { kfree(buf_desc->cpu_addr); } kfree(buf_desc); } static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc) { if (lgr->is_smcd) smcd_buf_free(lgr, is_rmb, buf_desc); else smcr_buf_free(lgr, is_rmb, buf_desc); } static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb) { struct smc_buf_desc *buf_desc, *bf_desc; struct list_head *buf_list; int i; for (i = 0; i < SMC_RMBE_SIZES; i++) { if (is_rmb) buf_list = &lgr->rmbs[i]; else buf_list = &lgr->sndbufs[i]; list_for_each_entry_safe(buf_desc, bf_desc, buf_list, list) { smc_lgr_buf_list_del(lgr, is_rmb, buf_desc); smc_buf_free(lgr, is_rmb, buf_desc); } } } static void smc_lgr_free_bufs(struct smc_link_group *lgr) { /* free send buffers */ __smc_lgr_free_bufs(lgr, false); /* free rmbs */ __smc_lgr_free_bufs(lgr, true); } /* won't be freed until no one accesses to lgr anymore */ static void __smc_lgr_free(struct smc_link_group *lgr) { smc_lgr_free_bufs(lgr); if (lgr->is_smcd) { if (!atomic_dec_return(&lgr->smcd->lgr_cnt)) wake_up(&lgr->smcd->lgrs_deleted); } else { smc_wr_free_lgr_mem(lgr); if (!atomic_dec_return(&lgr_cnt)) wake_up(&lgrs_deleted); } kfree(lgr); } /* remove a link group */ static void smc_lgr_free(struct smc_link_group *lgr) { int i; if (!lgr->is_smcd) { down_write(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].state != SMC_LNK_UNUSED) smcr_link_clear(&lgr->lnk[i], false); } up_write(&lgr->llc_conf_mutex); smc_llc_lgr_clear(lgr); } destroy_workqueue(lgr->tx_wq); if (lgr->is_smcd) { smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); put_device(lgr->smcd->ops->get_dev(lgr->smcd)); } smc_lgr_put(lgr); /* theoretically last lgr_put */ } void smc_lgr_hold(struct smc_link_group *lgr) { refcount_inc(&lgr->refcnt); } void smc_lgr_put(struct smc_link_group *lgr) { if (refcount_dec_and_test(&lgr->refcnt)) __smc_lgr_free(lgr); } static void smc_sk_wake_ups(struct smc_sock *smc) { smc->sk.sk_write_space(&smc->sk); smc->sk.sk_data_ready(&smc->sk); smc->sk.sk_state_change(&smc->sk); } /* kill a connection */ static void smc_conn_kill(struct smc_connection *conn, bool soft) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); if (conn->lgr->is_smcd && conn->lgr->peer_shutdown) conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; else smc_close_abort(conn); conn->killed = 1; smc->sk.sk_err = ECONNABORTED; smc_sk_wake_ups(smc); if (conn->lgr->is_smcd) { smc_ism_unset_conn(conn); if (smc_ism_support_dmb_nocopy(conn->lgr->smcd)) smcd_buf_detach(conn); if (soft) tasklet_kill(&conn->rx_tsklet); else tasklet_unlock_wait(&conn->rx_tsklet); } else { smc_cdc_wait_pend_tx_wr(conn); } smc_lgr_unregister_conn(conn); smc_close_active_abort(smc); } static void smc_lgr_cleanup(struct smc_link_group *lgr) { if (lgr->is_smcd) { smc_ism_signal_shutdown(lgr); } else { u32 rsn = lgr->llc_termination_rsn; if (!rsn) rsn = SMC_LLC_DEL_PROG_INIT_TERM; smc_llc_send_link_delete_all(lgr, false, rsn); smcr_lgr_link_deactivate_all(lgr); } } /* terminate link group * @soft: true if link group shutdown can take its time * false if immediate link group shutdown is required */ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft) { struct smc_connection *conn; struct smc_sock *smc; struct rb_node *node; if 
(lgr->terminating) return; /* lgr already terminating */ /* cancel free_work sync, will terminate when lgr->freeing is set */ cancel_delayed_work(&lgr->free_work); lgr->terminating = 1; /* kill remaining link group connections */ read_lock_bh(&lgr->conns_lock); node = rb_first(&lgr->conns_all); while (node) { read_unlock_bh(&lgr->conns_lock); conn = rb_entry(node, struct smc_connection, alert_node); smc = container_of(conn, struct smc_sock, conn); sock_hold(&smc->sk); /* sock_put below */ lock_sock(&smc->sk); smc_conn_kill(conn, soft); release_sock(&smc->sk); sock_put(&smc->sk); /* sock_hold above */ read_lock_bh(&lgr->conns_lock); node = rb_first(&lgr->conns_all); } read_unlock_bh(&lgr->conns_lock); smc_lgr_cleanup(lgr); smc_lgr_free(lgr); } /* unlink link group and schedule termination */ void smc_lgr_terminate_sched(struct smc_link_group *lgr) { spinlock_t *lgr_lock; smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) { spin_unlock_bh(lgr_lock); return; /* lgr already terminating */ } list_del_init(&lgr->list); lgr->freeing = 1; spin_unlock_bh(lgr_lock); schedule_work(&lgr->terminate_work); } /* Called when peer lgr shutdown (regularly or abnormally) is received */ void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid, unsigned short vlan) { struct smc_link_group *lgr, *l; LIST_HEAD(lgr_free_list); /* run common cleanup function and build free list */ spin_lock_bh(&dev->lgr_lock); list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) { if ((!peer_gid->gid || (lgr->peer_gid.gid == peer_gid->gid && !smc_ism_is_emulated(dev) ? 1 : lgr->peer_gid.gid_ext == peer_gid->gid_ext)) && (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) { if (peer_gid->gid) /* peer triggered termination */ lgr->peer_shutdown = 1; list_move(&lgr->list, &lgr_free_list); lgr->freeing = 1; } } spin_unlock_bh(&dev->lgr_lock); /* cancel the regular free workers and actually free lgrs */ list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { list_del_init(&lgr->list); schedule_work(&lgr->terminate_work); } } /* Called when an SMCD device is removed or the smc module is unloaded */ void smc_smcd_terminate_all(struct smcd_dev *smcd) { struct smc_link_group *lgr, *lg; LIST_HEAD(lgr_free_list); spin_lock_bh(&smcd->lgr_lock); list_splice_init(&smcd->lgr_list, &lgr_free_list); list_for_each_entry(lgr, &lgr_free_list, list) lgr->freeing = 1; spin_unlock_bh(&smcd->lgr_lock); list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) { list_del_init(&lgr->list); __smc_lgr_terminate(lgr, false); } if (atomic_read(&smcd->lgr_cnt)) wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt)); } /* Called when an SMCR device is removed or the smc module is unloaded. * If smcibdev is given, all SMCR link groups using this device are terminated. * If smcibdev is NULL, all SMCR link groups are terminated. 
*/ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) { struct smc_link_group *lgr, *lg; LIST_HEAD(lgr_free_list); int i; spin_lock_bh(&smc_lgr_list.lock); if (!smcibdev) { list_splice_init(&smc_lgr_list.list, &lgr_free_list); list_for_each_entry(lgr, &lgr_free_list, list) lgr->freeing = 1; } else { list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) { for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].smcibdev == smcibdev) smcr_link_down_cond_sched(&lgr->lnk[i]); } } } spin_unlock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) { list_del_init(&lgr->list); smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM); __smc_lgr_terminate(lgr, false); } if (smcibdev) { if (atomic_read(&smcibdev->lnk_cnt)) wait_event(smcibdev->lnks_deleted, !atomic_read(&smcibdev->lnk_cnt)); } else { if (atomic_read(&lgr_cnt)) wait_event(lgrs_deleted, !atomic_read(&lgr_cnt)); } } /* set new lgr type and clear all asymmetric link tagging */ void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type) { char *lgr_type = ""; int i; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) if (smc_link_usable(&lgr->lnk[i])) lgr->lnk[i].link_is_asym = false; if (lgr->type == new_type) return; lgr->type = new_type; switch (lgr->type) { case SMC_LGR_NONE: lgr_type = "NONE"; break; case SMC_LGR_SINGLE: lgr_type = "SINGLE"; break; case SMC_LGR_SYMMETRIC: lgr_type = "SYMMETRIC"; break; case SMC_LGR_ASYMMETRIC_PEER: lgr_type = "ASYMMETRIC_PEER"; break; case SMC_LGR_ASYMMETRIC_LOCAL: lgr_type = "ASYMMETRIC_LOCAL"; break; } pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu state changed: " "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie, lgr_type, lgr->pnet_id); } /* set new lgr type and tag a link as asymmetric */ void smcr_lgr_set_type_asym(struct smc_link_group *lgr, enum smc_lgr_type new_type, int asym_lnk_idx) { smcr_lgr_set_type(lgr, new_type); lgr->lnk[asym_lnk_idx].link_is_asym = true; } /* abort connection, abort_work scheduled from tasklet context */ static void smc_conn_abort_work(struct work_struct *work) { struct smc_connection *conn = container_of(work, struct smc_connection, abort_work); struct smc_sock *smc = container_of(conn, struct smc_sock, conn); lock_sock(&smc->sk); smc_conn_kill(conn, true); release_sock(&smc->sk); sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */ } void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport) { struct smc_link_group *lgr, *n; spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { struct smc_link *link; if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN) || lgr->type == SMC_LGR_SYMMETRIC || lgr->type == SMC_LGR_ASYMMETRIC_PEER || !rdma_dev_access_netns(smcibdev->ibdev, lgr->net)) continue; if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1) continue; /* trigger local add link processing */ link = smc_llc_usable_link(lgr); if (link) smc_llc_add_link_local(link); } spin_unlock_bh(&smc_lgr_list.lock); } /* link is down - switch connections to alternate link, * must be called under lgr->llc_conf_mutex lock */ static void smcr_link_down(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_link *to_lnk; int del_link_id; if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list)) return; to_lnk = smc_switch_conns(lgr, lnk, true); if (!to_lnk) { /* no backup link available */ smcr_link_clear(lnk, true); return; } smcr_lgr_set_type(lgr, SMC_LGR_SINGLE); del_link_id = 
lnk->link_id; if (lgr->role == SMC_SERV) { /* trigger local delete link processing */ smc_llc_srv_delete_link_local(to_lnk, del_link_id); } else { if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { /* another llc task is ongoing */ up_write(&lgr->llc_conf_mutex); wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) || lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), SMC_LLC_WAIT_TIME); down_write(&lgr->llc_conf_mutex); } if (!list_empty(&lgr->list)) { smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true, SMC_LLC_DEL_LOST_PATH); smcr_link_clear(lnk, true); } wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */ } } /* must be called under lgr->llc_conf_mutex lock */ void smcr_link_down_cond(struct smc_link *lnk) { if (smc_link_downing(&lnk->state)) { trace_smcr_link_down(lnk, __builtin_return_address(0)); smcr_link_down(lnk); } } /* will get the lgr->llc_conf_mutex lock */ void smcr_link_down_cond_sched(struct smc_link *lnk) { if (smc_link_downing(&lnk->state)) { trace_smcr_link_down(lnk, __builtin_return_address(0)); smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */ if (!schedule_work(&lnk->link_down_wrk)) smcr_link_put(lnk); } } void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport) { struct smc_link_group *lgr, *n; int i; list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN)) continue; /* lgr is not affected */ if (list_empty(&lgr->list)) continue; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &lgr->lnk[i]; if (smc_link_usable(lnk) && lnk->smcibdev == smcibdev && lnk->ibport == ibport) smcr_link_down_cond_sched(lnk); } } } static void smc_link_down_work(struct work_struct *work) { struct smc_link *link = container_of(work, struct smc_link, link_down_wrk); struct smc_link_group *lgr = link->lgr; if (list_empty(&lgr->list)) goto out; wake_up_all(&lgr->llc_msg_waiter); down_write(&lgr->llc_conf_mutex); smcr_link_down(link); up_write(&lgr->llc_conf_mutex); out: smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */ } static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev, struct netdev_nested_priv *priv) { unsigned short *vlan_id = (unsigned short *)priv->data; if (is_vlan_dev(lower_dev)) { *vlan_id = vlan_dev_vlan_id(lower_dev); return 1; } return 0; } /* Determine vlan of internal TCP socket. 
*/ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(clcsock->sk); struct netdev_nested_priv priv; struct net_device *ndev; int rc = 0; ini->vlan_id = 0; if (!dst) { rc = -ENOTCONN; goto out; } if (!dst->dev) { rc = -ENODEV; goto out_rel; } ndev = dst->dev; if (is_vlan_dev(ndev)) { ini->vlan_id = vlan_dev_vlan_id(ndev); goto out_rel; } priv.data = (void *)&ini->vlan_id; rtnl_lock(); netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv); rtnl_unlock(); out_rel: dst_release(dst); out: return rc; } static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version, u8 peer_systemid[], u8 peer_gid[], u8 peer_mac_v1[], enum smc_lgr_role role, u32 clcqpn, struct net *net) { struct smc_link *lnk; int i; if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) || lgr->role != role) return false; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { lnk = &lgr->lnk[i]; if (!smc_link_active(lnk)) continue; /* use verbs API to check netns, instead of lgr->net */ if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net)) return false; if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) && !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) && (smcr_version == SMC_V2 || !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN))) return true; } return false; } static bool smcd_lgr_match(struct smc_link_group *lgr, struct smcd_dev *smcismdev, struct smcd_gid *peer_gid) { if (lgr->peer_gid.gid != peer_gid->gid || lgr->smcd != smcismdev) return false; if (smc_ism_is_emulated(smcismdev) && lgr->peer_gid.gid_ext != peer_gid->gid_ext) return false; return true; } /* create a new SMC connection (and a new link group if necessary) */ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) { struct smc_connection *conn = &smc->conn; struct net *net = sock_net(&smc->sk); struct list_head *lgr_list; struct smc_link_group *lgr; enum smc_lgr_role role; spinlock_t *lgr_lock; int rc = 0; lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list : &smc_lgr_list.list; lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock : &smc_lgr_list.lock; ini->first_contact_local = 1; role = smc->listen_smc ? SMC_SERV : SMC_CLNT; if (role == SMC_CLNT && ini->first_contact_peer) /* create new link group as well */ goto create; /* determine if an existing link group can be reused */ spin_lock_bh(lgr_lock); list_for_each_entry(lgr, lgr_list, list) { write_lock_bh(&lgr->conns_lock); if ((ini->is_smcd ? smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected], &ini->ism_peer_gid[ini->ism_selected]) : smcr_lgr_match(lgr, ini->smcr_version, ini->peer_systemid, ini->peer_gid, ini->peer_mac, role, ini->ib_clcqpn, net)) && !lgr->sync_err && (ini->smcd_version == SMC_V2 || lgr->vlan_id == ini->vlan_id) && (role == SMC_CLNT || ini->is_smcd || (lgr->conns_num < lgr->max_conns && !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) { /* link group found */ ini->first_contact_local = 0; conn->lgr = lgr; rc = smc_lgr_register_conn(conn, false); write_unlock_bh(&lgr->conns_lock); if (!rc && delayed_work_pending(&lgr->free_work)) cancel_delayed_work(&lgr->free_work); break; } write_unlock_bh(&lgr->conns_lock); } spin_unlock_bh(lgr_lock); if (rc) return rc; if (role == SMC_CLNT && !ini->first_contact_peer && ini->first_contact_local) { /* Server reuses a link group, but Client wants to start * a new one * send out_of_sync decline, reason synchr. 
error */ return SMC_CLC_DECL_SYNCERR; } create: if (ini->first_contact_local) { rc = smc_lgr_create(smc, ini); if (rc) goto out; lgr = conn->lgr; write_lock_bh(&lgr->conns_lock); rc = smc_lgr_register_conn(conn, true); write_unlock_bh(&lgr->conns_lock); if (rc) { smc_lgr_cleanup_early(lgr); goto out; } } smc_lgr_hold(conn->lgr); /* lgr_put in smc_conn_free() */ if (!conn->lgr->is_smcd) smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */ conn->freed = 0; conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; conn->urg_state = SMC_URG_READ; init_waitqueue_head(&conn->cdc_pend_tx_wq); INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work); if (ini->is_smcd) { conn->rx_off = sizeof(struct smcd_cdc_msg); smcd_cdc_rx_init(conn); /* init tasklet for this conn */ } else { conn->rx_off = 0; } #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&conn->acurs_lock); #endif out: return rc; } #define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */ #define SMCR_RMBE_SIZES 15 /* 0 -> 16KB, 1 -> 32KB, .. 15 -> 512MB */ /* convert the RMB size into the compressed notation (minimum 16K, see * SMCD/R_DMBE_SIZES. * In contrast to plain ilog2, this rounds towards the next power of 2, * so the socket application gets at least its desired sndbuf / rcvbuf size. */ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb) { u8 compressed; if (size <= SMC_BUF_MIN_SIZE) return 0; size = (size - 1) >> 14; /* convert to 16K multiple */ compressed = min_t(u8, ilog2(size) + 1, is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES); #ifdef CONFIG_ARCH_NO_SG_CHAIN if (!is_smcd && is_rmb) /* RMBs are backed by & limited to max size of scatterlists */ compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14)); #endif return compressed; } /* convert the RMB size from compressed notation into integer */ int smc_uncompress_bufsize(u8 compressed) { u32 size; size = 0x00000001 << (((int)compressed) + 14); return (int)size; } /* try to reuse a sndbuf or rmb description slot for a certain * buffer size; if not available, return NULL */ static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize, struct rw_semaphore *lock, struct list_head *buf_list) { struct smc_buf_desc *buf_slot; down_read(lock); list_for_each_entry(buf_slot, buf_list, list) { if (cmpxchg(&buf_slot->used, 0, 1) == 0) { up_read(lock); return buf_slot; } } up_read(lock); return NULL; } /* one of the conditions for announcing a receiver's current window size is * that it "results in a minimum increase in the window size of 10% of the * receive buffer space" [RFC7609] */ static inline int smc_rmb_wnd_update_limit(int rmbe_size) { return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } /* map an buf to a link */ static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb, struct smc_link *lnk) { int rc, i, nents, offset, buf_size, size, access_flags; struct scatterlist *sg; void *buf; if (buf_desc->is_map_ib[lnk->link_idx]) return 0; if (buf_desc->is_vm) { buf = buf_desc->cpu_addr; buf_size = buf_desc->len; offset = offset_in_page(buf_desc->cpu_addr); nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE; } else { nents = 1; } rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL); if (rc) return rc; if (buf_desc->is_vm) { /* virtually contiguous buffer */ for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) { size = min_t(int, PAGE_SIZE - offset, buf_size); sg_set_page(sg, vmalloc_to_page(buf), size, offset); buf += size; buf_size -= size; offset = 0; } } 
else { /* physically contiguous buffer */ sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl, buf_desc->cpu_addr, buf_desc->len); } /* map sg table to DMA address */ rc = smc_ib_buf_map_sg(lnk, buf_desc, is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE); /* SMC protocol depends on mapping to one DMA address only */ if (rc != nents) { rc = -EAGAIN; goto free_table; } buf_desc->is_dma_need_sync |= smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx; if (is_rmb || buf_desc->is_vm) { /* create a new memory region for the RMB or vzalloced sndbuf */ access_flags = is_rmb ? IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : IB_ACCESS_LOCAL_WRITE; rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags, buf_desc, lnk->link_idx); if (rc) goto buf_unmap; smc_ib_sync_sg_for_device(lnk, buf_desc, is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } buf_desc->is_map_ib[lnk->link_idx] = true; return 0; buf_unmap: smc_ib_buf_unmap_sg(lnk, buf_desc, is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE); free_table: sg_free_table(&buf_desc->sgt[lnk->link_idx]); return rc; } /* register a new buf on IB device, rmb or vzalloced sndbuf * must be called under lgr->llc_conf_mutex lock */ int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc) { if (list_empty(&link->lgr->list)) return -ENOLINK; if (!buf_desc->is_reg_mr[link->link_idx]) { /* register memory region for new buf */ if (buf_desc->is_vm) buf_desc->mr[link->link_idx]->iova = (uintptr_t)buf_desc->cpu_addr; if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) { buf_desc->is_reg_err = true; return -EFAULT; } buf_desc->is_reg_mr[link->link_idx] = true; } return 0; } static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock, struct list_head *lst, bool is_rmb) { struct smc_buf_desc *buf_desc, *bf; int rc = 0; down_write(lock); list_for_each_entry_safe(buf_desc, bf, lst, list) { if (!buf_desc->used) continue; rc = smcr_buf_map_link(buf_desc, is_rmb, lnk); if (rc) goto out; } out: up_write(lock); return rc; } /* map all used buffers of lgr for a new link */ int smcr_buf_map_lgr(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; int i, rc = 0; for (i = 0; i < SMC_RMBE_SIZES; i++) { rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock, &lgr->rmbs[i], true); if (rc) return rc; rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock, &lgr->sndbufs[i], false); if (rc) return rc; } return 0; } /* register all used buffers of lgr for a new link, * must be called under lgr->llc_conf_mutex lock */ int smcr_buf_reg_lgr(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_buf_desc *buf_desc, *bf; int i, rc = 0; /* reg all RMBs for a new link */ down_write(&lgr->rmbs_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) { if (!buf_desc->used) continue; rc = smcr_link_reg_buf(lnk, buf_desc); if (rc) { up_write(&lgr->rmbs_lock); return rc; } } } up_write(&lgr->rmbs_lock); if (lgr->buf_type == SMCR_PHYS_CONT_BUFS) return rc; /* reg all vzalloced sndbufs for a new link */ down_write(&lgr->sndbufs_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) { if (!buf_desc->used || !buf_desc->is_vm) continue; rc = smcr_link_reg_buf(lnk, buf_desc); if (rc) { up_write(&lgr->sndbufs_lock); return rc; } } } up_write(&lgr->sndbufs_lock); return rc; } static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, int bufsize) { struct smc_buf_desc *buf_desc; /* try to alloc a new buffer */ buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); if 
(!buf_desc) return ERR_PTR(-ENOMEM); switch (lgr->buf_type) { case SMCR_PHYS_CONT_BUFS: case SMCR_MIXED_BUFS: buf_desc->order = get_order(bufsize); buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY | __GFP_ZERO, buf_desc->order); if (buf_desc->pages) { buf_desc->cpu_addr = (void *)page_address(buf_desc->pages); buf_desc->len = bufsize; buf_desc->is_vm = false; break; } if (lgr->buf_type == SMCR_PHYS_CONT_BUFS) goto out; fallthrough; // try virtually contiguous buf case SMCR_VIRT_CONT_BUFS: buf_desc->order = get_order(bufsize); buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order); if (!buf_desc->cpu_addr) goto out; buf_desc->pages = NULL; buf_desc->len = bufsize; buf_desc->is_vm = true; break; } return buf_desc; out: kfree(buf_desc); return ERR_PTR(-EAGAIN); } /* map buf_desc on all usable links, * unused buffers stay mapped as long as the link is up */ static int smcr_buf_map_usable_links(struct smc_link_group *lgr, struct smc_buf_desc *buf_desc, bool is_rmb) { int i, rc = 0, cnt = 0; /* protect against parallel link reconfiguration */ down_read(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &lgr->lnk[i]; if (!smc_link_usable(lnk)) continue; if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) { rc = -ENOMEM; goto out; } cnt++; } out: up_read(&lgr->llc_conf_mutex); if (!rc && !cnt) rc = -EINVAL; return rc; } static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, bool is_dmb, int bufsize) { struct smc_buf_desc *buf_desc; int rc; /* try to alloc a new DMB */ buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); if (!buf_desc) return ERR_PTR(-ENOMEM); if (is_dmb) { rc = smc_ism_register_dmb(lgr, bufsize, buf_desc); if (rc) { kfree(buf_desc); if (rc == -ENOMEM) return ERR_PTR(-EAGAIN); if (rc == -ENOSPC) return ERR_PTR(-ENOSPC); return ERR_PTR(-EIO); } buf_desc->pages = virt_to_page(buf_desc->cpu_addr); /* CDC header stored in buf. 
So, pretend it was smaller */ buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg); } else { buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC); if (!buf_desc->cpu_addr) { kfree(buf_desc); return ERR_PTR(-EAGAIN); } buf_desc->len = bufsize; } return buf_desc; } static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) { struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM); struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; struct list_head *buf_list; int bufsize, bufsize_comp; struct rw_semaphore *lock; /* lock buffer list */ bool is_dgraded = false; if (is_rmb) /* use socket recv buffer size (w/o overhead) as start value */ bufsize = smc->sk.sk_rcvbuf / 2; else /* use socket send buffer size (w/o overhead) as start value */ bufsize = smc->sk.sk_sndbuf / 2; for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb); bufsize_comp >= 0; bufsize_comp--) { if (is_rmb) { lock = &lgr->rmbs_lock; buf_list = &lgr->rmbs[bufsize_comp]; } else { lock = &lgr->sndbufs_lock; buf_list = &lgr->sndbufs[bufsize_comp]; } bufsize = smc_uncompress_bufsize(bufsize_comp); /* check for reusable slot in the link group */ buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list); if (buf_desc) { buf_desc->is_dma_need_sync = 0; SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize); SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb); break; /* found reusable slot */ } if (is_smcd) buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize); else buf_desc = smcr_new_buf_create(lgr, bufsize); if (PTR_ERR(buf_desc) == -ENOMEM) break; if (IS_ERR(buf_desc)) { if (!is_dgraded) { is_dgraded = true; SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb); } continue; } SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb); SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize); buf_desc->used = 1; down_write(lock); smc_lgr_buf_list_add(lgr, is_rmb, buf_list, buf_desc); up_write(lock); break; /* found */ } if (IS_ERR(buf_desc)) return PTR_ERR(buf_desc); if (!is_smcd) { if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) { smcr_buf_unuse(buf_desc, is_rmb, lgr); return -ENOMEM; } } if (is_rmb) { conn->rmb_desc = buf_desc; conn->rmbe_size_comp = bufsize_comp; smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); conn->rmbe_update_limit = smc_rmb_wnd_update_limit(buf_desc->len); if (is_smcd) smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */ } else { conn->sndbuf_desc = buf_desc; smc->sk.sk_sndbuf = bufsize * 2; atomic_set(&conn->sndbuf_space, bufsize); } return 0; } void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) { if (!conn->sndbuf_desc->is_dma_need_sync) return; if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) return; smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); } void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn) { int i; if (!conn->rmb_desc->is_dma_need_sync) return; if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd) return; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&conn->lgr->lnk[i])) continue; smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc, DMA_FROM_DEVICE); } } /* create the send and receive buffer for an SMC socket; * receive buffers are called RMBs; * (even though the SMC protocol allows more than one RMB-element per RMB, * the Linux implementation uses just one RMB-element per RMB, i.e. 
uses an * extra RMB for every connection in a link group */ int smc_buf_create(struct smc_sock *smc, bool is_smcd) { int rc; /* create send buffer */ if (is_smcd && smc_ism_support_dmb_nocopy(smc->conn.lgr->smcd)) goto create_rmb; rc = __smc_buf_create(smc, is_smcd, false); if (rc) return rc; create_rmb: /* create rmb */ rc = __smc_buf_create(smc, is_smcd, true); if (rc && smc->conn.sndbuf_desc) { down_write(&smc->conn.lgr->sndbufs_lock); smc_lgr_buf_list_del(smc->conn.lgr, false, smc->conn.sndbuf_desc); up_write(&smc->conn.lgr->sndbufs_lock); smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); smc->conn.sndbuf_desc = NULL; } return rc; } int smcd_buf_attach(struct smc_sock *smc) { struct smc_connection *conn = &smc->conn; struct smcd_dev *smcd = conn->lgr->smcd; u64 peer_token = conn->peer_token; struct smc_buf_desc *buf_desc; int rc; buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); if (!buf_desc) return -ENOMEM; /* The ghost sndbuf_desc describes the same memory region as * peer RMB. Its lifecycle is consistent with the connection's * and it will be freed with the connections instead of the * link group. */ rc = smc_ism_attach_dmb(smcd, peer_token, buf_desc); if (rc) goto free; smc->sk.sk_sndbuf = buf_desc->len; buf_desc->cpu_addr = (u8 *)buf_desc->cpu_addr + sizeof(struct smcd_cdc_msg); buf_desc->len -= sizeof(struct smcd_cdc_msg); conn->sndbuf_desc = buf_desc; conn->sndbuf_desc->used = 1; atomic_set(&conn->sndbuf_space, conn->sndbuf_desc->len); return 0; free: kfree(buf_desc); return rc; } static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr) { int i; for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) { if (!test_and_set_bit(i, lgr->rtokens_used_mask)) return i; } return -ENOSPC; } static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx, u32 rkey) { int i; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { if (test_bit(i, lgr->rtokens_used_mask) && lgr->rtokens[i][lnk_idx].rkey == rkey) return i; } return -ENOENT; } /* set rtoken for a new link to an existing rmb */ void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new, __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey) { int rtok_idx; rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known)); if (rtok_idx == -ENOENT) return; lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey); lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr); } /* set rtoken for a new link whose link_id is given */ void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id, __be64 nw_vaddr, __be32 nw_rkey) { u64 dma_addr = be64_to_cpu(nw_vaddr); u32 rkey = ntohl(nw_rkey); bool found = false; int link_idx; for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) { if (lgr->lnk[link_idx].link_id == link_id) { found = true; break; } } if (!found) return; lgr->rtokens[rtok_idx][link_idx].rkey = rkey; lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr; } /* add a new rtoken from peer */ int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey) { struct smc_link_group *lgr = smc_get_lgr(lnk); u64 dma_addr = be64_to_cpu(nw_vaddr); u32 rkey = ntohl(nw_rkey); int i; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { if (lgr->rtokens[i][lnk->link_idx].rkey == rkey && lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr && test_bit(i, lgr->rtokens_used_mask)) { /* already in list */ return i; } } i = smc_rmb_reserve_rtoken_idx(lgr); if (i < 0) return i; lgr->rtokens[i][lnk->link_idx].rkey = rkey; 
lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr; return i; } /* delete an rtoken from all links */ int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey) { struct smc_link_group *lgr = smc_get_lgr(lnk); u32 rkey = ntohl(nw_rkey); int i, j; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { if (lgr->rtokens[i][lnk->link_idx].rkey == rkey && test_bit(i, lgr->rtokens_used_mask)) { for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) { lgr->rtokens[i][j].rkey = 0; lgr->rtokens[i][j].dma_addr = 0; } clear_bit(i, lgr->rtokens_used_mask); return 0; } } return -ENOENT; } /* save rkey and dma_addr received from peer during clc handshake */ int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *lnk, struct smc_clc_msg_accept_confirm *clc) { conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr, clc->r0.rmb_rkey); if (conn->rtoken_idx < 0) return conn->rtoken_idx; return 0; } static void smc_core_going_away(void) { struct smc_ib_device *smcibdev; struct smcd_dev *smcd; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(smcibdev, &smc_ib_devices.list, list) { int i; for (i = 0; i < SMC_MAX_PORTS; i++) set_bit(i, smcibdev->ports_going_away); } mutex_unlock(&smc_ib_devices.mutex); mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) { smcd->going_away = 1; } mutex_unlock(&smcd_dev_list.mutex); } /* Clean up all SMC link groups */ static void smc_lgrs_shutdown(void) { struct smcd_dev *smcd; smc_core_going_away(); smc_smcr_terminate_all(NULL); mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) smc_smcd_terminate_all(smcd); mutex_unlock(&smcd_dev_list.mutex); } static int smc_core_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { smc_lgrs_shutdown(); smc_ib_unregister_client(); smc_ism_exit(); return 0; } static struct notifier_block smc_reboot_notifier = { .notifier_call = smc_core_reboot_event, }; int __init smc_core_init(void) { return register_reboot_notifier(&smc_reboot_notifier); } /* Called (from smc_exit) when module is removed */ void smc_core_exit(void) { unregister_reboot_notifier(&smc_reboot_notifier); smc_lgrs_shutdown(); } |
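The buffer-size helpers above (smc_compress_bufsize() / smc_uncompress_bufsize()) encode RMB and sndbuf sizes as a power-of-two exponent relative to 16KB and round requests up, so the socket gets at least the size it asked for. The following is a minimal standalone sketch of that round-trip, not kernel code: the helper names are illustrative, and the SMC_BUF_MIN_SIZE (16KB) and SMCR_RMBE_SIZES (15) values are copied from the definitions above.

/*
 * Standalone sketch of the compressed RMB/sndbuf size notation used above:
 * value n stands for 16KB << n, and requests are rounded *up* so the
 * application gets at least what it asked for.
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned char compress_bufsize(int size, int max_compressed)
{
	int compressed;

	if (size <= 16384)		/* SMC_BUF_MIN_SIZE */
		return 0;
	size = (size - 1) >> 14;	/* convert to 16K multiple */
	compressed = ilog2_u32(size) + 1;
	if (compressed > max_compressed)
		compressed = max_compressed;
	return (unsigned char)compressed;
}

int main(void)
{
	int req[] = { 16384, 65536, 100000, 1 << 20 };

	for (unsigned int i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
		int c = compress_bufsize(req[i], 15 /* SMCR_RMBE_SIZES */);

		/* smc_uncompress_bufsize(): 1 << (compressed + 14) */
		printf("request %7d -> compressed %2d -> actual %7d\n",
		       req[i], c, 1 << (c + 14));
	}
	return 0;
}

For example, a 100000-byte request compresses to 3 and is served by a 128KB buffer, while exact powers of two (64KB, 1MB) map back to themselves.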
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

bool __blk_should_fake_timeout(struct request_queue *q)
{
	return should_fail(&fail_io_timeout, 1);
}
EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);

static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						       NULL, &fail_io_timeout);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		if (val)
			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
	}

	return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/**
 * blk_abort_request - Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request.
 */
void blk_abort_request(struct request *req)
{
	/*
	 * All we need to ensure is that timeout scan takes place
	 * immediately and that scan sees the new timeout value.
	 * No need for fancy synchronizations.
	 */
	WRITE_ONCE(req->deadline, jiffies);
	kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);

static unsigned long blk_timeout_mask __read_mostly;

static int __init blk_timeout_init(void)
{
	blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
	return 0;
}

late_initcall(blk_timeout_init);

/*
 * Just a rough estimate, we don't care about specific values for timeouts.
 */
static inline unsigned long blk_round_jiffies(unsigned long j)
{
	return (j + blk_timeout_mask) + 1;
}

unsigned long blk_rq_timeout(unsigned long timeout)
{
	unsigned long maxt;

	maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
	if (time_after(timeout, maxt))
		timeout = maxt;

	return timeout;
}

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->rq_flags &= ~RQF_TIMED_OUT;

	expiry = jiffies + req->timeout;
	WRITE_ONCE(req->deadline, expiry);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = blk_rq_timeout(blk_round_jiffies(expiry));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to added timer slack to group timers, the timer
		 * will often be a little in front of what we asked for.
		 * So apply some tolerance here too, otherwise we keep
		 * modifying the timer because expires for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}
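blk_timeout_init() above sets blk_timeout_mask to roundup_pow_of_two(HZ) - 1, so blk_round_jiffies() simply pushes a deadline forward by mask + 1 jiffies (roughly one second), which together with the HZ/2 tolerance in blk_add_timer() lets many per-request deadlines share a single queue timer. A standalone arithmetic sketch, not kernel code, with sample HZ values chosen only for illustration:

/*
 * Sketch of blk_round_jiffies(): deadline + (roundup_pow_of_two(HZ) - 1) + 1.
 */
#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	unsigned long r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long hz_values[] = { 100, 250, 300, 1000 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned long hz = hz_values[i];
		unsigned long mask = roundup_pow_of_two_ul(hz) - 1;
		unsigned long j = 123456;	/* arbitrary "jiffies" deadline */

		/* blk_round_jiffies(): (j + blk_timeout_mask) + 1 */
		printf("HZ=%4lu mask=%4lu: %lu -> %lu (+%lu jiffies ~ %.2fs)\n",
		       hz, mask, j, (j + mask) + 1, mask + 1,
		       (double)(mask + 1) / hz);
	}
	return 0;
}

With HZ=250 the mask is 255, so every deadline is rounded forward by 256 jiffies (about 1.02s); with HZ=300 the rounding step grows to 512 jiffies (about 1.71s).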
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 */

#include <linux/mISDNif.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "core.h"

static u_int
*debug; static struct proto mISDN_proto = { .name = "misdn", .owner = THIS_MODULE, .obj_size = sizeof(struct mISDN_sock) }; #define _pms(sk) ((struct mISDN_sock *)sk) static struct mISDN_sock_list data_sockets = { .lock = __RW_LOCK_UNLOCKED(data_sockets.lock) }; static struct mISDN_sock_list base_sockets = { .lock = __RW_LOCK_UNLOCKED(base_sockets.lock) }; #define L2_HEADER_LEN 4 static inline struct sk_buff * _l2_alloc_skb(unsigned int len, gfp_t gfp_mask) { struct sk_buff *skb; skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask); if (likely(skb)) skb_reserve(skb, L2_HEADER_LEN); return skb; } static void mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_add_node(sk, &l->head); write_unlock_bh(&l->lock); } static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_del_node_init(sk); write_unlock_bh(&l->lock); } static int mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb) { struct mISDN_sock *msk; int err; msk = container_of(ch, struct mISDN_sock, ch); if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb); if (msk->sk.sk_state == MISDN_CLOSED) return -EUNATCH; __net_timestamp(skb); err = sock_queue_rcv_skb(&msk->sk, skb); if (err) printk(KERN_WARNING "%s: error %d\n", __func__, err); return err; } static int mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct mISDN_sock *msk; msk = container_of(ch, struct mISDN_sock, ch); if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg); switch (cmd) { case CLOSE_CHANNEL: msk->sk.sk_state = MISDN_CLOSED; break; } return 0; } static inline void mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { struct __kernel_old_timeval tv; if (_pms(sk)->cmask & MISDN_TIME_STAMP) { skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_MISDN, MISDN_TIME_STAMP, sizeof(tv), &tv); } } static int mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sk_buff *skb; struct sock *sk = sock->sk; int copied, err; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n", __func__, (int)len, flags, _pms(sk)->ch.nr, sk->sk_protocol); if (flags & (MSG_OOB)) return -EOPNOTSUPP; if (sk->sk_state == MISDN_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name); maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) { maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff; maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff; maddr->sapi = mISDN_HEAD_ID(skb) & 0xff; } else { maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xFF; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; } msg->msg_namelen = sizeof(*maddr); } copied = skb->len + MISDN_HEADER_LEN; if (len < copied) { if (flags & MSG_PEEK) refcount_dec(&skb->users); else skb_queue_head(&sk->sk_receive_queue, skb); return -ENOSPC; } memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb), MISDN_HEADER_LEN); err = skb_copy_datagram_msg(skb, 0, msg, copied); mISDN_sock_cmsg(sk, msg, skb); skb_free_datagram(sk, skb); return err ? 
: copied; } static int mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb; int err = -ENOMEM; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n", __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr, sk->sk_protocol); if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE)) return -EINVAL; if (len < MISDN_HEADER_LEN) return -EINVAL; if (sk->sk_state != MISDN_BOUND) return -EBADFD; lock_sock(sk); skb = _l2_alloc_skb(len, GFP_KERNEL); if (!skb) goto done; if (memcpy_from_msg(skb_put(skb, len), msg, len)) { err = -EFAULT; goto done; } memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN); skb_pull(skb, MISDN_HEADER_LEN); if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { /* if we have a address, we use it */ DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name); mISDN_HEAD_ID(skb) = maddr->channel; } else { /* use default for L2 messages */ if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr; } if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: ID:%x\n", __func__, mISDN_HEAD_ID(skb)); err = -ENODEV; if (!_pms(sk)->ch.peer) goto done; err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb); if (err) goto done; else { skb = NULL; err = len; } done: kfree_skb(skb); release_sock(sk); return err; } static int data_sock_release(struct socket *sock) { struct sock *sk = sock->sk; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (!sk) return 0; switch (sk->sk_protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: if (sk->sk_state == MISDN_BOUND) delete_channel(&_pms(sk)->ch); else mISDN_sock_unlink(&data_sockets, sk); break; case ISDN_P_LAPD_TE: case ISDN_P_LAPD_NT: case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: delete_channel(&_pms(sk)->ch); mISDN_sock_unlink(&data_sockets, sk); break; } lock_sock(sk); sock_orphan(sk); skb_queue_purge(&sk->sk_receive_queue); release_sock(sk); sock_put(sk); return 0; } static int data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p) { struct mISDN_ctrl_req cq; int err = -EINVAL, val[2]; struct mISDNchannel *bchan, *next; lock_sock(sk); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } switch (cmd) { case IMCTRLREQ: if (copy_from_user(&cq, p, sizeof(cq))) { err = -EFAULT; break; } if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) { list_for_each_entry_safe(bchan, next, &_pms(sk)->dev->bchannels, list) { if (bchan->nr == cq.channel) { err = bchan->ctrl(bchan, CONTROL_CHANNEL, &cq); break; } } } else err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D, CONTROL_CHANNEL, &cq); if (err) break; if (copy_to_user(p, &cq, sizeof(cq))) err = -EFAULT; break; case IMCLEAR_L2: if (sk->sk_protocol != ISDN_P_LAPD_NT) { err = -EINVAL; break; } val[0] = cmd; if (get_user(val[1], (int __user *)p)) { err = -EFAULT; break; } err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, CONTROL_CHANNEL, val); break; case IMHOLD_L1: if (sk->sk_protocol != ISDN_P_LAPD_NT && sk->sk_protocol != ISDN_P_LAPD_TE) { err = -EINVAL; break; } val[0] = cmd; if (get_user(val[1], (int __user *)p)) { err = -EFAULT; break; } err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, CONTROL_CHANNEL, val); break; default: err = -EINVAL; break; } done: release_sock(sk); return err; } static int 
data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int err = 0, id; struct sock *sk = sock->sk; struct mISDNdevice *dev; struct mISDNversion ver; switch (cmd) { case IMGETVERSION: ver.major = MISDN_MAJOR_VERSION; ver.minor = MISDN_MINOR_VERSION; ver.release = MISDN_RELEASE; if (copy_to_user((void __user *)arg, &ver, sizeof(ver))) err = -EFAULT; break; case IMGETCOUNT: id = get_mdevice_count(); if (put_user(id, (int __user *)arg)) err = -EFAULT; break; case IMGETDEVINFO: if (get_user(id, (int __user *)arg)) { err = -EFAULT; break; } dev = get_mdevice(id); if (dev) { struct mISDN_devinfo di; memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); di.protocol = dev->D.protocol; memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else err = -ENODEV; break; default: if (sk->sk_state == MISDN_BOUND) err = data_sock_ioctl_bound(sk, cmd, (void __user *)arg); else err = -ENOTCONN; } return err; } static int data_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0, opt = 0; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock, level, optname, optlen); lock_sock(sk); switch (optname) { case MISDN_TIME_STAMP: err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt) _pms(sk)->cmask |= MISDN_TIME_STAMP; else _pms(sk)->cmask &= ~MISDN_TIME_STAMP; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int data_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, opt; if (get_user(len, optlen)) return -EFAULT; if (len != sizeof(char)) return -EINVAL; switch (optname) { case MISDN_TIME_STAMP: if (_pms(sk)->cmask & MISDN_TIME_STAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) return -EFAULT; break; default: return -ENOPROTOOPT; } return 0; } static int data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; struct sock *csk; int err = 0; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (addr_len != sizeof(struct sockaddr_mISDN)) return -EINVAL; if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); if (_pms(sk)->dev) { err = -EALREADY; goto done; } _pms(sk)->dev = get_mdevice(maddr->dev); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } if (sk->sk_protocol < ISDN_P_B_START) { read_lock_bh(&data_sockets.lock); sk_for_each(csk, &data_sockets.head) { if (sk == csk) continue; if (_pms(csk)->dev != _pms(sk)->dev) continue; if (csk->sk_protocol >= ISDN_P_B_START) continue; if (IS_ISDN_P_TE(csk->sk_protocol) == IS_ISDN_P_TE(sk->sk_protocol)) continue; read_unlock_bh(&data_sockets.lock); err = -EBUSY; goto done; } read_unlock_bh(&data_sockets.lock); } _pms(sk)->ch.send = mISDN_send; _pms(sk)->ch.ctrl = mISDN_ctrl; switch (sk->sk_protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: mISDN_sock_unlink(&data_sockets, sk); err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); if (err) mISDN_sock_link(&data_sockets, sk); break; case ISDN_P_LAPD_TE: case 
ISDN_P_LAPD_NT: err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); break; case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); break; default: err = -EPROTONOSUPPORT; } if (err) goto done; sk->sk_state = MISDN_BOUND; _pms(sk)->ch.protocol = sk->sk_protocol; done: release_sock(sk); return err; } static int data_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; if (!_pms(sk)->dev) return -EBADFD; lock_sock(sk); maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xff; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff; release_sock(sk); return sizeof(*maddr); } static const struct proto_ops data_sock_ops = { .family = PF_ISDN, .owner = THIS_MODULE, .release = data_sock_release, .ioctl = data_sock_ioctl, .bind = data_sock_bind, .getname = data_sock_getname, .sendmsg = mISDN_sock_sendmsg, .recvmsg = mISDN_sock_recvmsg, .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = data_sock_setsockopt, .getsockopt = data_sock_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static int data_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &data_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = MISDN_OPEN; mISDN_sock_link(&data_sockets, sk); return 0; } static int base_sock_release(struct socket *sock) { struct sock *sk = sock->sk; printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (!sk) return 0; mISDN_sock_unlink(&base_sockets, sk); sock_orphan(sk); sock_put(sk); return 0; } static int base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int err = 0, id; struct mISDNdevice *dev; struct mISDNversion ver; switch (cmd) { case IMGETVERSION: ver.major = MISDN_MAJOR_VERSION; ver.minor = MISDN_MINOR_VERSION; ver.release = MISDN_RELEASE; if (copy_to_user((void __user *)arg, &ver, sizeof(ver))) err = -EFAULT; break; case IMGETCOUNT: id = get_mdevice_count(); if (put_user(id, (int __user *)arg)) err = -EFAULT; break; case IMGETDEVINFO: if (get_user(id, (int __user *)arg)) { err = -EFAULT; break; } dev = get_mdevice(id); if (dev) { struct mISDN_devinfo di; memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); di.protocol = dev->D.protocol; memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else err = -ENODEV; break; case IMSETDEVNAME: { struct mISDN_devrename dn; if (copy_from_user(&dn, (void __user *)arg, sizeof(dn))) { err = -EFAULT; break; } dn.name[sizeof(dn.name) - 1] = '\0'; dev = get_mdevice(dn.id); if (dev) err = device_rename(&dev->dev, dn.name); else err = -ENODEV; } break; default: err = -EINVAL; } return err; } static int base_sock_bind(struct socket *sock, struct sockaddr *addr, int 
addr_len) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; int err = 0; if (addr_len < sizeof(struct sockaddr_mISDN)) return -EINVAL; if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); if (_pms(sk)->dev) { err = -EALREADY; goto done; } _pms(sk)->dev = get_mdevice(maddr->dev); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } sk->sk_state = MISDN_BOUND; done: release_sock(sk); return err; } static const struct proto_ops base_sock_ops = { .family = PF_ISDN, .owner = THIS_MODULE, .release = base_sock_release, .ioctl = base_sock_ioctl, .bind = base_sock_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static int base_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; if (!capable(CAP_NET_RAW)) return -EPERM; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &base_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = MISDN_OPEN; mISDN_sock_link(&base_sockets, sk); return 0; } static int mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int err = -EPROTONOSUPPORT; switch (proto) { case ISDN_P_BASE: err = base_sock_create(net, sock, proto, kern); break; case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: case ISDN_P_LAPD_TE: case ISDN_P_LAPD_NT: case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: err = data_sock_create(net, sock, proto, kern); break; default: return err; } return err; } static const struct net_proto_family mISDN_sock_family_ops = { .owner = THIS_MODULE, .family = PF_ISDN, .create = mISDN_sock_create, }; int misdn_sock_init(u_int *deb) { int err; debug = deb; err = sock_register(&mISDN_sock_family_ops); if (err) printk(KERN_ERR "%s: error(%d)\n", __func__, err); return err; } void misdn_sock_cleanup(void) { sock_unregister(PF_ISDN); } |
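The base-socket path above (base_sock_create()/base_sock_ioctl()) only serves management requests such as IMGETCOUNT and IMGETDEVINFO on an ISDN_P_BASE raw socket. The following is a minimal userspace sketch of that interface, assuming the UAPI header <linux/mISDNif.h> is installed and the process has CAP_NET_RAW; it also assumes device ids are assigned contiguously from 0, which need not hold once devices have been removed, and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/mISDNif.h>

int main(void)
{
	struct mISDN_devinfo di;
	int sk, cnt = 0, i;

	/* base_sock_create() requires SOCK_RAW and CAP_NET_RAW */
	sk = socket(PF_ISDN, SOCK_RAW, ISDN_P_BASE);
	if (sk < 0) {
		perror("socket(PF_ISDN)");
		return 1;
	}

	/* base_sock_ioctl(IMGETCOUNT) writes the device count to an int */
	if (ioctl(sk, IMGETCOUNT, &cnt) < 0)
		perror("IMGETCOUNT");

	for (i = 0; i < cnt; i++) {
		memset(&di, 0, sizeof(di));
		di.id = i;
		/* IMGETDEVINFO reads di.id and fills in the remaining fields */
		if (ioctl(sk, IMGETDEVINFO, &di) == 0)
			printf("dev %d: name=%s nrbchan=%u\n",
			       i, di.name, (unsigned int)di.nrbchan);
	}

	close(sk);
	return 0;
}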
// SPDX-License-Identifier: GPL-2.0-or-later
/* hfcsusb.c
 * mISDN driver for Colognechip HFC-S USB chip
 *
 * Copyright 2001 by Peter Sprenger (sprenger@moving-bytes.de)
 * Copyright 2008 by Martin Bachem (info@bachem-it.com)
 *
 * module params
 *   debug=<n>, default=0, with n=0xHHHHGGGG
 *      H - l1 driver flags described in hfcsusb.h
 *      G - common mISDN debug flags described at mISDNhw.h
 *
 *   poll=<n>, default 128
 *     n : burst size of PH_DATA_IND at transparent rx data
 *
 * Revision: 0.3.3 (socket), 2008-11-05
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "hfcsusb.h"

static unsigned int debug;
static int poll = DEFAULT_TRANSP_BURST_SZ;

static LIST_HEAD(HFClist);
static DEFINE_RWLOCK(HFClock);

MODULE_AUTHOR("Martin Bachem");
MODULE_DESCRIPTION("mISDN driver for Colognechip HFC-S USB chip");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, int, 0);

static int
hfcsusb_cnt; /* some function prototypes */ static void hfcsusb_ph_command(struct hfcsusb *hw, u_char command); static void release_hw(struct hfcsusb *hw); static void reset_hfcsusb(struct hfcsusb *hw); static void setPortMode(struct hfcsusb *hw); static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel); static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel); static int hfcsusb_setup_bch(struct bchannel *bch, int protocol); static void deactivate_bchannel(struct bchannel *bch); static int hfcsusb_ph_info(struct hfcsusb *hw); /* start next background transfer for control channel */ static void ctrl_start_transfer(struct hfcsusb *hw) { if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); if (hw->ctrl_cnt) { hw->ctrl_urb->pipe = hw->ctrl_out_pipe; hw->ctrl_urb->setup_packet = (u_char *)&hw->ctrl_write; hw->ctrl_urb->transfer_buffer = NULL; hw->ctrl_urb->transfer_buffer_length = 0; hw->ctrl_write.wIndex = cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].hfcs_reg); hw->ctrl_write.wValue = cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].reg_val); usb_submit_urb(hw->ctrl_urb, GFP_ATOMIC); } } /* * queue a control transfer request to write HFC-S USB * chip register using CTRL resuest queue */ static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val) { struct ctrl_buf *buf; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s reg(0x%02x) val(0x%02x)\n", hw->name, __func__, reg, val); spin_lock(&hw->ctrl_lock); if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) { spin_unlock(&hw->ctrl_lock); return 1; } buf = &hw->ctrl_buff[hw->ctrl_in_idx]; buf->hfcs_reg = reg; buf->reg_val = val; if (++hw->ctrl_in_idx >= HFC_CTRL_BUFSIZE) hw->ctrl_in_idx = 0; if (++hw->ctrl_cnt == 1) ctrl_start_transfer(hw); spin_unlock(&hw->ctrl_lock); return 0; } /* control completion routine handling background control cmds */ static void ctrl_complete(struct urb *urb) { struct hfcsusb *hw = (struct hfcsusb *) urb->context; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); urb->dev = hw->dev; if (hw->ctrl_cnt) { hw->ctrl_cnt--; /* decrement actual count */ if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE) hw->ctrl_out_idx = 0; /* pointer wrap */ ctrl_start_transfer(hw); /* start next transfer */ } } /* handle LED bits */ static void set_led_bit(struct hfcsusb *hw, signed short led_bits, int set_on) { if (set_on) { if (led_bits < 0) hw->led_state &= ~abs(led_bits); else hw->led_state |= led_bits; } else { if (led_bits < 0) hw->led_state |= abs(led_bits); else hw->led_state &= ~led_bits; } } /* handle LED requests */ static void handle_led(struct hfcsusb *hw, int event) { struct hfcsusb_vdata *driver_info = (struct hfcsusb_vdata *) hfcsusb_idtab[hw->vend_idx].driver_info; __u8 tmpled; if (driver_info->led_scheme == LED_OFF) return; tmpled = hw->led_state; switch (event) { case LED_POWER_ON: set_led_bit(hw, driver_info->led_bits[0], 1); set_led_bit(hw, driver_info->led_bits[1], 0); set_led_bit(hw, driver_info->led_bits[2], 0); set_led_bit(hw, driver_info->led_bits[3], 0); break; case LED_POWER_OFF: set_led_bit(hw, driver_info->led_bits[0], 0); set_led_bit(hw, driver_info->led_bits[1], 0); set_led_bit(hw, driver_info->led_bits[2], 0); set_led_bit(hw, driver_info->led_bits[3], 0); break; case LED_S0_ON: set_led_bit(hw, driver_info->led_bits[1], 1); break; case LED_S0_OFF: set_led_bit(hw, driver_info->led_bits[1], 0); break; case LED_B1_ON: set_led_bit(hw, driver_info->led_bits[2], 1); break; case LED_B1_OFF: set_led_bit(hw, driver_info->led_bits[2], 0); break; case 
LED_B2_ON: set_led_bit(hw, driver_info->led_bits[3], 1); break; case LED_B2_OFF: set_led_bit(hw, driver_info->led_bits[3], 0); break; } if (hw->led_state != tmpled) { if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s reg(0x%02x) val(x%02x)\n", hw->name, __func__, HFCUSB_P_DATA, hw->led_state); write_reg(hw, HFCUSB_P_DATA, hw->led_state); } } /* * Layer2 -> Layer 1 Bchannel data */ static int hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) { struct bchannel *bch = container_of(ch, struct bchannel, ch); struct hfcsusb *hw = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); u_long flags; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(&hw->lock, flags); ret = bchannel_senddata(bch, skb); spin_unlock_irqrestore(&hw->lock, flags); if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n", hw->name, __func__, ret); if (ret > 0) ret = 0; return ret; case PH_ACTIVATE_REQ: if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) { hfcsusb_start_endpoint(hw, bch->nr - 1); ret = hfcsusb_setup_bch(bch, ch->protocol); } else ret = 0; if (!ret) _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); break; case PH_DEACTIVATE_REQ: deactivate_bchannel(bch); _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); ret = 0; break; } if (!ret) dev_kfree_skb(skb); return ret; } /* * send full D/B channel status information * as MPH_INFORMATION_IND */ static int hfcsusb_ph_info(struct hfcsusb *hw) { struct ph_info *phi; struct dchannel *dch = &hw->dch; int i; phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC); if (!phi) return -ENOMEM; phi->dch.ch.protocol = hw->protocol; phi->dch.ch.Flags = dch->Flags; phi->dch.state = dch->state; phi->dch.num_bch = dch->dev.nrbchan; for (i = 0; i < dch->dev.nrbchan; i++) { phi->bch[i].protocol = hw->bch[i].ch.protocol; phi->bch[i].Flags = hw->bch[i].Flags; } _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY, struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC); kfree(phi); return 0; } /* * Layer2 -> Layer 1 Dchannel data */ static int hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct mISDNhead *hh = mISDN_HEAD_P(skb); struct hfcsusb *hw = dch->hw; int ret = -EINVAL; u_long flags; switch (hh->prim) { case PH_DATA_REQ: if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s: PH_DATA_REQ\n", hw->name, __func__); spin_lock_irqsave(&hw->lock, flags); ret = dchannel_senddata(dch, skb); spin_unlock_irqrestore(&hw->lock, flags); if (ret > 0) { ret = 0; queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL); } break; case PH_ACTIVATE_REQ: if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s: PH_ACTIVATE_REQ %s\n", hw->name, __func__, (hw->protocol == ISDN_P_NT_S0) ? 
"NT" : "TE"); if (hw->protocol == ISDN_P_NT_S0) { ret = 0; if (test_bit(FLG_ACTIVE, &dch->Flags)) { _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } else { hfcsusb_ph_command(hw, HFC_L1_ACTIVATE_NT); test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags); } } else { hfcsusb_ph_command(hw, HFC_L1_ACTIVATE_TE); ret = l1_event(dch->l1, hh->prim); } break; case PH_DEACTIVATE_REQ: if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s: PH_DEACTIVATE_REQ\n", hw->name, __func__); test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); if (hw->protocol == ISDN_P_NT_S0) { struct sk_buff_head free_queue; __skb_queue_head_init(&free_queue); hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT); spin_lock_irqsave(&hw->lock, flags); skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); spin_unlock_irqrestore(&hw->lock, flags); __skb_queue_purge(&free_queue); #ifdef FIXME if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) dchannel_sched_event(&hc->dch, D_CLEARBUSY); #endif ret = 0; } else ret = l1_event(dch->l1, hh->prim); break; case MPH_INFORMATION_REQ: ret = hfcsusb_ph_info(hw); break; } return ret; } /* * Layer 1 callback function */ static int hfc_l1callback(struct dchannel *dch, u_int cmd) { struct hfcsusb *hw = dch->hw; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s cmd 0x%x\n", hw->name, __func__, cmd); switch (cmd) { case INFO3_P8: case INFO3_P10: case HW_RESET_REQ: case HW_POWERUP_REQ: break; case HW_DEACT_REQ: skb_queue_purge(&dch->squeue); if (dch->tx_skb) { dev_kfree_skb(dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { dev_kfree_skb(dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); break; case PH_ACTIVATE_IND: test_and_set_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); break; case PH_DEACTIVATE_IND: test_and_clear_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); break; default: if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: unknown cmd %x\n", hw->name, __func__, cmd); return -1; } return hfcsusb_ph_info(hw); } static int open_dchannel(struct hfcsusb *hw, struct mISDNchannel *ch, struct channel_req *rq) { int err = 0; if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: %s: dev(%d) open addr(%i) from %p\n", hw->name, __func__, hw->dch.dev.id, rq->adr.channel, __builtin_return_address(0)); if (rq->protocol == ISDN_P_NONE) return -EINVAL; test_and_clear_bit(FLG_ACTIVE, &hw->dch.Flags); test_and_clear_bit(FLG_ACTIVE, &hw->ech.Flags); hfcsusb_start_endpoint(hw, HFC_CHAN_D); /* E-Channel logging */ if (rq->adr.channel == 1) { if (hw->fifos[HFCUSB_PCM_RX].pipe) { hfcsusb_start_endpoint(hw, HFC_CHAN_E); set_bit(FLG_ACTIVE, &hw->ech.Flags); _queue_data(&hw->ech.dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } else return -EINVAL; } if (!hw->initdone) { hw->protocol = rq->protocol; if (rq->protocol == ISDN_P_TE_S0) { err = create_l1(&hw->dch, hfc_l1callback); if (err) return err; } setPortMode(hw); ch->protocol = rq->protocol; hw->initdone = 1; } else { if (rq->protocol != ch->protocol) return -EPROTONOSUPPORT; } if (((ch->protocol == ISDN_P_NT_S0) && (hw->dch.state == 3)) || ((ch->protocol == ISDN_P_TE_S0) && (hw->dch.state == 7))) _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, 
GFP_KERNEL); rq->ch = ch; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s: %s: cannot get module\n", hw->name, __func__); return 0; } static int open_bchannel(struct hfcsusb *hw, struct channel_req *rq) { struct bchannel *bch; if (rq->adr.channel == 0 || rq->adr.channel > 2) return -EINVAL; if (rq->protocol == ISDN_P_NONE) return -EINVAL; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s B%i\n", hw->name, __func__, rq->adr.channel); bch = &hw->bch[rq->adr.channel - 1]; if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s: %s:cannot get module\n", hw->name, __func__); return 0; } static int channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq) { int ret = 0; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s op(0x%x) channel(0x%x)\n", hw->name, __func__, (cq->op), (cq->channel)); switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | MISDN_CTRL_DISCONNECT; break; default: printk(KERN_WARNING "%s: %s: unknown Op %x\n", hw->name, __func__, cq->op); ret = -EINVAL; break; } return ret; } /* * device control function */ static int hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct hfcsusb *hw = dch->hw; struct channel_req *rq; int err = 0; if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: cmd:%x %p\n", hw->name, __func__, cmd, arg); switch (cmd) { case OPEN_CHANNEL: rq = arg; if ((rq->protocol == ISDN_P_TE_S0) || (rq->protocol == ISDN_P_NT_S0)) err = open_dchannel(hw, ch, rq); else err = open_bchannel(hw, rq); if (!err) hw->open++; break; case CLOSE_CHANNEL: hw->open--; if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: %s: dev(%d) close from %p (open %d)\n", hw->name, __func__, hw->dch.dev.id, __builtin_return_address(0), hw->open); if (!hw->open) { hfcsusb_stop_endpoint(hw, HFC_CHAN_D); if (hw->fifos[HFCUSB_PCM_RX].pipe) hfcsusb_stop_endpoint(hw, HFC_CHAN_E); handle_led(hw, LED_POWER_ON); } module_put(THIS_MODULE); break; case CONTROL_CHANNEL: err = channel_ctrl(hw, arg); break; default: if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: unknown command %x\n", hw->name, __func__, cmd); return -EINVAL; } return err; } /* * S0 TE state change event handler */ static void ph_state_te(struct dchannel *dch) { struct hfcsusb *hw = dch->hw; if (debug & DEBUG_HW) { if (dch->state <= HFC_MAX_TE_LAYER1_STATE) printk(KERN_DEBUG "%s: %s: %s\n", hw->name, __func__, HFC_TE_LAYER1_STATES[dch->state]); else printk(KERN_DEBUG "%s: %s: TE F%d\n", hw->name, __func__, dch->state); } switch (dch->state) { case 0: l1_event(dch->l1, HW_RESET_IND); break; case 3: l1_event(dch->l1, HW_DEACT_IND); break; case 5: case 8: l1_event(dch->l1, ANYSIGNAL); break; case 6: l1_event(dch->l1, INFO2); break; case 7: l1_event(dch->l1, INFO4_P8); break; } if (dch->state == 7) handle_led(hw, LED_S0_ON); else handle_led(hw, LED_S0_OFF); } /* * S0 NT state change event handler */ static void ph_state_nt(struct dchannel *dch) { struct hfcsusb *hw = dch->hw; if (debug & DEBUG_HW) { if (dch->state <= HFC_MAX_NT_LAYER1_STATE) printk(KERN_DEBUG "%s: %s: %s\n", hw->name, __func__, HFC_NT_LAYER1_STATES[dch->state]); else printk(KERN_INFO DRIVER_NAME "%s: %s: NT G%d\n", hw->name, __func__, dch->state); } switch (dch->state) { case (1): test_and_clear_bit(FLG_ACTIVE, 
&dch->Flags); test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); hw->nt_timer = 0; hw->timers &= ~NT_ACTIVATION_TIMER; handle_led(hw, LED_S0_OFF); break; case (2): if (hw->nt_timer < 0) { hw->nt_timer = 0; hw->timers &= ~NT_ACTIVATION_TIMER; hfcsusb_ph_command(dch->hw, HFC_L1_DEACTIVATE_NT); } else { hw->timers |= NT_ACTIVATION_TIMER; hw->nt_timer = NT_T1_COUNT; /* allow G2 -> G3 transition */ write_reg(hw, HFCUSB_STATES, 2 | HFCUSB_NT_G2_G3); } break; case (3): hw->nt_timer = 0; hw->timers &= ~NT_ACTIVATION_TIMER; test_and_set_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); handle_led(hw, LED_S0_ON); break; case (4): hw->nt_timer = 0; hw->timers &= ~NT_ACTIVATION_TIMER; break; default: break; } hfcsusb_ph_info(hw); } static void ph_state(struct dchannel *dch) { struct hfcsusb *hw = dch->hw; if (hw->protocol == ISDN_P_NT_S0) ph_state_nt(dch); else if (hw->protocol == ISDN_P_TE_S0) ph_state_te(dch); } /* * disable/enable BChannel for desired protocol */ static int hfcsusb_setup_bch(struct bchannel *bch, int protocol) { struct hfcsusb *hw = bch->hw; __u8 conhdlc, sctrl, sctrl_r; if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: protocol %x-->%x B%d\n", hw->name, __func__, bch->state, protocol, bch->nr); /* setup val for CON_HDLC */ conhdlc = 0; if (protocol > ISDN_P_NONE) conhdlc = 8; /* enable FIFO */ switch (protocol) { case (-1): /* used for init */ bch->state = -1; fallthrough; case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; /* already in idle state */ bch->state = ISDN_P_NONE; clear_bit(FLG_HDLC, &bch->Flags); clear_bit(FLG_TRANSPARENT, &bch->Flags); break; case (ISDN_P_B_RAW): conhdlc |= 2; bch->state = protocol; set_bit(FLG_TRANSPARENT, &bch->Flags); break; case (ISDN_P_B_HDLC): bch->state = protocol; set_bit(FLG_HDLC, &bch->Flags); break; default: if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: prot not known %x\n", hw->name, __func__, protocol); return -ENOPROTOOPT; } if (protocol >= ISDN_P_NONE) { write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 0 : 2); write_reg(hw, HFCUSB_CON_HDLC, conhdlc); write_reg(hw, HFCUSB_INC_RES_F, 2); write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 1 : 3); write_reg(hw, HFCUSB_CON_HDLC, conhdlc); write_reg(hw, HFCUSB_INC_RES_F, 2); sctrl = 0x40 + ((hw->protocol == ISDN_P_TE_S0) ? 0x00 : 0x04); sctrl_r = 0x0; if (test_bit(FLG_ACTIVE, &hw->bch[0].Flags)) { sctrl |= 1; sctrl_r |= 1; } if (test_bit(FLG_ACTIVE, &hw->bch[1].Flags)) { sctrl |= 2; sctrl_r |= 2; } write_reg(hw, HFCUSB_SCTRL, sctrl); write_reg(hw, HFCUSB_SCTRL_R, sctrl_r); if (protocol > ISDN_P_NONE) handle_led(hw, (bch->nr == 1) ? LED_B1_ON : LED_B2_ON); else handle_led(hw, (bch->nr == 1) ? 
LED_B1_OFF : LED_B2_OFF); } return hfcsusb_ph_info(hw); } static void hfcsusb_ph_command(struct hfcsusb *hw, u_char command) { if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: %x\n", hw->name, __func__, command); switch (command) { case HFC_L1_ACTIVATE_TE: /* force sending sending INFO1 */ write_reg(hw, HFCUSB_STATES, 0x14); /* start l1 activation */ write_reg(hw, HFCUSB_STATES, 0x04); break; case HFC_L1_FORCE_DEACTIVATE_TE: write_reg(hw, HFCUSB_STATES, 0x10); write_reg(hw, HFCUSB_STATES, 0x03); break; case HFC_L1_ACTIVATE_NT: if (hw->dch.state == 3) _queue_data(&hw->dch.dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); else write_reg(hw, HFCUSB_STATES, HFCUSB_ACTIVATE | HFCUSB_DO_ACTION | HFCUSB_NT_G2_G3); break; case HFC_L1_DEACTIVATE_NT: write_reg(hw, HFCUSB_STATES, HFCUSB_DO_ACTION); break; } } /* * Layer 1 B-channel hardware access */ static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { return mISDN_ctrl_bchannel(bch, cq); } /* collect data from incoming interrupt or isochron USB data */ static void hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, int finish) { struct hfcsusb *hw = fifo->hw; struct sk_buff *rx_skb = NULL; int maxlen = 0; int fifon = fifo->fifonum; int i; int hdlc = 0; unsigned long flags; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) " "dch(%p) bch(%p) ech(%p)\n", hw->name, __func__, fifon, len, fifo->dch, fifo->bch, fifo->ech); if (!len) return; if ((!!fifo->dch + !!fifo->bch + !!fifo->ech) != 1) { printk(KERN_DEBUG "%s: %s: undefined channel\n", hw->name, __func__); return; } spin_lock_irqsave(&hw->lock, flags); if (fifo->dch) { rx_skb = fifo->dch->rx_skb; maxlen = fifo->dch->maxlen; hdlc = 1; } if (fifo->bch) { if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) { fifo->bch->dropcnt += len; spin_unlock_irqrestore(&hw->lock, flags); return; } maxlen = bchannel_get_rxbuf(fifo->bch, len); rx_skb = fifo->bch->rx_skb; if (maxlen < 0) { if (rx_skb) skb_trim(rx_skb, 0); pr_warn("%s.B%d: No bufferspace for %d bytes\n", hw->name, fifo->bch->nr, len); spin_unlock_irqrestore(&hw->lock, flags); return; } maxlen = fifo->bch->maxlen; hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags); } if (fifo->ech) { rx_skb = fifo->ech->rx_skb; maxlen = fifo->ech->maxlen; hdlc = 1; } if (fifo->dch || fifo->ech) { if (!rx_skb) { rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC); if (rx_skb) { if (fifo->dch) fifo->dch->rx_skb = rx_skb; if (fifo->ech) fifo->ech->rx_skb = rx_skb; skb_trim(rx_skb, 0); } else { printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n", hw->name, __func__); spin_unlock_irqrestore(&hw->lock, flags); return; } } /* D/E-Channel SKB range check */ if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) { printk(KERN_DEBUG "%s: %s: sbk mem exceeded " "for fifo(%d) HFCUSB_D_RX\n", hw->name, __func__, fifon); skb_trim(rx_skb, 0); spin_unlock_irqrestore(&hw->lock, flags); return; } } skb_put_data(rx_skb, data, len); if (hdlc) { /* we have a complete hdlc packet */ if (finish) { if ((rx_skb->len > 3) && (!(rx_skb->data[rx_skb->len - 1]))) { if (debug & DBG_HFC_FIFO_VERBOSE) { printk(KERN_DEBUG "%s: %s: fifon(%i)" " new RX len(%i): ", hw->name, __func__, fifon, rx_skb->len); i = 0; while (i < rx_skb->len) printk("%02x ", rx_skb->data[i++]); printk("\n"); } /* remove CRC & status */ skb_trim(rx_skb, rx_skb->len - 3); if (fifo->dch) recv_Dchannel(fifo->dch); if (fifo->bch) recv_Bchannel(fifo->bch, MISDN_ID_ANY, 0); if (fifo->ech) recv_Echannel(fifo->ech, &hw->dch); } else { if (debug & DBG_HFC_FIFO_VERBOSE) { printk(KERN_DEBUG "%s: CRC 
or minlen ERROR fifon(%i) " "RX len(%i): ", hw->name, fifon, rx_skb->len); i = 0; while (i < rx_skb->len) printk("%02x ", rx_skb->data[i++]); printk("\n"); } skb_trim(rx_skb, 0); } } } else { /* deliver transparent data to layer2 */ recv_Bchannel(fifo->bch, MISDN_ID_ANY, false); } spin_unlock_irqrestore(&hw->lock, flags); } static void fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *buf, int num_packets, int packet_size, int interval, usb_complete_t complete, void *context) { int k; usb_fill_bulk_urb(urb, dev, pipe, buf, packet_size * num_packets, complete, context); urb->number_of_packets = num_packets; urb->transfer_flags = URB_ISO_ASAP; urb->actual_length = 0; urb->interval = interval; for (k = 0; k < num_packets; k++) { urb->iso_frame_desc[k].offset = packet_size * k; urb->iso_frame_desc[k].length = packet_size; urb->iso_frame_desc[k].actual_length = 0; } } /* receive completion routine for all ISO tx fifos */ static void rx_iso_complete(struct urb *urb) { struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context; struct usb_fifo *fifo = context_iso_urb->owner_fifo; struct hfcsusb *hw = fifo->hw; int k, len, errcode, offset, num_isoc_packets, fifon, maxlen, status, iso_status, i; __u8 *buf; static __u8 eof[8]; __u8 s0_state; unsigned long flags; fifon = fifo->fifonum; status = urb->status; spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; spin_unlock_irqrestore(&hw->lock, flags); return; } spin_unlock_irqrestore(&hw->lock, flags); /* * ISO transfer only partially completed, * look at individual frame status for details */ if (status == -EXDEV) { if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: with -EXDEV " "urb->status %d, fifonum %d\n", hw->name, __func__, status, fifon); /* clear status, so go on with ISO transfers */ status = 0; } s0_state = 0; if (fifo->active && !status) { num_isoc_packets = iso_packets[fifon]; maxlen = fifo->usb_packet_maxlen; for (k = 0; k < num_isoc_packets; ++k) { len = urb->iso_frame_desc[k].actual_length; offset = urb->iso_frame_desc[k].offset; buf = context_iso_urb->buffer + offset; iso_status = urb->iso_frame_desc[k].status; if (iso_status && (debug & DBG_HFC_FIFO_VERBOSE)) { printk(KERN_DEBUG "%s: %s: " "ISO packet %i, status: %i\n", hw->name, __func__, k, iso_status); } /* USB data log for every D ISO in */ if ((fifon == HFCUSB_D_RX) && (debug & DBG_HFC_USB_VERBOSE)) { printk(KERN_DEBUG "%s: %s: %d (%d/%d) len(%d) ", hw->name, __func__, urb->start_frame, k, num_isoc_packets - 1, len); for (i = 0; i < len; i++) printk("%x ", buf[i]); printk("\n"); } if (!iso_status) { if (fifo->last_urblen != maxlen) { /* * save fifo fill-level threshold bits * to use them later in TX ISO URB * completions */ hw->threshold_mask = buf[1]; if (fifon == HFCUSB_D_RX) s0_state = (buf[0] >> 4); eof[fifon] = buf[0] & 1; if (len > 2) hfcsusb_rx_frame(fifo, buf + 2, len - 2, (len < maxlen) ? eof[fifon] : 0); } else hfcsusb_rx_frame(fifo, buf, len, (len < maxlen) ? 
eof[fifon] : 0); fifo->last_urblen = len; } } /* signal S0 layer1 state change */ if ((s0_state) && (hw->initdone) && (s0_state != hw->dch.state)) { hw->dch.state = s0_state; schedule_event(&hw->dch, FLG_PHCHANGE); } fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe, context_iso_urb->buffer, num_isoc_packets, fifo->usb_packet_maxlen, fifo->intervall, (usb_complete_t)rx_iso_complete, urb->context); errcode = usb_submit_urb(urb, GFP_ATOMIC); if (errcode < 0) { if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: error submitting " "ISO URB: %d\n", hw->name, __func__, errcode); } } else { if (status && (debug & DBG_HFC_URB_INFO)) printk(KERN_DEBUG "%s: %s: rx_iso_complete : " "urb->status %d, fifonum %d\n", hw->name, __func__, status, fifon); } } /* receive completion routine for all interrupt rx fifos */ static void rx_int_complete(struct urb *urb) { int len, status, i; __u8 *buf, maxlen, fifon; struct usb_fifo *fifo = (struct usb_fifo *) urb->context; struct hfcsusb *hw = fifo->hw; static __u8 eof[8]; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; spin_unlock_irqrestore(&hw->lock, flags); return; } spin_unlock_irqrestore(&hw->lock, flags); fifon = fifo->fifonum; if ((!fifo->active) || (urb->status)) { if (debug & DBG_HFC_URB_ERROR) printk(KERN_DEBUG "%s: %s: RX-Fifo %i is going down (%i)\n", hw->name, __func__, fifon, urb->status); fifo->urb->interval = 0; /* cancel automatic rescheduling */ return; } len = urb->actual_length; buf = fifo->buffer; maxlen = fifo->usb_packet_maxlen; /* USB data log for every D INT in */ if ((fifon == HFCUSB_D_RX) && (debug & DBG_HFC_USB_VERBOSE)) { printk(KERN_DEBUG "%s: %s: D RX INT len(%d) ", hw->name, __func__, len); for (i = 0; i < len; i++) printk("%02x ", buf[i]); printk("\n"); } if (fifo->last_urblen != fifo->usb_packet_maxlen) { /* the threshold mask is in the 2nd status byte */ hw->threshold_mask = buf[1]; /* signal S0 layer1 state change */ if (hw->initdone && ((buf[0] >> 4) != hw->dch.state)) { hw->dch.state = (buf[0] >> 4); schedule_event(&hw->dch, FLG_PHCHANGE); } eof[fifon] = buf[0] & 1; /* if we have more than the 2 status bytes -> collect data */ if (len > 2) hfcsusb_rx_frame(fifo, buf + 2, urb->actual_length - 2, (len < maxlen) ? eof[fifon] : 0); } else { hfcsusb_rx_frame(fifo, buf, urb->actual_length, (len < maxlen) ? 
eof[fifon] : 0); } fifo->last_urblen = urb->actual_length; status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: error resubmitting USB\n", hw->name, __func__); } } /* transmit completion routine for all ISO tx fifos */ static void tx_iso_complete(struct urb *urb) { struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context; struct usb_fifo *fifo = context_iso_urb->owner_fifo; struct hfcsusb *hw = fifo->hw; struct sk_buff *tx_skb; int k, tx_offset, num_isoc_packets, sink, remain, current_len, errcode, hdlc, i; int *tx_idx; int frame_complete, fifon, status, fillempty = 0; __u8 threshbit, *p; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; spin_unlock_irqrestore(&hw->lock, flags); return; } if (fifo->dch) { tx_skb = fifo->dch->tx_skb; tx_idx = &fifo->dch->tx_idx; hdlc = 1; } else if (fifo->bch) { tx_skb = fifo->bch->tx_skb; tx_idx = &fifo->bch->tx_idx; hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags); if (!tx_skb && !hdlc && test_bit(FLG_FILLEMPTY, &fifo->bch->Flags)) fillempty = 1; } else { printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n", hw->name, __func__); spin_unlock_irqrestore(&hw->lock, flags); return; } fifon = fifo->fifonum; status = urb->status; tx_offset = 0; /* * ISO transfer only partially completed, * look at individual frame status for details */ if (status == -EXDEV) { if (debug & DBG_HFC_URB_ERROR) printk(KERN_DEBUG "%s: %s: " "-EXDEV (%i) fifon (%d)\n", hw->name, __func__, status, fifon); /* clear status, so go on with ISO transfers */ status = 0; } if (fifo->active && !status) { /* is FifoFull-threshold set for our channel? */ threshbit = (hw->threshold_mask & (1 << fifon)); num_isoc_packets = iso_packets[fifon]; /* predict dataflow to avoid fifo overflow */ if (fifon >= HFCUSB_D_TX) sink = (threshbit) ? SINK_DMIN : SINK_DMAX; else sink = (threshbit) ? SINK_MIN : SINK_MAX; fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe, context_iso_urb->buffer, num_isoc_packets, fifo->usb_packet_maxlen, fifo->intervall, (usb_complete_t)tx_iso_complete, urb->context); memset(context_iso_urb->buffer, 0, sizeof(context_iso_urb->buffer)); frame_complete = 0; for (k = 0; k < num_isoc_packets; ++k) { /* analyze tx success of previous ISO packets */ if (debug & DBG_HFC_URB_ERROR) { errcode = urb->iso_frame_desc[k].status; if (errcode) { printk(KERN_DEBUG "%s: %s: " "ISO packet %i, status: %i\n", hw->name, __func__, k, errcode); } } /* Generate next ISO Packets */ if (tx_skb) remain = tx_skb->len - *tx_idx; else if (fillempty) remain = 15; /* > not complete */ else remain = 0; if (remain > 0) { fifo->bit_line -= sink; current_len = (0 - fifo->bit_line) / 8; if (current_len > 14) current_len = 14; if (current_len < 0) current_len = 0; if (remain < current_len) current_len = remain; /* how much bit do we put on the line? 
*/ fifo->bit_line += current_len * 8; context_iso_urb->buffer[tx_offset] = 0; if (current_len == remain) { if (hdlc) { /* signal frame completion */ context_iso_urb-> buffer[tx_offset] = 1; /* add 2 byte flags and 16bit * CRC at end of ISDN frame */ fifo->bit_line += 32; } frame_complete = 1; } /* copy tx data to iso-urb buffer */ p = context_iso_urb->buffer + tx_offset + 1; if (fillempty) { memset(p, fifo->bch->fill[0], current_len); } else { memcpy(p, (tx_skb->data + *tx_idx), current_len); *tx_idx += current_len; } urb->iso_frame_desc[k].offset = tx_offset; urb->iso_frame_desc[k].length = current_len + 1; /* USB data log for every D ISO out */ if ((fifon == HFCUSB_D_RX) && !fillempty && (debug & DBG_HFC_USB_VERBOSE)) { printk(KERN_DEBUG "%s: %s (%d/%d) offs(%d) len(%d) ", hw->name, __func__, k, num_isoc_packets - 1, urb->iso_frame_desc[k].offset, urb->iso_frame_desc[k].length); for (i = urb->iso_frame_desc[k].offset; i < (urb->iso_frame_desc[k].offset + urb->iso_frame_desc[k].length); i++) printk("%x ", context_iso_urb->buffer[i]); printk(" skb->len(%i) tx-idx(%d)\n", tx_skb->len, *tx_idx); } tx_offset += (current_len + 1); } else { urb->iso_frame_desc[k].offset = tx_offset++; urb->iso_frame_desc[k].length = 1; /* we lower data margin every msec */ fifo->bit_line -= sink; if (fifo->bit_line < BITLINE_INF) fifo->bit_line = BITLINE_INF; } if (frame_complete) { frame_complete = 0; if (debug & DBG_HFC_FIFO_VERBOSE) { printk(KERN_DEBUG "%s: %s: " "fifon(%i) new TX len(%i): ", hw->name, __func__, fifon, tx_skb->len); i = 0; while (i < tx_skb->len) printk("%02x ", tx_skb->data[i++]); printk("\n"); } dev_consume_skb_irq(tx_skb); tx_skb = NULL; if (fifo->dch && get_next_dframe(fifo->dch)) tx_skb = fifo->dch->tx_skb; else if (fifo->bch && get_next_bframe(fifo->bch)) tx_skb = fifo->bch->tx_skb; } } errcode = usb_submit_urb(urb, GFP_ATOMIC); if (errcode < 0) { if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: error submitting ISO URB: %d \n", hw->name, __func__, errcode); } /* * abuse DChannel tx iso completion to trigger NT mode state * changes tx_iso_complete is assumed to be called every * fifo->intervall (ms) */ if ((fifon == HFCUSB_D_TX) && (hw->protocol == ISDN_P_NT_S0) && (hw->timers & NT_ACTIVATION_TIMER)) { if ((--hw->nt_timer) < 0) schedule_event(&hw->dch, FLG_PHCHANGE); } } else { if (status && (debug & DBG_HFC_URB_ERROR)) printk(KERN_DEBUG "%s: %s: urb->status %s (%i)" "fifonum=%d\n", hw->name, __func__, symbolic(urb_errlist, status), status, fifon); } spin_unlock_irqrestore(&hw->lock, flags); } /* * allocs urbs and start isoc transfer with two pending urbs to avoid * gaps in the transfer chain */ static int start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb, usb_complete_t complete, int packet_size) { struct hfcsusb *hw = fifo->hw; int i, k, errcode; if (debug) printk(KERN_DEBUG "%s: %s: fifo %i\n", hw->name, __func__, fifo->fifonum); /* allocate Memory for Iso out Urbs */ for (i = 0; i < 2; i++) { if (!(fifo->iso[i].urb)) { fifo->iso[i].urb = usb_alloc_urb(num_packets_per_urb, GFP_KERNEL); if (!(fifo->iso[i].urb)) { printk(KERN_DEBUG "%s: %s: alloc urb for fifo %i failed", hw->name, __func__, fifo->fifonum); continue; } fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; fifo->iso[i].indx = i; /* Init the first iso */ if (ISO_BUFFER_SIZE >= (fifo->usb_packet_maxlen * num_packets_per_urb)) { fill_isoc_urb(fifo->iso[i].urb, fifo->hw->dev, fifo->pipe, fifo->iso[i].buffer, num_packets_per_urb, fifo->usb_packet_maxlen, fifo->intervall, complete, &fifo->iso[i]); 
memset(fifo->iso[i].buffer, 0, sizeof(fifo->iso[i].buffer)); for (k = 0; k < num_packets_per_urb; k++) { fifo->iso[i].urb-> iso_frame_desc[k].offset = k * packet_size; fifo->iso[i].urb-> iso_frame_desc[k].length = packet_size; } } else { printk(KERN_DEBUG "%s: %s: ISO Buffer size to small!\n", hw->name, __func__); } } fifo->bit_line = BITLINE_INF; errcode = usb_submit_urb(fifo->iso[i].urb, GFP_KERNEL); fifo->active = (errcode >= 0) ? 1 : 0; fifo->stop_gracefull = 0; if (errcode < 0) { printk(KERN_DEBUG "%s: %s: %s URB nr:%d\n", hw->name, __func__, symbolic(urb_errlist, errcode), i); } } return fifo->active; } static void stop_iso_gracefull(struct usb_fifo *fifo) { struct hfcsusb *hw = fifo->hw; int i, timeout; u_long flags; for (i = 0; i < 2; i++) { spin_lock_irqsave(&hw->lock, flags); if (debug) printk(KERN_DEBUG "%s: %s for fifo %i.%i\n", hw->name, __func__, fifo->fifonum, i); fifo->stop_gracefull = 1; spin_unlock_irqrestore(&hw->lock, flags); } for (i = 0; i < 2; i++) { timeout = 3; while (fifo->stop_gracefull && timeout--) schedule_timeout_interruptible((HZ / 1000) * 16); if (debug && fifo->stop_gracefull) printk(KERN_DEBUG "%s: ERROR %s for fifo %i.%i\n", hw->name, __func__, fifo->fifonum, i); } } static void stop_int_gracefull(struct usb_fifo *fifo) { struct hfcsusb *hw = fifo->hw; int timeout; u_long flags; spin_lock_irqsave(&hw->lock, flags); if (debug) printk(KERN_DEBUG "%s: %s for fifo %i\n", hw->name, __func__, fifo->fifonum); fifo->stop_gracefull = 1; spin_unlock_irqrestore(&hw->lock, flags); timeout = 3; while (fifo->stop_gracefull && timeout--) schedule_timeout_interruptible((HZ / 1000) * 3); if (debug && fifo->stop_gracefull) printk(KERN_DEBUG "%s: ERROR %s for fifo %i\n", hw->name, __func__, fifo->fifonum); } /* start the interrupt transfer for the given fifo */ static void start_int_fifo(struct usb_fifo *fifo) { struct hfcsusb *hw = fifo->hw; int errcode; if (debug) printk(KERN_DEBUG "%s: %s: INT IN fifo:%d\n", hw->name, __func__, fifo->fifonum); if (!fifo->urb) { fifo->urb = usb_alloc_urb(0, GFP_KERNEL); if (!fifo->urb) return; } usb_fill_int_urb(fifo->urb, fifo->hw->dev, fifo->pipe, fifo->buffer, fifo->usb_packet_maxlen, (usb_complete_t)rx_int_complete, fifo, fifo->intervall); fifo->active = 1; fifo->stop_gracefull = 0; errcode = usb_submit_urb(fifo->urb, GFP_KERNEL); if (errcode) { printk(KERN_DEBUG "%s: %s: submit URB: status:%i\n", hw->name, __func__, errcode); fifo->active = 0; } } static void setPortMode(struct hfcsusb *hw) { if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s %s\n", hw->name, __func__, (hw->protocol == ISDN_P_TE_S0) ? 
"TE" : "NT"); if (hw->protocol == ISDN_P_TE_S0) { write_reg(hw, HFCUSB_SCTRL, 0x40); write_reg(hw, HFCUSB_SCTRL_E, 0x00); write_reg(hw, HFCUSB_CLKDEL, CLKDEL_TE); write_reg(hw, HFCUSB_STATES, 3 | 0x10); write_reg(hw, HFCUSB_STATES, 3); } else { write_reg(hw, HFCUSB_SCTRL, 0x44); write_reg(hw, HFCUSB_SCTRL_E, 0x09); write_reg(hw, HFCUSB_CLKDEL, CLKDEL_NT); write_reg(hw, HFCUSB_STATES, 1 | 0x10); write_reg(hw, HFCUSB_STATES, 1); } } static void reset_hfcsusb(struct hfcsusb *hw) { struct usb_fifo *fifo; int i; if (debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); /* do Chip reset */ write_reg(hw, HFCUSB_CIRM, 8); /* aux = output, reset off */ write_reg(hw, HFCUSB_CIRM, 0x10); /* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */ write_reg(hw, HFCUSB_USB_SIZE, (hw->packet_size / 8) | ((hw->packet_size / 8) << 4)); /* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */ write_reg(hw, HFCUSB_USB_SIZE_I, hw->iso_packet_size); /* enable PCM/GCI master mode */ write_reg(hw, HFCUSB_MST_MODE1, 0); /* set default values */ write_reg(hw, HFCUSB_MST_MODE0, 1); /* enable master mode */ /* init the fifos */ write_reg(hw, HFCUSB_F_THRES, (HFCUSB_TX_THRESHOLD / 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4)); fifo = hw->fifos; for (i = 0; i < HFCUSB_NUM_FIFOS; i++) { write_reg(hw, HFCUSB_FIFO, i); /* select the desired fifo */ fifo[i].max_size = (i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN; fifo[i].last_urblen = 0; /* set 2 bit for D- & E-channel */ write_reg(hw, HFCUSB_HDLC_PAR, ((i <= HFCUSB_B2_RX) ? 0 : 2)); /* enable all fifos */ if (i == HFCUSB_D_TX) write_reg(hw, HFCUSB_CON_HDLC, (hw->protocol == ISDN_P_NT_S0) ? 0x08 : 0x09); else write_reg(hw, HFCUSB_CON_HDLC, 0x08); write_reg(hw, HFCUSB_INC_RES_F, 2); /* reset the fifo */ } write_reg(hw, HFCUSB_SCTRL_R, 0); /* disable both B receivers */ handle_led(hw, LED_POWER_ON); } /* start USB data pipes dependand on device's endpoint configuration */ static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel) { /* quick check if endpoint already running */ if ((channel == HFC_CHAN_D) && (hw->fifos[HFCUSB_D_RX].active)) return; if ((channel == HFC_CHAN_B1) && (hw->fifos[HFCUSB_B1_RX].active)) return; if ((channel == HFC_CHAN_B2) && (hw->fifos[HFCUSB_B2_RX].active)) return; if ((channel == HFC_CHAN_E) && (hw->fifos[HFCUSB_PCM_RX].active)) return; /* start rx endpoints using USB INT IN method */ if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO) start_int_fifo(hw->fifos + channel * 2 + 1); /* start rx endpoints using USB ISO IN method */ if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) { switch (channel) { case HFC_CHAN_D: start_isoc_chain(hw->fifos + HFCUSB_D_RX, ISOC_PACKETS_D, (usb_complete_t)rx_iso_complete, 16); break; case HFC_CHAN_E: start_isoc_chain(hw->fifos + HFCUSB_PCM_RX, ISOC_PACKETS_D, (usb_complete_t)rx_iso_complete, 16); break; case HFC_CHAN_B1: start_isoc_chain(hw->fifos + HFCUSB_B1_RX, ISOC_PACKETS_B, (usb_complete_t)rx_iso_complete, 16); break; case HFC_CHAN_B2: start_isoc_chain(hw->fifos + HFCUSB_B2_RX, ISOC_PACKETS_B, (usb_complete_t)rx_iso_complete, 16); break; } } /* start tx endpoints using USB ISO OUT method */ switch (channel) { case HFC_CHAN_D: start_isoc_chain(hw->fifos + HFCUSB_D_TX, ISOC_PACKETS_B, (usb_complete_t)tx_iso_complete, 1); break; case HFC_CHAN_B1: start_isoc_chain(hw->fifos + HFCUSB_B1_TX, ISOC_PACKETS_D, (usb_complete_t)tx_iso_complete, 1); break; case HFC_CHAN_B2: start_isoc_chain(hw->fifos + HFCUSB_B2_TX, ISOC_PACKETS_B, 
(usb_complete_t)tx_iso_complete, 1); break; } } /* stop USB data pipes dependand on device's endpoint configuration */ static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel) { /* quick check if endpoint currently running */ if ((channel == HFC_CHAN_D) && (!hw->fifos[HFCUSB_D_RX].active)) return; if ((channel == HFC_CHAN_B1) && (!hw->fifos[HFCUSB_B1_RX].active)) return; if ((channel == HFC_CHAN_B2) && (!hw->fifos[HFCUSB_B2_RX].active)) return; if ((channel == HFC_CHAN_E) && (!hw->fifos[HFCUSB_PCM_RX].active)) return; /* rx endpoints using USB INT IN method */ if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO) stop_int_gracefull(hw->fifos + channel * 2 + 1); /* rx endpoints using USB ISO IN method */ if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) stop_iso_gracefull(hw->fifos + channel * 2 + 1); /* tx endpoints using USB ISO OUT method */ if (channel != HFC_CHAN_E) stop_iso_gracefull(hw->fifos + channel * 2); } /* Hardware Initialization */ static int setup_hfcsusb(struct hfcsusb *hw) { void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL); u_char b; int ret; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); if (!dmabuf) return -ENOMEM; ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf); memcpy(&b, dmabuf, sizeof(u_char)); kfree(dmabuf); /* check the chip id */ if (ret != 1) { printk(KERN_DEBUG "%s: %s: cannot read chip id\n", hw->name, __func__); return 1; } if (b != HFCUSB_CHIPID) { printk(KERN_DEBUG "%s: %s: Invalid chip id 0x%02x\n", hw->name, __func__, b); return 1; } /* first set the needed config, interface and alternate */ (void) usb_set_interface(hw->dev, hw->if_used, hw->alt_used); hw->led_state = 0; /* init the background machinery for control requests */ hw->ctrl_read.bRequestType = 0xc0; hw->ctrl_read.bRequest = 1; hw->ctrl_read.wLength = cpu_to_le16(1); hw->ctrl_write.bRequestType = 0x40; hw->ctrl_write.bRequest = 0; hw->ctrl_write.wLength = 0; usb_fill_control_urb(hw->ctrl_urb, hw->dev, hw->ctrl_out_pipe, (u_char *)&hw->ctrl_write, NULL, 0, (usb_complete_t)ctrl_complete, hw); reset_hfcsusb(hw); return 0; } static void release_hw(struct hfcsusb *hw) { if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); /* * stop all endpoints gracefully * TODO: mISDN_core should generate CLOSE_CHANNEL * signals after calling mISDN_unregister_device() */ hfcsusb_stop_endpoint(hw, HFC_CHAN_D); hfcsusb_stop_endpoint(hw, HFC_CHAN_B1); hfcsusb_stop_endpoint(hw, HFC_CHAN_B2); if (hw->fifos[HFCUSB_PCM_RX].pipe) hfcsusb_stop_endpoint(hw, HFC_CHAN_E); if (hw->protocol == ISDN_P_TE_S0) l1_event(hw->dch.l1, CLOSE_CHANNEL); mISDN_unregister_device(&hw->dch.dev); mISDN_freebchannel(&hw->bch[1]); mISDN_freebchannel(&hw->bch[0]); mISDN_freedchannel(&hw->dch); if (hw->ctrl_urb) { usb_kill_urb(hw->ctrl_urb); usb_free_urb(hw->ctrl_urb); hw->ctrl_urb = NULL; } if (hw->intf) usb_set_intfdata(hw->intf, NULL); list_del(&hw->list); kfree(hw); hw = NULL; } static void deactivate_bchannel(struct bchannel *bch) { struct hfcsusb *hw = bch->hw; u_long flags; if (bch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: %s: bch->nr(%i)\n", hw->name, __func__, bch->nr); spin_lock_irqsave(&hw->lock, flags); mISDN_clear_bchannel(bch); spin_unlock_irqrestore(&hw->lock, flags); hfcsusb_setup_bch(bch, ISDN_P_NONE); hfcsusb_stop_endpoint(hw, bch->nr - 1); } /* * Layer 1 B-channel hardware access */ static int hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct bchannel *bch = container_of(ch, struct bchannel, ch); int ret = 
-EINVAL; if (bch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); switch (cmd) { case HW_TESTRX_RAW: case HW_TESTRX_HDLC: case HW_TESTRX_OFF: ret = -EINVAL; break; case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); deactivate_bchannel(bch); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); ret = 0; break; case CONTROL_CHANNEL: ret = channel_bctrl(bch, arg); break; default: printk(KERN_WARNING "%s: unknown prim(%x)\n", __func__, cmd); } return ret; } static int setup_instance(struct hfcsusb *hw, struct device *parent) { u_long flags; int err, i; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); spin_lock_init(&hw->ctrl_lock); spin_lock_init(&hw->lock); mISDN_initdchannel(&hw->dch, MAX_DFRAME_LEN_L1, ph_state); hw->dch.debug = debug & 0xFFFF; hw->dch.hw = hw; hw->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0); hw->dch.dev.D.send = hfcusb_l2l1D; hw->dch.dev.D.ctrl = hfc_dctrl; /* enable E-Channel logging */ if (hw->fifos[HFCUSB_PCM_RX].pipe) mISDN_initdchannel(&hw->ech, MAX_DFRAME_LEN_L1, NULL); hw->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) | (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)); hw->dch.dev.nrbchan = 2; for (i = 0; i < 2; i++) { hw->bch[i].nr = i + 1; set_channelmap(i + 1, hw->dch.dev.channelmap); hw->bch[i].debug = debug; mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM, poll >> 1); hw->bch[i].hw = hw; hw->bch[i].ch.send = hfcusb_l2l1B; hw->bch[i].ch.ctrl = hfc_bctrl; hw->bch[i].ch.nr = i + 1; list_add(&hw->bch[i].ch.list, &hw->dch.dev.bchannels); } hw->fifos[HFCUSB_B1_TX].bch = &hw->bch[0]; hw->fifos[HFCUSB_B1_RX].bch = &hw->bch[0]; hw->fifos[HFCUSB_B2_TX].bch = &hw->bch[1]; hw->fifos[HFCUSB_B2_RX].bch = &hw->bch[1]; hw->fifos[HFCUSB_D_TX].dch = &hw->dch; hw->fifos[HFCUSB_D_RX].dch = &hw->dch; hw->fifos[HFCUSB_PCM_RX].ech = &hw->ech; hw->fifos[HFCUSB_PCM_TX].ech = &hw->ech; err = setup_hfcsusb(hw); if (err) goto out; snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s.%d", DRIVER_NAME, hfcsusb_cnt + 1); printk(KERN_INFO "%s: registered as '%s'\n", DRIVER_NAME, hw->name); err = mISDN_register_device(&hw->dch.dev, parent, hw->name); if (err) goto out; hfcsusb_cnt++; write_lock_irqsave(&HFClock, flags); list_add_tail(&hw->list, &HFClist); write_unlock_irqrestore(&HFClock, flags); return 0; out: mISDN_freebchannel(&hw->bch[1]); mISDN_freebchannel(&hw->bch[0]); mISDN_freedchannel(&hw->dch); kfree(hw); return err; } static int hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct hfcsusb *hw; struct usb_device *dev = interface_to_usbdev(intf); struct usb_host_interface *iface = intf->cur_altsetting; struct usb_host_interface *iface_used = NULL; struct usb_host_endpoint *ep; struct hfcsusb_vdata *driver_info; int ifnum = iface->desc.bInterfaceNumber, i, idx, alt_idx, probe_alt_setting, vend_idx, cfg_used, *vcf, attr, cfg_found, ep_addr, cmptbl[16], small_match, iso_packet_size, packet_size, alt_used = 0; vend_idx = 0xffff; for (i = 0; hfcsusb_idtab[i].idVendor; i++) { if ((le16_to_cpu(dev->descriptor.idVendor) == hfcsusb_idtab[i].idVendor) && (le16_to_cpu(dev->descriptor.idProduct) == hfcsusb_idtab[i].idProduct)) { vend_idx = i; continue; } } printk(KERN_DEBUG "%s: interface(%d) actalt(%d) minor(%d) vend_idx(%d)\n", __func__, ifnum, iface->desc.bAlternateSetting, intf->minor, vend_idx); if (vend_idx == 0xffff) { printk(KERN_WARNING "%s: no valid vendor found in USB descriptor\n", __func__); return -EIO; } /* if vendor and product ID is OK, start probing 
alternate settings */ alt_idx = 0; small_match = -1; /* default settings */ iso_packet_size = 16; packet_size = 64; while (alt_idx < intf->num_altsetting) { iface = intf->altsetting + alt_idx; probe_alt_setting = iface->desc.bAlternateSetting; cfg_used = 0; while (validconf[cfg_used][0]) { cfg_found = 1; vcf = validconf[cfg_used]; ep = iface->endpoint; memcpy(cmptbl, vcf, 16 * sizeof(int)); /* check for all endpoints in this alternate setting */ for (i = 0; i < iface->desc.bNumEndpoints; i++) { ep_addr = ep->desc.bEndpointAddress; /* get endpoint base */ idx = ((ep_addr & 0x7f) - 1) * 2; if (idx > 15) return -EIO; if (ep_addr & 0x80) idx++; attr = ep->desc.bmAttributes; if (cmptbl[idx] != EP_NOP) { if (cmptbl[idx] == EP_NUL) cfg_found = 0; if (attr == USB_ENDPOINT_XFER_INT && cmptbl[idx] == EP_INT) cmptbl[idx] = EP_NUL; if (attr == USB_ENDPOINT_XFER_BULK && cmptbl[idx] == EP_BLK) cmptbl[idx] = EP_NUL; if (attr == USB_ENDPOINT_XFER_ISOC && cmptbl[idx] == EP_ISO) cmptbl[idx] = EP_NUL; if (attr == USB_ENDPOINT_XFER_INT && ep->desc.bInterval < vcf[17]) { cfg_found = 0; } } ep++; } for (i = 0; i < 16; i++) if (cmptbl[i] != EP_NOP && cmptbl[i] != EP_NUL) cfg_found = 0; if (cfg_found) { if (small_match < cfg_used) { small_match = cfg_used; alt_used = probe_alt_setting; iface_used = iface; } } cfg_used++; } alt_idx++; } /* (alt_idx < intf->num_altsetting) */ /* not found a valid USB Ta Endpoint config */ if (small_match == -1) return -EIO; iface = iface_used; hw = kzalloc(sizeof(struct hfcsusb), GFP_KERNEL); if (!hw) return -ENOMEM; /* got no mem */ snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s", DRIVER_NAME); ep = iface->endpoint; vcf = validconf[small_match]; for (i = 0; i < iface->desc.bNumEndpoints; i++) { struct usb_fifo *f; ep_addr = ep->desc.bEndpointAddress; /* get endpoint base */ idx = ((ep_addr & 0x7f) - 1) * 2; if (ep_addr & 0x80) idx++; f = &hw->fifos[idx & 7]; /* init Endpoints */ if (vcf[idx] == EP_NOP || vcf[idx] == EP_NUL) { ep++; continue; } switch (ep->desc.bmAttributes) { case USB_ENDPOINT_XFER_INT: f->pipe = usb_rcvintpipe(dev, ep->desc.bEndpointAddress); f->usb_transfer_mode = USB_INT; packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); break; case USB_ENDPOINT_XFER_BULK: if (ep_addr & 0x80) f->pipe = usb_rcvbulkpipe(dev, ep->desc.bEndpointAddress); else f->pipe = usb_sndbulkpipe(dev, ep->desc.bEndpointAddress); f->usb_transfer_mode = USB_BULK; packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); break; case USB_ENDPOINT_XFER_ISOC: if (ep_addr & 0x80) f->pipe = usb_rcvisocpipe(dev, ep->desc.bEndpointAddress); else f->pipe = usb_sndisocpipe(dev, ep->desc.bEndpointAddress); f->usb_transfer_mode = USB_ISOC; iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); break; default: f->pipe = 0; } if (f->pipe) { f->fifonum = idx & 7; f->hw = hw; f->usb_packet_maxlen = le16_to_cpu(ep->desc.wMaxPacketSize); f->intervall = ep->desc.bInterval; } ep++; } hw->dev = dev; /* save device */ hw->if_used = ifnum; /* save used interface */ hw->alt_used = alt_used; /* and alternate config */ hw->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */ hw->cfg_used = vcf[16]; /* store used config */ hw->vend_idx = vend_idx; /* store found vendor */ hw->packet_size = packet_size; hw->iso_packet_size = iso_packet_size; /* create the control pipes needed for register access */ hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0); hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0); driver_info = (struct hfcsusb_vdata *) hfcsusb_idtab[vend_idx].driver_info; hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); if 
(!hw->ctrl_urb) { pr_warn("%s: No memory for control urb\n", driver_info->vend_name); kfree(hw); return -ENOMEM; } pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n", hw->name, __func__, driver_info->vend_name, conf_str[small_match], ifnum, alt_used); if (setup_instance(hw, dev->dev.parent)) return -EIO; hw->intf = intf; usb_set_intfdata(hw->intf, hw); return 0; } /* function called when an active device is removed */ static void hfcsusb_disconnect(struct usb_interface *intf) { struct hfcsusb *hw = usb_get_intfdata(intf); struct hfcsusb *next; int cnt = 0; printk(KERN_INFO "%s: device disconnected\n", hw->name); handle_led(hw, LED_POWER_OFF); release_hw(hw); list_for_each_entry_safe(hw, next, &HFClist, list) cnt++; if (!cnt) hfcsusb_cnt = 0; usb_set_intfdata(intf, NULL); } static struct usb_driver hfcsusb_drv = { .name = DRIVER_NAME, .id_table = hfcsusb_idtab, .probe = hfcsusb_probe, .disconnect = hfcsusb_disconnect, .disable_hub_initiated_lpm = 1, }; module_usb_driver(hfcsusb_drv); |
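For reference, the background register-write path in this driver (write_reg(), ctrl_start_transfer(), ctrl_complete()) is a small fixed-size ring buffer drained one USB control transfer at a time: a write is queued under ctrl_lock, only the transition from an empty to a non-empty queue submits a transfer, and every completion retires the oldest entry and submits the next one. The standalone C sketch below mirrors just that queuing discipline with hypothetical names (ctrl_push, ctrl_done, ctrl_start, QSIZE) and a plain counter in place of the URB and locking machinery; it is an illustration under those assumptions, not driver code.

/* Minimal userspace sketch of the write_reg()/ctrl_complete() ring buffer. */
#include <stdio.h>

#define QSIZE 8                         /* stands in for HFC_CTRL_BUFSIZE */

struct ctrl_entry { unsigned char reg, val; };

static struct ctrl_entry q[QSIZE];
static int in_idx, out_idx, cnt;

/* stands in for submitting the URB for the oldest queued write */
static void ctrl_start(void)
{
	printf("submit reg=0x%02x val=0x%02x\n", q[out_idx].reg, q[out_idx].val);
}

/* like write_reg(): queue a write; only the 0 -> 1 transition starts a transfer */
static int ctrl_push(unsigned char reg, unsigned char val)
{
	if (cnt >= QSIZE)
		return 1;                   /* queue full, caller must retry */
	q[in_idx].reg = reg;
	q[in_idx].val = val;
	in_idx = (in_idx + 1) % QSIZE;
	if (++cnt == 1)
		ctrl_start();
	return 0;
}

/* like ctrl_complete(): retire the finished entry, submit the next pending one */
static void ctrl_done(void)
{
	if (!cnt)
		return;
	cnt--;
	out_idx = (out_idx + 1) % QSIZE;
	if (cnt)
		ctrl_start();
}

int main(void)
{
	ctrl_push(0x2a, 0x08);              /* first write submits immediately */
	ctrl_push(0x2b, 0x10);              /* second write is only queued */
	ctrl_done();                        /* completions drain the queue */
	ctrl_done();
	return 0;
}

The point of the 0-to-1 check is that the hardware only ever sees one outstanding control transfer; ordering is preserved by the ring indices rather than by blocking the caller.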
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP)
 *
 * Copyright (c) 2012 Massachusetts Institute of Technology
 *
 * Adapted from code in net/8021q/vlan_gvrp.c
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/mrp.h>
#include "vlan.h"

#define MRP_MVRP_ADDRESS	{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }

enum mvrp_attributes {
	MVRP_ATTR_INVALID,
	MVRP_ATTR_VID,
	__MVRP_ATTR_MAX
};
#define MVRP_ATTR_MAX	(__MVRP_ATTR_MAX - 1)

static struct mrp_application vlan_mrp_app __read_mostly = {
	.type		= MRP_APPLICATION_MVRP,
	.maxattr	= MVRP_ATTR_MAX,
	.pkttype.type	= htons(ETH_P_MVRP),
	.group_address	= MRP_MVRP_ADDRESS,
	.version	= 0,
};

int vlan_mvrp_request_join(const struct net_device *dev)
{
	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	__be16 vlan_id = htons(vlan->vlan_id);

	if (vlan->vlan_proto != htons(ETH_P_8021Q))
		return 0;
	return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
				&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
}

void vlan_mvrp_request_leave(const struct net_device *dev)
{
	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	__be16 vlan_id = htons(vlan->vlan_id);

	if (vlan->vlan_proto != htons(ETH_P_8021Q))
		return;
	mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
			  &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
}

int vlan_mvrp_init_applicant(struct net_device *dev)
{
	return mrp_init_applicant(dev, &vlan_mrp_app);
}

void vlan_mvrp_uninit_applicant(struct net_device *dev)
{
	mrp_uninit_applicant(dev, &vlan_mrp_app);
}

int __init vlan_mvrp_init(void)
{
	return mrp_register_application(&vlan_mrp_app);
}

void vlan_mvrp_uninit(void)
{
	mrp_unregister_application(&vlan_mrp_app);
}
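As the join/leave helpers above show, the single attribute this file registers (MVRP_ATTR_VID) carries nothing more than the 16-bit VLAN ID in network byte order: the __be16 produced by htons() is handed to mrp_request_join() as the attribute value, and all PDU framing is left to the MRP core. The short userspace snippet below only demonstrates that byte layout; the variable names and the example VID are mine, for illustration.

/* Encode a VLAN ID the way vlan_mvrp_request_join() passes it to MRP:
 * a single __be16 in network byte order. Userspace illustration only. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t vid = 100;                     /* example VLAN ID */
	uint16_t attr = htons(vid);             /* MVRP_ATTR_VID payload */
	const unsigned char *p = (const unsigned char *)&attr;

	printf("MVRP VID attribute bytes: %02x %02x\n", p[0], p[1]);
	return 0;
}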
2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 
2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 
3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 
4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 
5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 
5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 
6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 
7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 | // 
SPDX-License-Identifier: GPL-2.0-only /* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * MMU support * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "irq.h" #include "ioapic.h" #include "mmu.h" #include "mmu_internal.h" #include "tdp_mmu.h" #include "x86.h" #include "kvm_cache_regs.h" #include "smm.h" #include "kvm_emulate.h" #include "page_track.h" #include "cpuid.h" #include "spte.h" #include <linux/kvm_host.h> #include <linux/types.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/moduleparam.h> #include <linux/export.h> #include <linux/swap.h> #include <linux/hugetlb.h> #include <linux/compiler.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/kern_levels.h> #include <linux/kstrtox.h> #include <linux/kthread.h> #include <linux/wordpart.h> #include <asm/page.h> #include <asm/memtype.h> #include <asm/cmpxchg.h> #include <asm/io.h> #include <asm/set_memory.h> #include <asm/spec-ctrl.h> #include <asm/vmx.h> #include "trace.h" static bool nx_hugepage_mitigation_hard_disabled; int __read_mostly nx_huge_pages = -1; static uint __read_mostly nx_huge_pages_recovery_period_ms; #ifdef CONFIG_PREEMPT_RT /* Recovery can cause latency spikes, disable it for PREEMPT_RT. */ static uint __read_mostly nx_huge_pages_recovery_ratio = 0; #else static uint __read_mostly nx_huge_pages_recovery_ratio = 60; #endif static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp); static int set_nx_huge_pages(const char *val, const struct kernel_param *kp); static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp); static const struct kernel_param_ops nx_huge_pages_ops = { .set = set_nx_huge_pages, .get = get_nx_huge_pages, }; static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = { .set = set_nx_huge_pages_recovery_param, .get = param_get_uint, }; module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644); __MODULE_PARM_TYPE(nx_huge_pages, "bool"); module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops, &nx_huge_pages_recovery_ratio, 0644); __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint"); module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops, &nx_huge_pages_recovery_period_ms, 0644); __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint"); static bool __read_mostly force_flush_and_sync_on_reuse; module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644); /* * When setting this variable to true it enables Two-Dimensional-Paging * where the hardware walks 2 page tables: * 1. the guest-virtual to guest-physical * 2. while doing 1. it walks guest-physical to host-physical * If the hardware supports that we don't need to do shadow paging. 
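*
* (On Intel hardware this capability is EPT, on AMD it is NPT.  With TDP
* enabled, KVM maintains only the guest-physical to host-physical mappings
* and leaves the guest's own page tables untouched, so no shadow-page
* synchronization is needed.)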
*/ bool tdp_enabled = false; static bool __ro_after_init tdp_mmu_allowed; #ifdef CONFIG_X86_64 bool __read_mostly tdp_mmu_enabled = true; module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444); #endif static int max_huge_page_level __read_mostly; static int tdp_root_level __read_mostly; static int max_tdp_level __read_mostly; #define PTE_PREFETCH_NUM 8 #include <trace/events/kvm.h> /* make pte_list_desc fit well in cache lines */ #define PTE_LIST_EXT 14 /* * struct pte_list_desc is the core data structure used to implement a custom * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a * given GFN when used in the context of rmaps. Using a custom list allows KVM * to optimize for the common case where many GFNs will have at most a handful * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small * memory footprint, which in turn improves runtime performance by exploiting * cache locality. * * A list is comprised of one or more pte_list_desc objects (descriptors). * Each individual descriptor stores up to PTE_LIST_EXT SPTEs. If a descriptor * is full and a new SPTEs needs to be added, a new descriptor is allocated and * becomes the head of the list. This means that by definitions, all tail * descriptors are full. * * Note, the meta data fields are deliberately placed at the start of the * structure to optimize the cacheline layout; accessing the descriptor will * touch only a single cacheline so long as @spte_count<=6 (or if only the * descriptors metadata is accessed). */ struct pte_list_desc { struct pte_list_desc *more; /* The number of PTEs stored in _this_ descriptor. */ u32 spte_count; /* The number of PTEs stored in all tails of this descriptor. */ u32 tail_count; u64 *sptes[PTE_LIST_EXT]; }; struct kvm_shadow_walk_iterator { u64 addr; hpa_t shadow_addr; u64 *sptep; int level; unsigned index; }; #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \ for (shadow_walk_init_using_root(&(_walker), (_vcpu), \ (_root), (_addr)); \ shadow_walk_okay(&(_walker)); \ shadow_walk_next(&(_walker))) #define for_each_shadow_entry(_vcpu, _addr, _walker) \ for (shadow_walk_init(&(_walker), _vcpu, _addr); \ shadow_walk_okay(&(_walker)); \ shadow_walk_next(&(_walker))) #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ for (shadow_walk_init(&(_walker), _vcpu, _addr); \ shadow_walk_okay(&(_walker)) && \ ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \ __shadow_walk_next(&(_walker), spte)) static struct kmem_cache *pte_list_desc_cache; struct kmem_cache *mmu_page_header_cache; static void mmu_spte_set(u64 *sptep, u64 spte); struct kvm_mmu_role_regs { const unsigned long cr0; const unsigned long cr4; const u64 efer; }; #define CREATE_TRACE_POINTS #include "mmutrace.h" /* * Yes, lot's of underscores. They're a hint that you probably shouldn't be * reading from the role_regs. Once the root_role is constructed, it becomes * the single source of truth for the MMU's state. 
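*
* For example, the first invocation below,
* BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG), expands to roughly:
*
*	static inline bool __maybe_unused
*	____is_cr0_pg(const struct kvm_mmu_role_regs *regs)
*	{
*		return !!(regs->cr0 & X86_CR0_PG);
*	}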
*/ #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \ static inline bool __maybe_unused \ ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs) \ { \ return !!(regs->reg & flag); \ } BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG); BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP); BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE); BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE); BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP); BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP); BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE); BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57); BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX); BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA); /* * The MMU itself (with a valid role) is the single source of truth for the * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1, * and the vCPU may be incorrect/irrelevant. */ #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \ static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \ { \ return !!(mmu->cpu_role. base_or_ext . reg##_##name); \ } BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57); BUILD_MMU_ROLE_ACCESSOR(base, efer, nx); BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma); static inline bool is_cr0_pg(struct kvm_mmu *mmu) { return mmu->cpu_role.base.level > 0; } static inline bool is_cr4_pae(struct kvm_mmu *mmu) { return !mmu->cpu_role.base.has_4_byte_gpte; } static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu) { struct kvm_mmu_role_regs regs = { .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS), .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS), .efer = vcpu->arch.efer, }; return regs; } static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu) { return kvm_read_cr3(vcpu); } static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) { if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3) return kvm_read_cr3(vcpu); return mmu->get_guest_pgd(vcpu); } static inline bool kvm_available_flush_remote_tlbs_range(void) { #if IS_ENABLED(CONFIG_HYPERV) return kvm_x86_ops.flush_remote_tlbs_range; #else return false; #endif } static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index); /* Flush the range of guest memory mapped by the given SPTE. 
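* The GFN is looked up from the shadow page's metadata, and the flush is
* scoped to the region mapped at sp->role.level rather than always being a
* single 4K page.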
*/ static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) { struct kvm_mmu_page *sp = sptep_to_sp(sptep); gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); } static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, unsigned int access) { u64 spte = make_mmio_spte(vcpu, gfn, access); trace_mark_mmio_spte(sptep, gfn, spte); mmu_spte_set(sptep, spte); } static gfn_t get_mmio_spte_gfn(u64 spte) { u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN) & shadow_nonpresent_or_rsvd_mask; return gpa >> PAGE_SHIFT; } static unsigned get_mmio_spte_access(u64 spte) { return spte & shadow_mmio_access_mask; } static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) { u64 kvm_gen, spte_gen, gen; gen = kvm_vcpu_memslots(vcpu)->generation; if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)) return false; kvm_gen = gen & MMIO_SPTE_GEN_MASK; spte_gen = get_mmio_spte_generation(spte); trace_check_mmio_spte(spte, kvm_gen, spte_gen); return likely(kvm_gen == spte_gen); } static int is_cpuid_PSE36(void) { return 1; } #ifdef CONFIG_X86_64 static void __set_spte(u64 *sptep, u64 spte) { KVM_MMU_WARN_ON(is_ept_ve_possible(spte)); WRITE_ONCE(*sptep, spte); } static void __update_clear_spte_fast(u64 *sptep, u64 spte) { KVM_MMU_WARN_ON(is_ept_ve_possible(spte)); WRITE_ONCE(*sptep, spte); } static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) { KVM_MMU_WARN_ON(is_ept_ve_possible(spte)); return xchg(sptep, spte); } static u64 __get_spte_lockless(u64 *sptep) { return READ_ONCE(*sptep); } #else union split_spte { struct { u32 spte_low; u32 spte_high; }; u64 spte; }; static void count_spte_clear(u64 *sptep, u64 spte) { struct kvm_mmu_page *sp = sptep_to_sp(sptep); if (is_shadow_present_pte(spte)) return; /* Ensure the spte is completely set before we increase the count */ smp_wmb(); sp->clear_spte_count++; } static void __set_spte(u64 *sptep, u64 spte) { union split_spte *ssptep, sspte; ssptep = (union split_spte *)sptep; sspte = (union split_spte)spte; ssptep->spte_high = sspte.spte_high; /* * If we map the spte from nonpresent to present, We should store * the high bits firstly, then set present bit, so cpu can not * fetch this spte while we are setting the spte. */ smp_wmb(); WRITE_ONCE(ssptep->spte_low, sspte.spte_low); } static void __update_clear_spte_fast(u64 *sptep, u64 spte) { union split_spte *ssptep, sspte; ssptep = (union split_spte *)sptep; sspte = (union split_spte)spte; WRITE_ONCE(ssptep->spte_low, sspte.spte_low); /* * If we map the spte from present to nonpresent, we should clear * present bit firstly to avoid vcpu fetch the old high bits. */ smp_wmb(); ssptep->spte_high = sspte.spte_high; count_spte_clear(sptep, spte); } static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) { union split_spte *ssptep, sspte, orig; ssptep = (union split_spte *)sptep; sspte = (union split_spte)spte; /* xchg acts as a barrier before the setting of the high bits */ orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); orig.spte_high = ssptep->spte_high; ssptep->spte_high = sspte.spte_high; count_spte_clear(sptep, spte); return orig.spte; } /* * The idea using the light way get the spte on x86_32 guest is from * gup_get_pte (mm/gup.c). * * An spte tlb flush may be pending, because they are coalesced and * we are running out of the MMU lock. Therefore * we need to protect against in-progress updates of the spte. 
* * Reading the spte while an update is in progress may get the old value * for the high part of the spte. The race is fine for a present->non-present * change (because the high part of the spte is ignored for non-present spte), * but for a present->present change we must reread the spte. * * All such changes are done in two steps (present->non-present and * non-present->present), hence it is enough to count the number of * present->non-present updates: if it changed while reading the spte, * we might have hit the race. This is done using clear_spte_count. */ static u64 __get_spte_lockless(u64 *sptep) { struct kvm_mmu_page *sp = sptep_to_sp(sptep); union split_spte spte, *orig = (union split_spte *)sptep; int count; retry: count = sp->clear_spte_count; smp_rmb(); spte.spte_low = orig->spte_low; smp_rmb(); spte.spte_high = orig->spte_high; smp_rmb(); if (unlikely(spte.spte_low != orig->spte_low || count != sp->clear_spte_count)) goto retry; return spte.spte; } #endif /* Rules for using mmu_spte_set: * Set the sptep from nonpresent to present. * Note: the sptep being assigned *must* be either not present * or in a state where the hardware will not attempt to update * the spte. */ static void mmu_spte_set(u64 *sptep, u64 new_spte) { WARN_ON_ONCE(is_shadow_present_pte(*sptep)); __set_spte(sptep, new_spte); } /* Rules for using mmu_spte_update: * Update the state bits, it means the mapped pfn is not changed. * * Returns true if the TLB needs to be flushed */ static bool mmu_spte_update(u64 *sptep, u64 new_spte) { u64 old_spte = *sptep; WARN_ON_ONCE(!is_shadow_present_pte(new_spte)); check_spte_writable_invariants(new_spte); if (!is_shadow_present_pte(old_spte)) { mmu_spte_set(sptep, new_spte); return false; } if (!spte_needs_atomic_update(old_spte)) __update_clear_spte_fast(sptep, new_spte); else old_spte = __update_clear_spte_slow(sptep, new_spte); WARN_ON_ONCE(!is_shadow_present_pte(old_spte) || spte_to_pfn(old_spte) != spte_to_pfn(new_spte)); return leaf_spte_change_needs_tlb_flush(old_spte, new_spte); } /* * Rules for using mmu_spte_clear_track_bits: * It sets the sptep from present to nonpresent, and track the * state bits, it is used to clear the last level sptep. * Returns the old PTE. */ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) { u64 old_spte = *sptep; int level = sptep_to_sp(sptep)->role.level; if (!is_shadow_present_pte(old_spte) || !spte_needs_atomic_update(old_spte)) __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE); else old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE); if (!is_shadow_present_pte(old_spte)) return old_spte; kvm_update_page_stats(kvm, level, -1); return old_spte; } /* * Rules for using mmu_spte_clear_no_track: * Directly clear spte without caring the state bits of sptep, * it is used to set the upper level spte. */ static void mmu_spte_clear_no_track(u64 *sptep) { __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE); } static u64 mmu_spte_get_lockless(u64 *sptep) { return __get_spte_lockless(sptep); } static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu) { return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; } static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) { if (is_tdp_mmu_active(vcpu)) { kvm_tdp_mmu_walk_lockless_begin(); } else { /* * Prevent page table teardown by making any free-er wait during * kvm_flush_remote_tlbs() IPI to all active vcpus. */ local_irq_disable(); /* * Make sure a following spte read is not reordered ahead of the write * to vcpu->mode. 
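* smp_store_mb() is a store followed by a full memory barrier, so the new
* mode is visible to the TLB-flushing side before any SPTE is read in this
* walk.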
*/ smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); } } static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) { if (is_tdp_mmu_active(vcpu)) { kvm_tdp_mmu_walk_lockless_end(); } else { /* * Make sure the write to vcpu->mode is not reordered in front of * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. */ smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); local_irq_enable(); } } static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect) { int r; /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */ r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM); if (r) return r; if (kvm_has_mirrored_tdp(vcpu->kvm)) { r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_external_spt_cache, PT64_ROOT_MAX_LEVEL); if (r) return r; } r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache, PT64_ROOT_MAX_LEVEL); if (r) return r; if (maybe_indirect) { r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache, PT64_ROOT_MAX_LEVEL); if (r) return r; } return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, PT64_ROOT_MAX_LEVEL); } static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) { kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache); } static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) { kmem_cache_free(pte_list_desc_cache, pte_list_desc); } static bool sp_has_gptes(struct kvm_mmu_page *sp); static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) { if (sp->role.passthrough) return sp->gfn; if (sp->shadowed_translation) return sp->shadowed_translation[index] >> PAGE_SHIFT; return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); } /* * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note * that the SPTE itself may have a more constrained access permissions that * what the guest enforces. For example, a guest may create an executable * huge PTE but KVM may disallow execution to mitigate iTLB multihit. */ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index) { if (sp->shadowed_translation) return sp->shadowed_translation[index] & ACC_ALL; /* * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs, * KVM is not shadowing any guest page tables, so the "guest access * permissions" are just ACC_ALL. * * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM * is shadowing a guest huge page with small pages, the guest access * permissions being shadowed are the access permissions of the huge * page. * * In both cases, sp->role.access contains the correct access bits. */ return sp->role.access; } static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, gfn_t gfn, unsigned int access) { if (sp->shadowed_translation) { sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access; return; } WARN_ONCE(access != kvm_mmu_page_get_access(sp, index), "access mismatch under %s page %llx (expected %u, got %u)\n", sp->role.passthrough ? 
"passthrough" : "direct", sp->gfn, kvm_mmu_page_get_access(sp, index), access); WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index), "gfn mismatch under %s page %llx (expected %llx, got %llx)\n", sp->role.passthrough ? "passthrough" : "direct", sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn); } static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index, unsigned int access) { gfn_t gfn = kvm_mmu_page_get_gfn(sp, index); kvm_mmu_page_set_translation(sp, index, gfn, access); } /* * Return the pointer to the large page information for a given gfn, * handling slots that are not large page aligned. */ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, const struct kvm_memory_slot *slot, int level) { unsigned long idx; idx = gfn_to_index(gfn, slot->base_gfn, level); return &slot->arch.lpage_info[level - 2][idx]; } /* * The most significant bit in disallow_lpage tracks whether or not memory * attributes are mixed, i.e. not identical for all gfns at the current level. * The lower order bits are used to refcount other cases where a hugepage is * disallowed, e.g. if KVM has shadow a page table at the gfn. */ #define KVM_LPAGE_MIXED_FLAG BIT(31) static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot, gfn_t gfn, int count) { struct kvm_lpage_info *linfo; int old, i; for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { linfo = lpage_info_slot(gfn, slot, i); old = linfo->disallow_lpage; linfo->disallow_lpage += count; WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG); } } void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) { update_gfn_disallow_lpage_count(slot, gfn, 1); } void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) { update_gfn_disallow_lpage_count(slot, gfn, -1); } static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; gfn_t gfn; kvm->arch.indirect_shadow_pages++; /* * Ensure indirect_shadow_pages is elevated prior to re-reading guest * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight * emulated writes are visible before re-reading guest PTEs, or that * an emulated write will see the elevated count and acquire mmu_lock * to update SPTEs. Pairs with the smp_mb() in kvm_mmu_track_write(). */ smp_mb(); gfn = sp->gfn; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); /* the non-leaf shadow pages are keeping readonly. */ if (sp->role.level > PG_LEVEL_4K) return __kvm_write_track_add_gfn(kvm, slot, gfn); kvm_mmu_gfn_disallow_lpage(slot, gfn); if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K); } void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) { /* * If it's possible to replace the shadow page with an NX huge page, * i.e. if the shadow page is the only thing currently preventing KVM * from using a huge page, add the shadow page to the list of "to be * zapped for NX recovery" pages. Note, the shadow page can already be * on the list if KVM is reusing an existing shadow page, i.e. if KVM * links a shadow page at multiple points. 
*/ if (!list_empty(&sp->possible_nx_huge_page_link)) return; ++kvm->stat.nx_lpage_splits; list_add_tail(&sp->possible_nx_huge_page_link, &kvm->arch.possible_nx_huge_pages); } static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, bool nx_huge_page_possible) { sp->nx_huge_page_disallowed = true; if (nx_huge_page_possible) track_possible_nx_huge_page(kvm, sp); } static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; gfn_t gfn; kvm->arch.indirect_shadow_pages--; gfn = sp->gfn; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); if (sp->role.level > PG_LEVEL_4K) return __kvm_write_track_remove_gfn(kvm, slot, gfn); kvm_mmu_gfn_allow_lpage(slot, gfn); } void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) { if (list_empty(&sp->possible_nx_huge_page_link)) return; --kvm->stat.nx_lpage_splits; list_del_init(&sp->possible_nx_huge_page_link); } static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) { sp->nx_huge_page_disallowed = false; untrack_possible_nx_huge_page(kvm, sp); } static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) { struct kvm_memory_slot *slot; slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return NULL; if (no_dirty_log && kvm_slot_dirty_track_enabled(slot)) return NULL; return slot; } /* * About rmap_head encoding: * * If the bit zero of rmap_head->val is clear, then it points to the only spte * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct * pte_list_desc containing more mappings. */ #define KVM_RMAP_MANY BIT(0) /* * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always * operates with mmu_lock held for write), but rmaps can be walked without * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain * being zapped/dropped _while the rmap is locked_. * * Other than the KVM_RMAP_LOCKED flag, modifications to rmap entries must be * done while holding mmu_lock for write. This allows a task walking rmaps * without holding mmu_lock to concurrently walk the same entries as a task * that is holding mmu_lock but _not_ the rmap lock. Neither task will modify * the rmaps, thus the walks are stable. * * As alluded to above, SPTEs in rmaps are _not_ protected by KVM_RMAP_LOCKED, * only the rmap chains themselves are protected. E.g. holding an rmap's lock * ensures all "struct pte_list_desc" fields are stable. */ #define KVM_RMAP_LOCKED BIT(1) static unsigned long __kvm_rmap_lock(struct kvm_rmap_head *rmap_head) { unsigned long old_val, new_val; lockdep_assert_preemption_disabled(); /* * Elide the lock if the rmap is empty, as lockless walkers (read-only * mode) don't need to (and can't) walk an empty rmap, nor can they add * entries to the rmap. I.e. the only paths that process empty rmaps * do so while holding mmu_lock for write, and are mutually exclusive. */ old_val = atomic_long_read(&rmap_head->val); if (!old_val) return 0; do { /* * If the rmap is locked, wait for it to be unlocked before * trying acquire the lock, e.g. to avoid bouncing the cache * line. */ while (old_val & KVM_RMAP_LOCKED) { cpu_relax(); old_val = atomic_long_read(&rmap_head->val); } /* * Recheck for an empty rmap, it may have been purged by the * task that held the lock. 
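* (pte_list_remove() and kvm_zap_all_rmap_sptes() store 0 as the new rmap
* value when they empty the chain, so the rmap can legitimately become
* empty while this CPU was spinning on the lock.)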
*/ if (!old_val) return 0; new_val = old_val | KVM_RMAP_LOCKED; /* * Use try_cmpxchg_acquire() to prevent reads and writes to the rmap * from being reordered outside of the critical section created by * __kvm_rmap_lock(). * * Pairs with the atomic_long_set_release() in kvm_rmap_unlock(). * * For the !old_val case, no ordering is needed, as there is no rmap * to walk. */ } while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val)); /* * Return the old value, i.e. _without_ the LOCKED bit set. It's * impossible for the return value to be 0 (see above), i.e. the read- * only unlock flow can't get a false positive and fail to unlock. */ return old_val; } static unsigned long kvm_rmap_lock(struct kvm *kvm, struct kvm_rmap_head *rmap_head) { lockdep_assert_held_write(&kvm->mmu_lock); return __kvm_rmap_lock(rmap_head); } static void __kvm_rmap_unlock(struct kvm_rmap_head *rmap_head, unsigned long val) { KVM_MMU_WARN_ON(val & KVM_RMAP_LOCKED); /* * Ensure that all accesses to the rmap have completed before unlocking * the rmap. * * Pairs with the atomic_long_try_cmpxchg_acquire() in __kvm_rmap_lock(). */ atomic_long_set_release(&rmap_head->val, val); } static void kvm_rmap_unlock(struct kvm *kvm, struct kvm_rmap_head *rmap_head, unsigned long new_val) { lockdep_assert_held_write(&kvm->mmu_lock); __kvm_rmap_unlock(rmap_head, new_val); } static unsigned long kvm_rmap_get(struct kvm_rmap_head *rmap_head) { return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED; } /* * If mmu_lock isn't held, rmaps can only be locked in read-only mode. The * actual locking is the same, but the caller is disallowed from modifying the * rmap, and so the unlock flow is a nop if the rmap is/was empty. */ static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head) { unsigned long rmap_val; preempt_disable(); rmap_val = __kvm_rmap_lock(rmap_head); if (!rmap_val) preempt_enable(); return rmap_val; } static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head, unsigned long old_val) { if (!old_val) return; KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head)); __kvm_rmap_unlock(rmap_head, old_val); preempt_enable(); } /* * Returns the number of pointers in the rmap chain, not counting the new one. */ static int pte_list_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, u64 *spte, struct kvm_rmap_head *rmap_head) { unsigned long old_val, new_val; struct pte_list_desc *desc; int count = 0; old_val = kvm_rmap_lock(kvm, rmap_head); if (!old_val) { new_val = (unsigned long)spte; } else if (!(old_val & KVM_RMAP_MANY)) { desc = kvm_mmu_memory_cache_alloc(cache); desc->sptes[0] = (u64 *)old_val; desc->sptes[1] = spte; desc->spte_count = 2; desc->tail_count = 0; new_val = (unsigned long)desc | KVM_RMAP_MANY; ++count; } else { desc = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY); count = desc->tail_count + desc->spte_count; /* * If the previous head is full, allocate a new head descriptor * as tail descriptors are always kept full. 
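		 *
		 * Purely illustrative sketch, making no assumption about the
		 * exact value of PTE_LIST_EXT: after adding PTE_LIST_EXT + 3
		 * SPTEs the chain looks like
		 *
		 *   rmap_head.val = (unsigned long)head | KVM_RMAP_MANY
		 *   head: spte_count = 3,            tail_count = PTE_LIST_EXT
		 *   tail: spte_count = PTE_LIST_EXT, tail_count = 0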
*/ if (desc->spte_count == PTE_LIST_EXT) { desc = kvm_mmu_memory_cache_alloc(cache); desc->more = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY); desc->spte_count = 0; desc->tail_count = count; new_val = (unsigned long)desc | KVM_RMAP_MANY; } else { new_val = old_val; } desc->sptes[desc->spte_count++] = spte; } kvm_rmap_unlock(kvm, rmap_head, new_val); return count; } static void pte_list_desc_remove_entry(struct kvm *kvm, unsigned long *rmap_val, struct pte_list_desc *desc, int i) { struct pte_list_desc *head_desc = (struct pte_list_desc *)(*rmap_val & ~KVM_RMAP_MANY); int j = head_desc->spte_count - 1; /* * The head descriptor should never be empty. A new head is added only * when adding an entry and the previous head is full, and heads are * removed (this flow) when they become empty. */ KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm); /* * Replace the to-be-freed SPTE with the last valid entry from the head * descriptor to ensure that tail descriptors are full at all times. * Note, this also means that tail_count is stable for each descriptor. */ desc->sptes[i] = head_desc->sptes[j]; head_desc->sptes[j] = NULL; head_desc->spte_count--; if (head_desc->spte_count) return; /* * The head descriptor is empty. If there are no tail descriptors, * nullify the rmap head to mark the list as empty, else point the rmap * head at the next descriptor, i.e. the new head. */ if (!head_desc->more) *rmap_val = 0; else *rmap_val = (unsigned long)head_desc->more | KVM_RMAP_MANY; mmu_free_pte_list_desc(head_desc); } static void pte_list_remove(struct kvm *kvm, u64 *spte, struct kvm_rmap_head *rmap_head) { struct pte_list_desc *desc; unsigned long rmap_val; int i; rmap_val = kvm_rmap_lock(kvm, rmap_head); if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_val, kvm)) goto out; if (!(rmap_val & KVM_RMAP_MANY)) { if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_val != spte, kvm)) goto out; rmap_val = 0; } else { desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY); while (desc) { for (i = 0; i < desc->spte_count; ++i) { if (desc->sptes[i] == spte) { pte_list_desc_remove_entry(kvm, &rmap_val, desc, i); goto out; } } desc = desc->more; } KVM_BUG_ON_DATA_CORRUPTION(true, kvm); } out: kvm_rmap_unlock(kvm, rmap_head, rmap_val); } static void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, u64 *sptep) { mmu_spte_clear_track_bits(kvm, sptep); pte_list_remove(kvm, sptep, rmap_head); } /* Return true if at least one SPTE was zapped, false otherwise */ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head) { struct pte_list_desc *desc, *next; unsigned long rmap_val; int i; rmap_val = kvm_rmap_lock(kvm, rmap_head); if (!rmap_val) return false; if (!(rmap_val & KVM_RMAP_MANY)) { mmu_spte_clear_track_bits(kvm, (u64 *)rmap_val); goto out; } desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY); for (; desc; desc = next) { for (i = 0; i < desc->spte_count; i++) mmu_spte_clear_track_bits(kvm, desc->sptes[i]); next = desc->more; mmu_free_pte_list_desc(desc); } out: /* rmap_head is meaningless now, remember to reset it */ kvm_rmap_unlock(kvm, rmap_head, 0); return true; } unsigned int pte_list_count(struct kvm_rmap_head *rmap_head) { unsigned long rmap_val = kvm_rmap_get(rmap_head); struct pte_list_desc *desc; if (!rmap_val) return 0; else if (!(rmap_val & KVM_RMAP_MANY)) return 1; desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY); return desc->tail_count + desc->spte_count; } static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level, const struct kvm_memory_slot *slot) { 
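	/*
	 * Each memslot keeps one rmap array per mapping level: index [0]
	 * covers 4KiB mappings, [1] 2MiB and [2] 1GiB.  As an illustrative
	 * example, the 2MiB rmap entry for a gfn lives at index
	 * (gfn >> 9) - (slot->base_gfn >> 9), which is what gfn_to_index()
	 * computes below.
	 */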
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	struct kvm_rmap_head *rmap_head;

	sp = sptep_to_sp(spte);
	gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));

	/*
	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU,
	 * so we have to determine which memslots to use based on context
	 * information in sp->role.
	 */
	slots = kvm_memslots_for_spte_role(kvm, sp->role);

	slot = __gfn_to_memslot(slots, gfn);
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);

	pte_list_remove(kvm, spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
{
	unsigned long rmap_val = kvm_rmap_get(rmap_head);

	if (!rmap_val)
		return NULL;

	if (!(rmap_val & KVM_RMAP_MANY)) {
		iter->desc = NULL;
		return (u64 *)rmap_val;
	}

	iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
	iter->pos = 0;
	return iter->desc->sptes[iter->pos];
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			if (iter->desc->sptes[iter->pos])
				return iter->desc->sptes[iter->pos];
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
			return iter->desc->sptes[iter->pos];
		}
	}

	return NULL;
}

#define __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)		\
	for (_sptep_ = rmap_get_first(_rmap_head_, _iter_);		\
	     _sptep_; _sptep_ = rmap_get_next(_iter_))

#define for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)		\
	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)		\
		if (!WARN_ON_ONCE(!is_shadow_present_pte(*(_sptep_))))

#define for_each_rmap_spte_lockless(_rmap_head_, _iter_, _sptep_, _spte_)	\
	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
		if (is_shadow_present_pte(_spte_ = mmu_spte_get_lockless(_sptep_)))

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);

	if (is_shadow_present_pte(old_spte))
		rmap_remove(kvm, sptep);
}

static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
{
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(sptep);
	WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);

	drop_spte(kvm, sptep);

	if (flush)
		kvm_flush_remote_tlbs_sptep(kvm, sptep);
}

/*
 * Write-protect the specified @sptep.  @pt_protect indicates whether the
 * write-protection is caused by protecting a shadow page table.
 *
 * Note: write protection differs between dirty logging and spte protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   the shadow page.
 *
 * Return true if the TLB needs to be flushed.
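 *
 * As a concrete sketch of the two cases handled below: dirty logging clears
 * only PT_WRITABLE_MASK (the SPTE stays MMU-writable), whereas @pt_protect
 * additionally clears shadow_mmu_writable_mask, so the SPTE cannot be made
 * writable again without going through the unsync machinery.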
*/ static bool spte_write_protect(u64 *sptep, bool pt_protect) { u64 spte = *sptep; if (!is_writable_pte(spte) && !(pt_protect && is_mmu_writable_spte(spte))) return false; if (pt_protect) spte &= ~shadow_mmu_writable_mask; spte = spte & ~PT_WRITABLE_MASK; return mmu_spte_update(sptep, spte); } static bool rmap_write_protect(struct kvm_rmap_head *rmap_head, bool pt_protect) { u64 *sptep; struct rmap_iterator iter; bool flush = false; for_each_rmap_spte(rmap_head, &iter, sptep) flush |= spte_write_protect(sptep, pt_protect); return flush; } static bool spte_clear_dirty(u64 *sptep) { u64 spte = *sptep; KVM_MMU_WARN_ON(!spte_ad_enabled(spte)); spte &= ~shadow_dirty_mask; return mmu_spte_update(sptep, spte); } /* * Gets the GFN ready for another round of dirty logging by clearing the * - D bit on ad-enabled SPTEs, and * - W bit on ad-disabled SPTEs. * Returns true iff any D or W bits were cleared. */ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) { u64 *sptep; struct rmap_iterator iter; bool flush = false; for_each_rmap_spte(rmap_head, &iter, sptep) { if (spte_ad_need_write_protect(*sptep)) flush |= test_and_clear_bit(PT_WRITABLE_SHIFT, (unsigned long *)sptep); else flush |= spte_clear_dirty(sptep); } return flush; } static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) { struct kvm_rmap_head *rmap_head; if (tdp_mmu_enabled) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, true); if (!kvm_memslots_have_rmaps(kvm)) return; while (mask) { rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), PG_LEVEL_4K, slot); rmap_write_protect(rmap_head, false); /* clear the first set bit */ mask &= mask - 1; } } static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) { struct kvm_rmap_head *rmap_head; if (tdp_mmu_enabled) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, false); if (!kvm_memslots_have_rmaps(kvm)) return; while (mask) { rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), PG_LEVEL_4K, slot); __rmap_clear_dirty(kvm, rmap_head, slot); /* clear the first set bit */ mask &= mask - 1; } } void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) { /* * If the slot was assumed to be "initially all dirty", write-protect * huge pages to ensure they are split to 4KiB on the first write (KVM * dirty logs at 4KiB granularity). If eager page splitting is enabled, * immediately try to split huge pages, e.g. so that vCPUs don't get * saddled with the cost of splitting. * * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn * of memslot has no such restriction, so the range can cross two large * pages. */ if (kvm_dirty_log_manual_protect_and_init_set(kvm)) { gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask); gfn_t end = slot->base_gfn + gfn_offset + __fls(mask); if (READ_ONCE(eager_page_split)) kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K); kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M); /* Cross two large pages? */ if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) != ALIGN(end << PAGE_SHIFT, PMD_SIZE)) kvm_mmu_slot_gfn_write_protect(kvm, slot, end, PG_LEVEL_2M); } /* * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in * mask. 
If PML is enabled and the GFN doesn't need to be write- * protected for other reasons, e.g. shadow paging, clear the Dirty bit. * Otherwise clear the Writable bit. * * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is * enabled but it chooses between clearing the Dirty bit and Writeable * bit based on the context. */ if (kvm_x86_ops.cpu_dirty_log_size) kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask); else kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); } int kvm_cpu_dirty_log_size(void) { return kvm_x86_ops.cpu_dirty_log_size; } bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level) { struct kvm_rmap_head *rmap_head; int i; bool write_protected = false; if (kvm_memslots_have_rmaps(kvm)) { for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { rmap_head = gfn_to_rmap(gfn, i, slot); write_protected |= rmap_write_protect(rmap_head, true); } } if (tdp_mmu_enabled) write_protected |= kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level); return write_protected; } static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn) { struct kvm_memory_slot *slot; slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K); } static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) { return kvm_zap_all_rmap_sptes(kvm, rmap_head); } struct slot_rmap_walk_iterator { /* input fields. */ const struct kvm_memory_slot *slot; gfn_t start_gfn; gfn_t end_gfn; int start_level; int end_level; /* output fields. */ gfn_t gfn; struct kvm_rmap_head *rmap; int level; /* private field. */ struct kvm_rmap_head *end_rmap; }; static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) { iterator->level = level; iterator->gfn = iterator->start_gfn; iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot); iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot); } static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, const struct kvm_memory_slot *slot, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn) { iterator->slot = slot; iterator->start_level = start_level; iterator->end_level = end_level; iterator->start_gfn = start_gfn; iterator->end_gfn = end_gfn; rmap_walk_init_level(iterator, iterator->start_level); } static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator) { return !!iterator->rmap; } static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) { while (++iterator->rmap <= iterator->end_rmap) { iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level); if (atomic_long_read(&iterator->rmap->val)) return; } if (++iterator->level > iterator->end_level) { iterator->rmap = NULL; return; } rmap_walk_init_level(iterator, iterator->level); } #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \ _start_gfn, _end_gfn, _iter_) \ for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \ _end_level_, _start_gfn, _end_gfn); \ slot_rmap_walk_okay(_iter_); \ slot_rmap_walk_next(_iter_)) /* The return value indicates if tlb flush on all vcpus is needed. 
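 *
 * For example, kvm_zap_rmap() above matches this signature and is the handler
 * that __kvm_rmap_zap_gfn_range() below passes to __walk_slot_rmaps():
 *
 *   static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 *                            const struct kvm_memory_slot *slot);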
*/ typedef bool (*slot_rmaps_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot); static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool can_yield, bool flush_on_yield, bool flush) { struct slot_rmap_walk_iterator iterator; lockdep_assert_held_write(&kvm->mmu_lock); for_each_slot_rmap_range(slot, start_level, end_level, start_gfn, end_gfn, &iterator) { if (iterator.rmap) flush |= fn(kvm, iterator.rmap, slot); if (!can_yield) continue; if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { if (flush && flush_on_yield) { kvm_flush_remote_tlbs_range(kvm, start_gfn, iterator.gfn - start_gfn + 1); flush = false; } cond_resched_rwlock_write(&kvm->mmu_lock); } } return flush; } static __always_inline bool walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, int start_level, int end_level, bool flush_on_yield) { return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level, slot->base_gfn, slot->base_gfn + slot->npages - 1, true, flush_on_yield, false); } static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, bool flush_on_yield) { return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield); } static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, bool can_yield, bool flush) { return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, start, end - 1, can_yield, true, flush); } bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { bool flush = false; /* * To prevent races with vCPUs faulting in a gfn using stale data, * zapping a gfn range must be protected by mmu_invalidate_in_progress * (and mmu_invalidate_seq). The only exception is memslot deletion; * in that case, SRCU synchronization ensures that SPTEs are zapped * after all vCPUs have unlocked SRCU, guaranteeing that vCPUs see the * invalid slot. 
*/ lockdep_assert_once(kvm->mmu_invalidate_in_progress || lockdep_is_held(&kvm->slots_lock)); if (kvm_memslots_have_rmaps(kvm)) flush = __kvm_rmap_zap_gfn_range(kvm, range->slot, range->start, range->end, range->may_block, flush); if (tdp_mmu_enabled) flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); if (kvm_x86_ops.set_apic_access_page_addr && range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); return flush; } #define RMAP_RECYCLE_THRESHOLD 1000 static void __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) { struct kvm_mmu_page *sp; struct kvm_rmap_head *rmap_head; int rmap_count; sp = sptep_to_sp(spte); kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access); kvm_update_page_stats(kvm, sp->role.level, 1); rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); rmap_count = pte_list_add(kvm, cache, spte, rmap_head); if (rmap_count > kvm->stat.max_mmu_rmap_size) kvm->stat.max_mmu_rmap_size = rmap_count; if (rmap_count > RMAP_RECYCLE_THRESHOLD) { kvm_zap_all_rmap_sptes(kvm, rmap_head); kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); } } static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) { struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache; __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access); } static bool kvm_rmap_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool test_only) { struct kvm_rmap_head *rmap_head; struct rmap_iterator iter; unsigned long rmap_val; bool young = false; u64 *sptep; gfn_t gfn; int level; u64 spte; for (level = PG_LEVEL_4K; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) { for (gfn = range->start; gfn < range->end; gfn += KVM_PAGES_PER_HPAGE(level)) { rmap_head = gfn_to_rmap(gfn, level, range->slot); rmap_val = kvm_rmap_lock_readonly(rmap_head); for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte) { if (!is_accessed_spte(spte)) continue; if (test_only) { kvm_rmap_unlock_readonly(rmap_head, rmap_val); return true; } if (spte_ad_enabled(spte)) clear_bit((ffs(shadow_accessed_mask) - 1), (unsigned long *)sptep); else /* * If the following cmpxchg fails, the * spte is being concurrently modified * and should most likely stay young. 
*/ cmpxchg64(sptep, spte, mark_spte_for_access_track(spte)); young = true; } kvm_rmap_unlock_readonly(rmap_head, rmap_val); } } return young; } static bool kvm_may_have_shadow_mmu_sptes(struct kvm *kvm) { return !tdp_mmu_enabled || READ_ONCE(kvm->arch.indirect_shadow_pages); } bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { bool young = false; if (tdp_mmu_enabled) young = kvm_tdp_mmu_age_gfn_range(kvm, range); if (kvm_may_have_shadow_mmu_sptes(kvm)) young |= kvm_rmap_age_gfn_range(kvm, range, false); return young; } bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { bool young = false; if (tdp_mmu_enabled) young = kvm_tdp_mmu_test_age_gfn(kvm, range); if (young) return young; if (kvm_may_have_shadow_mmu_sptes(kvm)) young |= kvm_rmap_age_gfn_range(kvm, range, true); return young; } static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp) { #ifdef CONFIG_KVM_PROVE_MMU int i; for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i]))) pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free", sp->spt[i], &sp->spt[i], kvm_mmu_page_get_gfn(sp, i)); } #endif } static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) { kvm->arch.n_used_mmu_pages++; kvm_account_pgtable_pages((void *)sp->spt, +1); } static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) { kvm->arch.n_used_mmu_pages--; kvm_account_pgtable_pages((void *)sp->spt, -1); } static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp) { kvm_mmu_check_sptes_at_free(sp); hlist_del(&sp->hash_link); list_del(&sp->link); free_page((unsigned long)sp->spt); free_page((unsigned long)sp->shadowed_translation); kmem_cache_free(mmu_page_header_cache, sp); } static unsigned kvm_page_table_hashfn(gfn_t gfn) { return hash_64(gfn, KVM_MMU_HASH_SHIFT); } static void mmu_page_add_parent_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, struct kvm_mmu_page *sp, u64 *parent_pte) { if (!parent_pte) return; pte_list_add(kvm, cache, parent_pte, &sp->parent_ptes); } static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte) { pte_list_remove(kvm, parent_pte, &sp->parent_ptes); } static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte) { mmu_page_remove_parent_pte(kvm, sp, parent_pte); mmu_spte_clear_no_track(parent_pte); } static void mark_unsync(u64 *spte); static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) { u64 *sptep; struct rmap_iterator iter; for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { mark_unsync(sptep); } } static void mark_unsync(u64 *spte) { struct kvm_mmu_page *sp; sp = sptep_to_sp(spte); if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap)) return; if (sp->unsync_children++) return; kvm_mmu_mark_parents_unsync(sp); } #define KVM_PAGE_ARRAY_NR 16 struct kvm_mmu_pages { struct mmu_page_and_offset { struct kvm_mmu_page *sp; unsigned int idx; } page[KVM_PAGE_ARRAY_NR]; unsigned int nr; }; static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, int idx) { int i; if (sp->unsync) for (i=0; i < pvec->nr; i++) if (pvec->page[i].sp == sp) return 0; pvec->page[pvec->nr].sp = sp; pvec->page[pvec->nr].idx = idx; pvec->nr++; return (pvec->nr == KVM_PAGE_ARRAY_NR); } static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) { --sp->unsync_children; WARN_ON_ONCE((int)sp->unsync_children < 0); __clear_bit(idx, sp->unsync_child_bitmap); } static int __mmu_unsync_walk(struct 
kvm_mmu_page *sp, struct kvm_mmu_pages *pvec) { int i, ret, nr_unsync_leaf = 0; for_each_set_bit(i, sp->unsync_child_bitmap, 512) { struct kvm_mmu_page *child; u64 ent = sp->spt[i]; if (!is_shadow_present_pte(ent) || is_large_pte(ent)) { clear_unsync_child_bit(sp, i); continue; } child = spte_to_child_sp(ent); if (child->unsync_children) { if (mmu_pages_add(pvec, child, i)) return -ENOSPC; ret = __mmu_unsync_walk(child, pvec); if (!ret) { clear_unsync_child_bit(sp, i); continue; } else if (ret > 0) { nr_unsync_leaf += ret; } else return ret; } else if (child->unsync) { nr_unsync_leaf++; if (mmu_pages_add(pvec, child, i)) return -ENOSPC; } else clear_unsync_child_bit(sp, i); } return nr_unsync_leaf; } #define INVALID_INDEX (-1) static int mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec) { pvec->nr = 0; if (!sp->unsync_children) return 0; mmu_pages_add(pvec, sp, INVALID_INDEX); return __mmu_unsync_walk(sp, pvec); } static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) { WARN_ON_ONCE(!sp->unsync); trace_kvm_mmu_sync_page(sp); sp->unsync = 0; --kvm->stat.mmu_unsync; } static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list); static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list); static bool sp_has_gptes(struct kvm_mmu_page *sp) { if (sp->role.direct) return false; if (sp->role.passthrough) return false; return true; } #define for_each_valid_sp(_kvm, _sp, _list) \ hlist_for_each_entry(_sp, _list, hash_link) \ if (is_obsolete_sp((_kvm), (_sp))) { \ } else #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \ for_each_valid_sp(_kvm, _sp, \ &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \ if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role; /* * Ignore various flags when verifying that it's safe to sync a shadow * page using the current MMU context. * * - level: not part of the overall MMU role and will never match as the MMU's * level tracks the root level * - access: updated based on the new guest PTE * - quadrant: not part of the overall MMU role (similar to level) */ const union kvm_mmu_page_role sync_role_ign = { .level = 0xf, .access = 0x7, .quadrant = 0x3, .passthrough = 0x1, }; /* * Direct pages can never be unsync, and KVM should never attempt to * sync a shadow page for a different MMU context, e.g. if the role * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the * reserved bits checks will be wrong, etc... */ if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte || (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) return false; return true; } static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i) { /* sp->spt[i] has initial value of shadow page table allocation */ if (sp->spt[i] == SHADOW_NONPRESENT_VALUE) return 0; return vcpu->arch.mmu->sync_spte(vcpu, sp, i); } static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { int flush = 0; int i; if (!kvm_sync_page_check(vcpu, sp)) return -1; for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { int ret = kvm_sync_spte(vcpu, sp, i); if (ret < -1) return -1; flush |= ret; } /* * Note, any flush is purely for KVM's correctness, e.g. when dropping * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier * unmap or dirty logging event doesn't fail to flush. 
The guest is * responsible for flushing the TLB to ensure any changes in protection * bits are recognized, i.e. until the guest flushes or page faults on * a relevant address, KVM is architecturally allowed to let vCPUs use * cached translations with the old protection bits. */ return flush; } static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { int ret = __kvm_sync_page(vcpu, sp); if (ret < 0) kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return ret; } static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, struct list_head *invalid_list, bool remote_flush) { if (!remote_flush && list_empty(invalid_list)) return false; if (!list_empty(invalid_list)) kvm_mmu_commit_zap_page(kvm, invalid_list); else kvm_flush_remote_tlbs(kvm); return true; } static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) { if (sp->role.invalid) return true; /* TDP MMU pages do not use the MMU generation. */ return !is_tdp_mmu_page(sp) && unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); } struct mmu_page_path { struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL]; unsigned int idx[PT64_ROOT_MAX_LEVEL]; }; #define for_each_sp(pvec, sp, parents, i) \ for (i = mmu_pages_first(&pvec, &parents); \ i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ i = mmu_pages_next(&pvec, &parents, i)) static int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, int i) { int n; for (n = i+1; n < pvec->nr; n++) { struct kvm_mmu_page *sp = pvec->page[n].sp; unsigned idx = pvec->page[n].idx; int level = sp->role.level; parents->idx[level-1] = idx; if (level == PG_LEVEL_4K) break; parents->parent[level-2] = sp; } return n; } static int mmu_pages_first(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents) { struct kvm_mmu_page *sp; int level; if (pvec->nr == 0) return 0; WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX); sp = pvec->page[0].sp; level = sp->role.level; WARN_ON_ONCE(level == PG_LEVEL_4K); parents->parent[level-2] = sp; /* Also set up a sentinel. Further entries in pvec are all * children of sp, so this element is never overwritten. 
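	 *
	 * Illustrative example: for a walk rooted at a level-4 sp, parent[2]
	 * is the root and parent[3] is the NULL sentinel, so
	 * mmu_pages_clear_parents() stops once it has walked up past the
	 * root.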
*/ parents->parent[level-1] = NULL; return mmu_pages_next(pvec, parents, 0); } static void mmu_pages_clear_parents(struct mmu_page_path *parents) { struct kvm_mmu_page *sp; unsigned int level = 0; do { unsigned int idx = parents->idx[level]; sp = parents->parent[level]; if (!sp) return; WARN_ON_ONCE(idx == INVALID_INDEX); clear_unsync_child_bit(sp, idx); level++; } while (!sp->unsync_children); } static int mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *parent, bool can_yield) { int i; struct kvm_mmu_page *sp; struct mmu_page_path parents; struct kvm_mmu_pages pages; LIST_HEAD(invalid_list); bool flush = false; while (mmu_unsync_walk(parent, &pages)) { bool protected = false; for_each_sp(pages, sp, parents, i) protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn); if (protected) { kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true); flush = false; } for_each_sp(pages, sp, parents, i) { kvm_unlink_unsync_page(vcpu->kvm, sp); flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0; mmu_pages_clear_parents(&parents); } if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); if (!can_yield) { kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); return -EINTR; } cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); flush = false; } } kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); return 0; } static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) { atomic_set(&sp->write_flooding_count, 0); } static void clear_sp_write_flooding_count(u64 *spte) { __clear_sp_write_flooding_count(sptep_to_sp(spte)); } /* * The vCPU is required when finding indirect shadow pages; the shadow * page may already exist and syncing it needs the vCPU pointer in * order to read guest page tables. Direct shadow pages are never * unsync, thus @vcpu can be NULL if @role.direct is true. */ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) { struct kvm_mmu_page *sp; int ret; int collisions = 0; LIST_HEAD(invalid_list); for_each_valid_sp(kvm, sp, sp_list) { if (sp->gfn != gfn) { collisions++; continue; } if (sp->role.word != role.word) { /* * If the guest is creating an upper-level page, zap * unsync pages for the same gfn. While it's possible * the guest is using recursive page tables, in all * likelihood the guest has stopped using the unsync * page and is installing a completely unrelated page. * Unsync pages must not be left as is, because the new * upper-level page will be write-protected. */ if (role.level > PG_LEVEL_4K && sp->unsync) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); continue; } /* unsync and write-flooding only apply to indirect SPs. */ if (sp->role.direct) goto out; if (sp->unsync) { if (KVM_BUG_ON(!vcpu, kvm)) break; /* * The page is good, but is stale. kvm_sync_page does * get the latest guest state, but (unlike mmu_unsync_children) * it doesn't write-protect the page or mark it synchronized! * This way the validity of the mapping is ensured, but the * overhead of write protection is not incurred until the * guest invalidates the TLB mapping. This allows multiple * SPs for a single gfn to be unsync. * * If the sync fails, the page is zapped. If so, break * in order to rebuild it. 
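			 *
			 * To illustrate how the return value is consumed
			 * below: ret < 0 means the sync failed and the page
			 * was zapped (break and rebuild it), ret > 0 means
			 * SPTEs were modified and a remote TLB flush is
			 * needed, and ret == 0 means the page can simply be
			 * reused.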
*/ ret = kvm_sync_page(vcpu, sp, &invalid_list); if (ret < 0) break; WARN_ON_ONCE(!list_empty(&invalid_list)); if (ret > 0) kvm_flush_remote_tlbs(kvm); } __clear_sp_write_flooding_count(sp); goto out; } sp = NULL; ++kvm->stat.mmu_cache_miss; out: kvm_mmu_commit_zap_page(kvm, &invalid_list); if (collisions > kvm->stat.max_mmu_page_hash_collisions) kvm->stat.max_mmu_page_hash_collisions = collisions; return sp; } /* Caches used when allocating a new shadow page. */ struct shadow_page_caches { struct kvm_mmu_memory_cache *page_header_cache; struct kvm_mmu_memory_cache *shadow_page_cache; struct kvm_mmu_memory_cache *shadowed_info_cache; }; static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm, struct shadow_page_caches *caches, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) { struct kvm_mmu_page *sp; sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache); sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache); if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL) sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache); set_page_private(virt_to_page(sp->spt), (unsigned long)sp); INIT_LIST_HEAD(&sp->possible_nx_huge_page_link); /* * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages() * depends on valid pages being added to the head of the list. See * comments in kvm_zap_obsolete_pages(). */ sp->mmu_valid_gen = kvm->arch.mmu_valid_gen; list_add(&sp->link, &kvm->arch.active_mmu_pages); kvm_account_mmu_page(kvm, sp); sp->gfn = gfn; sp->role = role; hlist_add_head(&sp->hash_link, sp_list); if (sp_has_gptes(sp)) account_shadowed(kvm, sp); return sp; } /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */ static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, struct shadow_page_caches *caches, gfn_t gfn, union kvm_mmu_page_role role) { struct hlist_head *sp_list; struct kvm_mmu_page *sp; bool created = false; sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]; sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role); if (!sp) { created = true; sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role); } trace_kvm_mmu_get_page(sp, created); return sp; } static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu, gfn_t gfn, union kvm_mmu_page_role role) { struct shadow_page_caches caches = { .page_header_cache = &vcpu->arch.mmu_page_header_cache, .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache, .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache, }; return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role); } static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsigned int access) { struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep); union kvm_mmu_page_role role; role = parent_sp->role; role.level--; role.access = access; role.direct = direct; role.passthrough = 0; /* * If the guest has 4-byte PTEs then that means it's using 32-bit, * 2-level, non-PAE paging. KVM shadows such guests with PAE paging * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must * shadow each guest page table with multiple shadow page tables, which * requires extra bookkeeping in the role. * * Specifically, to shadow the guest's page directory (which covers a * 4GiB address space), KVM uses 4 PAE page directories, each mapping * 1GiB of the address space. @role.quadrant encodes which quarter of * the address space each maps. 
* * To shadow the guest's page tables (which each map a 4MiB region), KVM * uses 2 PAE page tables, each mapping a 2MiB region. For these, * @role.quadrant encodes which half of the region they map. * * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE * consumes bits 29:21. To consume bits 31:30, KVM's uses 4 shadow * PDPTEs; those 4 PAE page directories are pre-allocated and their * quadrant is assigned in mmu_alloc_root(). A 4-byte PTE consumes * bits 21:12, while an 8-byte PTE consumes bits 20:12. To consume * bit 21 in the PTE (the child here), KVM propagates that bit to the * quadrant, i.e. sets quadrant to '0' or '1'. The parent 8-byte PDE * covers bit 21 (see above), thus the quadrant is calculated from the * _least_ significant bit of the PDE index. */ if (role.has_4_byte_gpte) { WARN_ON_ONCE(role.level != PG_LEVEL_4K); role.quadrant = spte_index(sptep) & 1; } return role; } static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, bool direct, unsigned int access) { union kvm_mmu_page_role role; if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) return ERR_PTR(-EEXIST); role = kvm_mmu_child_role(sptep, direct, access); return kvm_mmu_get_shadow_page(vcpu, gfn, role); } static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, hpa_t root, u64 addr) { iterator->addr = addr; iterator->shadow_addr = root; iterator->level = vcpu->arch.mmu->root_role.level; if (iterator->level >= PT64_ROOT_4LEVEL && vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL && !vcpu->arch.mmu->root_role.direct) iterator->level = PT32E_ROOT_LEVEL; if (iterator->level == PT32E_ROOT_LEVEL) { /* * prev_root is currently only used for 64-bit hosts. So only * the active root_hpa is valid here. */ BUG_ON(root != vcpu->arch.mmu->root.hpa); iterator->shadow_addr = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; iterator->shadow_addr &= SPTE_BASE_ADDR_MASK; --iterator->level; if (!iterator->shadow_addr) iterator->level = 0; } } static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, u64 addr) { shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa, addr); } static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) { if (iterator->level < PG_LEVEL_4K) return false; iterator->index = SPTE_INDEX(iterator->addr, iterator->level); iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; return true; } static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte) { if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) { iterator->level = 0; return; } iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK; --iterator->level; } static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) { __shadow_walk_next(iterator, *iterator->sptep); } static void __link_shadow_page(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, u64 *sptep, struct kvm_mmu_page *sp, bool flush) { u64 spte; BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK); /* * If an SPTE is present already, it must be a leaf and therefore * a large one. Drop it, and flush the TLB if needed, before * installing sp. */ if (is_shadow_present_pte(*sptep)) drop_large_spte(kvm, sptep, flush); spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp)); mmu_spte_set(sptep, spte); mmu_page_add_parent_pte(kvm, cache, sp, sptep); /* * The non-direct sub-pagetable must be updated before linking. 
For * L1 sp, the pagetable is updated via kvm_sync_page() in * kvm_mmu_find_shadow_page() without write-protecting the gfn, * so sp->unsync can be true or false. For higher level non-direct * sp, the pagetable is updated/synced via mmu_sync_children() in * FNAME(fetch)(), so sp->unsync_children can only be false. * WARN_ON_ONCE() if anything happens unexpectedly. */ if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync) mark_unsync(sptep); } static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, struct kvm_mmu_page *sp) { __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true); } static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned direct_access) { if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { struct kvm_mmu_page *child; /* * For the direct sp, if the guest pte's dirty bit * changed form clean to dirty, it will corrupt the * sp's access: allow writable in the read-only sp, * so we should update the spte at this point to get * a new sp with the correct access. */ child = spte_to_child_sp(*sptep); if (child->role.access == direct_access) return; drop_parent_pte(vcpu->kvm, child, sptep); kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep); } } /* Returns the number of zapped non-leaf child shadow pages. */ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte, struct list_head *invalid_list) { u64 pte; struct kvm_mmu_page *child; pte = *spte; if (is_shadow_present_pte(pte)) { if (is_last_spte(pte, sp->role.level)) { drop_spte(kvm, spte); } else { child = spte_to_child_sp(pte); drop_parent_pte(kvm, child, spte); /* * Recursively zap nested TDP SPs, parentless SPs are * unlikely to be used again in the near future. This * avoids retaining a large number of stale nested SPs. */ if (tdp_enabled && invalid_list && child->role.guest_mode && !atomic_long_read(&child->parent_ptes.val)) return kvm_mmu_prepare_zap_page(kvm, child, invalid_list); } } else if (is_mmio_spte(kvm, pte)) { mmu_spte_clear_no_track(spte); } return 0; } static int kvm_mmu_page_unlink_children(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list) { int zapped = 0; unsigned i; for (i = 0; i < SPTE_ENT_PER_PAGE; ++i) zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list); return zapped; } static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) { u64 *sptep; struct rmap_iterator iter; while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) drop_parent_pte(kvm, sp, sptep); } static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *parent, struct list_head *invalid_list) { int i, zapped = 0; struct mmu_page_path parents; struct kvm_mmu_pages pages; if (parent->role.level == PG_LEVEL_4K) return 0; while (mmu_unsync_walk(parent, &pages)) { struct kvm_mmu_page *sp; for_each_sp(pages, sp, parents, i) { kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); mmu_pages_clear_parents(&parents); zapped++; } } return zapped; } static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list, int *nr_zapped) { bool list_unstable, zapped_root = false; lockdep_assert_held_write(&kvm->mmu_lock); trace_kvm_mmu_prepare_zap_page(sp); ++kvm->stat.mmu_shadow_zapped; *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list); kvm_mmu_unlink_parents(kvm, sp); /* Zapping children means active_mmu_pages has become unstable. 
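	 *
	 * Callers that are iterating over active_mmu_pages use the returned
	 * "unstable" flag to restart their walk, e.g. see the goto in
	 * kvm_mmu_zap_oldest_mmu_pages() below.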
*/ list_unstable = *nr_zapped; if (!sp->role.invalid && sp_has_gptes(sp)) unaccount_shadowed(kvm, sp); if (sp->unsync) kvm_unlink_unsync_page(kvm, sp); if (!sp->root_count) { /* Count self */ (*nr_zapped)++; /* * Already invalid pages (previously active roots) are not on * the active page list. See list_del() in the "else" case of * !sp->root_count. */ if (sp->role.invalid) list_add(&sp->link, invalid_list); else list_move(&sp->link, invalid_list); kvm_unaccount_mmu_page(kvm, sp); } else { /* * Remove the active root from the active page list, the root * will be explicitly freed when the root_count hits zero. */ list_del(&sp->link); /* * Obsolete pages cannot be used on any vCPUs, see the comment * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also * treats invalid shadow pages as being obsolete. */ zapped_root = !is_obsolete_sp(kvm, sp); } if (sp->nx_huge_page_disallowed) unaccount_nx_huge_page(kvm, sp); sp->role.invalid = 1; /* * Make the request to free obsolete roots after marking the root * invalid, otherwise other vCPUs may not see it as invalid. */ if (zapped_root) kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS); return list_unstable; } static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list) { int nr_zapped; __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); return nr_zapped; } static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list) { struct kvm_mmu_page *sp, *nsp; if (list_empty(invalid_list)) return; /* * We need to make sure everyone sees our modifications to * the page tables and see changes to vcpu->mode here. The barrier * in the kvm_flush_remote_tlbs() achieves this. This pairs * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end. * * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit * guest mode and/or lockless shadow page table walks. */ kvm_flush_remote_tlbs(kvm); list_for_each_entry_safe(sp, nsp, invalid_list, link) { WARN_ON_ONCE(!sp->role.invalid || sp->root_count); kvm_mmu_free_shadow_page(sp); } } static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, unsigned long nr_to_zap) { unsigned long total_zapped = 0; struct kvm_mmu_page *sp, *tmp; LIST_HEAD(invalid_list); bool unstable; int nr_zapped; if (list_empty(&kvm->arch.active_mmu_pages)) return 0; restart: list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) { /* * Don't zap active root pages, the page itself can't be freed * and zapping it will just force vCPUs to realloc and reload. */ if (sp->root_count) continue; unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &nr_zapped); total_zapped += nr_zapped; if (total_zapped >= nr_to_zap) break; if (unstable) goto restart; } kvm_mmu_commit_zap_page(kvm, &invalid_list); kvm->stat.mmu_recycled += total_zapped; return total_zapped; } static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm) { if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) return kvm->arch.n_max_mmu_pages - kvm->arch.n_used_mmu_pages; return 0; } static int make_mmu_pages_available(struct kvm_vcpu *vcpu) { unsigned long avail = kvm_mmu_available_pages(vcpu->kvm); if (likely(avail >= KVM_MIN_FREE_MMU_PAGES)) return 0; kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail); /* * Note, this check is intentionally soft, it only guarantees that one * page is available, while the caller may end up allocating as many as * four pages, e.g. for PAE roots or for 5-level paging. 
Temporarily * exceeding the (arbitrary by default) limit will not harm the host, * being too aggressive may unnecessarily kill the guest, and getting an * exact count is far more trouble than it's worth, especially in the * page fault paths. */ if (!kvm_mmu_available_pages(vcpu->kvm)) return -ENOSPC; return 0; } /* * Changing the number of mmu pages allocated to the vm * Note: if goal_nr_mmu_pages is too small, you will get dead lock */ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) { write_lock(&kvm->mmu_lock); if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - goal_nr_mmu_pages); goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; } kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; write_unlock(&kvm->mmu_lock); } bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, bool always_retry) { struct kvm *kvm = vcpu->kvm; LIST_HEAD(invalid_list); struct kvm_mmu_page *sp; gpa_t gpa = cr2_or_gpa; bool r = false; /* * Bail early if there aren't any write-protected shadow pages to avoid * unnecessarily taking mmu_lock lock, e.g. if the gfn is write-tracked * by a third party. Reading indirect_shadow_pages without holding * mmu_lock is safe, as this is purely an optimization, i.e. a false * positive is benign, and a false negative will simply result in KVM * skipping the unprotect+retry path, which is also an optimization. */ if (!READ_ONCE(kvm->arch.indirect_shadow_pages)) goto out; if (!vcpu->arch.mmu->root_role.direct) { gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); if (gpa == INVALID_GPA) goto out; } write_lock(&kvm->mmu_lock); for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa)) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); /* * Snapshot the result before zapping, as zapping will remove all list * entries, i.e. checking the list later would yield a false negative. */ r = !list_empty(&invalid_list); kvm_mmu_commit_zap_page(kvm, &invalid_list); write_unlock(&kvm->mmu_lock); out: if (r || always_retry) { vcpu->arch.last_retry_eip = kvm_rip_read(vcpu); vcpu->arch.last_retry_addr = cr2_or_gpa; } return r; } static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) { trace_kvm_mmu_unsync_page(sp); ++kvm->stat.mmu_unsync; sp->unsync = 1; kvm_mmu_mark_parents_unsync(sp); } /* * Attempt to unsync any shadow pages that can be reached by the specified gfn, * KVM is creating a writable mapping for said gfn. Returns 0 if all pages * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must * be write-protected. */ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool synchronizing, bool prefetch) { struct kvm_mmu_page *sp; bool locked = false; /* * Force write-protection if the page is being tracked. Note, the page * track machinery is used to write-protect upper-level shadow pages, * i.e. this guards the role.level == 4K assertion below! */ if (kvm_gfn_is_write_tracked(kvm, slot, gfn)) return -EPERM; /* * The page is not write-tracked, mark existing shadow pages unsync * unless KVM is synchronizing an unsync SP. In that case, KVM must * complete emulation of the guest TLB flush before allowing shadow * pages to become unsync (writable by the guest). 
*/ for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) { if (synchronizing) return -EPERM; if (sp->unsync) continue; if (prefetch) return -EEXIST; /* * TDP MMU page faults require an additional spinlock as they * run with mmu_lock held for read, not write, and the unsync * logic is not thread safe. Take the spinklock regardless of * the MMU type to avoid extra conditionals/parameters, there's * no meaningful penalty if mmu_lock is held for write. */ if (!locked) { locked = true; spin_lock(&kvm->arch.mmu_unsync_pages_lock); /* * Recheck after taking the spinlock, a different vCPU * may have since marked the page unsync. A false * negative on the unprotected check above is not * possible as clearing sp->unsync _must_ hold mmu_lock * for write, i.e. unsync cannot transition from 1->0 * while this CPU holds mmu_lock for read (or write). */ if (READ_ONCE(sp->unsync)) continue; } WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K); kvm_unsync_page(kvm, sp); } if (locked) spin_unlock(&kvm->arch.mmu_unsync_pages_lock); /* * We need to ensure that the marking of unsync pages is visible * before the SPTE is updated to allow writes because * kvm_mmu_sync_roots() checks the unsync flags without holding * the MMU lock and so can race with this. If the SPTE was updated * before the page had been marked as unsync-ed, something like the * following could happen: * * CPU 1 CPU 2 * --------------------------------------------------------------------- * 1.2 Host updates SPTE * to be writable * 2.1 Guest writes a GPTE for GVA X. * (GPTE being in the guest page table shadowed * by the SP from CPU 1.) * This reads SPTE during the page table walk. * Since SPTE.W is read as 1, there is no * fault. * * 2.2 Guest issues TLB flush. * That causes a VM Exit. * * 2.3 Walking of unsync pages sees sp->unsync is * false and skips the page. * * 2.4 Guest accesses GVA X. * Since the mapping in the SP was not updated, * so the old mapping for GVA X incorrectly * gets used. * 1.1 Host marks SP * as unsync * (sp->unsync = true) * * The write barrier below ensures that 1.1 happens before 1.2 and thus * the situation in 2.4 does not arise. It pairs with the read barrier * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3. */ smp_wmb(); return 0; } static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault) { struct kvm_mmu_page *sp = sptep_to_sp(sptep); int level = sp->role.level; int was_rmapped = 0; int ret = RET_PF_FIXED; bool flush = false; bool wrprot; u64 spte; /* Prefetching always gets a writable pfn. */ bool host_writable = !fault || fault->map_writable; bool prefetch = !fault || fault->prefetch; bool write_fault = fault && fault->write; if (unlikely(is_noslot_pfn(pfn))) { vcpu->stat.pf_mmio_spte_created++; mark_mmio_spte(vcpu, sptep, gfn, pte_access); return RET_PF_EMULATE; } if (is_shadow_present_pte(*sptep)) { if (prefetch) return RET_PF_SPURIOUS; /* * If we overwrite a PTE page pointer with a 2MB PMD, unlink * the parent of the now unreachable PTE. 
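		 *
		 * Illustrative summary of the three cases handled below: a
		 * huge SPTE replacing a page-table pointer drops the child's
		 * parent PTE and flushes, a different pfn at the same level
		 * drops the old SPTE and flushes, and the same pfn merely
		 * updates the attributes of the already-rmapped SPTE.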
*/ if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) { struct kvm_mmu_page *child; u64 pte = *sptep; child = spte_to_child_sp(pte); drop_parent_pte(vcpu->kvm, child, sptep); flush = true; } else if (pfn != spte_to_pfn(*sptep)) { drop_spte(vcpu->kvm, sptep); flush = true; } else was_rmapped = 1; } wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch, false, host_writable, &spte); if (*sptep == spte) { ret = RET_PF_SPURIOUS; } else { flush |= mmu_spte_update(sptep, spte); trace_kvm_mmu_set_spte(level, gfn, sptep); } if (wrprot && write_fault) ret = RET_PF_WRITE_PROTECTED; if (flush) kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level); if (!was_rmapped) { WARN_ON_ONCE(ret == RET_PF_SPURIOUS); rmap_add(vcpu, slot, sptep, gfn, pte_access); } else { /* Already rmapped but the pte_access bits may have changed. */ kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access); } return ret; } static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep, int nr_pages, unsigned int access) { struct page *pages[PTE_PREFETCH_NUM]; struct kvm_memory_slot *slot; int i; if (WARN_ON_ONCE(nr_pages > PTE_PREFETCH_NUM)) return false; slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); if (!slot) return false; nr_pages = kvm_prefetch_pages(slot, gfn, pages, nr_pages); if (nr_pages <= 0) return false; for (i = 0; i < nr_pages; i++, gfn++, sptep++) { mmu_set_spte(vcpu, slot, sptep, access, gfn, page_to_pfn(pages[i]), NULL); /* * KVM always prefetches writable pages from the primary MMU, * and KVM can make its SPTE writable in the fast page handler, * without notifying the primary MMU. Mark pages/folios dirty * now to ensure file data is written back if it ends up being * written by the guest. Because KVM's prefetching GUPs * writable PTEs, the probability of unnecessary writeback is * extremely low. */ kvm_release_page_dirty(pages[i]); } return true; } static bool direct_pte_prefetch_many(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *start, u64 *end) { gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(start)); unsigned int access = sp->role.access; return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access); } static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *sptep) { u64 *spte, *start = NULL; int i; WARN_ON_ONCE(!sp->role.direct); i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1); spte = sp->spt + i; for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { if (is_shadow_present_pte(*spte) || spte == sptep) { if (!start) continue; if (!direct_pte_prefetch_many(vcpu, sp, start, spte)) return; start = NULL; } else if (!start) start = spte; } if (start) direct_pte_prefetch_many(vcpu, sp, start, spte); } static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) { struct kvm_mmu_page *sp; sp = sptep_to_sp(sptep); /* * Without accessed bits, there's no way to distinguish between * actually accessed translations and prefetched, so disable pte * prefetch if accessed bits aren't available. */ if (sp_ad_disabled(sp)) return; if (sp->role.level > PG_LEVEL_4K) return; /* * If addresses are being invalidated, skip prefetching to avoid * accidentally prefetching those addresses. */ if (unlikely(vcpu->kvm->mmu_invalidate_in_progress)) return; __direct_pte_prefetch(vcpu, sp, sptep); } /* * Lookup the mapping level for @gfn in the current mm. * * WARNING! Use of host_pfn_mapping_level() requires the caller and the end * consumer to be tied into KVM's handlers for MMU notifier events! 
* * There are several ways to safely use this helper: * * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before * consuming it. In this case, mmu_lock doesn't need to be held during the * lookup, but it does need to be held while checking the MMU notifier. * * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation * event for the hva. This can be done by explicitly checking the MMU notifier * or by ensuring that KVM already has a valid mapping that covers the hva. * * - Do not use the result to install new mappings, e.g. use the host mapping * level only to decide whether or not to zap an entry. In this case, it's * not required to hold mmu_lock (though it's highly likely the caller will * want to hold mmu_lock anyways, e.g. to modify SPTEs). * * Note! The lookup can still race with modifications to host page tables, but * the above "rules" ensure KVM will not _consume_ the result of the walk if a * race with the primary MMU occurs. */ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, const struct kvm_memory_slot *slot) { int level = PG_LEVEL_4K; unsigned long hva; unsigned long flags; pgd_t pgd; p4d_t p4d; pud_t pud; pmd_t pmd; /* * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() * is not solely for performance, it's also necessary to avoid the * "writable" check in __gfn_to_hva_many(), which will always fail on * read-only memslots due to gfn_to_hva() assuming writes. Earlier * page fault steps have already verified the guest isn't writing a * read-only memslot. */ hva = __gfn_to_hva_memslot(slot, gfn); /* * Disable IRQs to prevent concurrent tear down of host page tables, * e.g. if the primary MMU promotes a P*D to a huge page and then frees * the original page table. */ local_irq_save(flags); /* * Read each entry once. As above, a non-leaf entry can be promoted to * a huge page _during_ this walk. Re-reading the entry could send the * walk into the weeds, e.g. p*d_leaf() returns false (sees the old * value) and then p*d_offset() walks into the target huge page instead * of the old page table (sees the new value).
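* Hence each table entry below is snapshotted with READ_ONCE() into a local pgd/p4d/pud/pmd, and only that local copy is tested and passed to the next p*d_offset() lookup.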
*/ pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); if (pgd_none(pgd)) goto out; p4d = READ_ONCE(*p4d_offset(&pgd, hva)); if (p4d_none(p4d) || !p4d_present(p4d)) goto out; pud = READ_ONCE(*pud_offset(&p4d, hva)); if (pud_none(pud) || !pud_present(pud)) goto out; if (pud_leaf(pud)) { level = PG_LEVEL_1G; goto out; } pmd = READ_ONCE(*pmd_offset(&pud, hva)); if (pmd_none(pmd) || !pmd_present(pmd)) goto out; if (pmd_leaf(pmd)) level = PG_LEVEL_2M; out: local_irq_restore(flags); return level; } static int __kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level, bool is_private) { struct kvm_lpage_info *linfo; int host_level; max_level = min(max_level, max_huge_page_level); for ( ; max_level > PG_LEVEL_4K; max_level--) { linfo = lpage_info_slot(gfn, slot, max_level); if (!linfo->disallow_lpage) break; } if (is_private) return max_level; if (max_level == PG_LEVEL_4K) return PG_LEVEL_4K; host_level = host_pfn_mapping_level(kvm, gfn, slot); return min(host_level, max_level); } int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn) { bool is_private = kvm_slot_can_be_private(slot) && kvm_mem_is_private(kvm, gfn); return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private); } void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_memory_slot *slot = fault->slot; kvm_pfn_t mask; fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled; if (unlikely(fault->max_level == PG_LEVEL_4K)) return; if (is_error_noslot_pfn(fault->pfn)) return; if (kvm_slot_dirty_track_enabled(slot)) return; /* * Enforce the iTLB multihit workaround after capturing the requested * level, which will be used to do precise, accurate accounting. */ fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot, fault->gfn, fault->max_level, fault->is_private); if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed) return; /* * mmu_invalidate_retry() was successful and mmu_lock is held, so * the pmd can't be split from under us. */ fault->goal_level = fault->req_level; mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1; VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask)); fault->pfn &= ~mask; } void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level) { if (cur_level > PG_LEVEL_4K && cur_level == fault->goal_level && is_shadow_present_pte(spte) && !is_large_pte(spte) && spte_to_child_sp(spte)->nx_huge_page_disallowed) { /* * A small SPTE exists for this pfn, but FNAME(fetch), * direct_map(), or kvm_tdp_mmu_map() would like to create a * large PTE instead: just force them to go down another level, * patching back for them into pfn the next 9 bits of the * address. */ u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) - KVM_PAGES_PER_HPAGE(cur_level - 1); fault->pfn |= fault->gfn & page_mask; fault->goal_level--; } } static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_shadow_walk_iterator it; struct kvm_mmu_page *sp; int ret; gfn_t base_gfn = fault->gfn; kvm_mmu_hugepage_adjust(vcpu, fault); trace_kvm_mmu_spte_requested(fault); for_each_shadow_entry(vcpu, fault->addr, it) { /* * We cannot overwrite existing page tables with an NX * large page, as the leaf could be executable. 
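* Instead, when the existing child page is flagged nx_huge_page_disallowed, disallowed_hugepage_adjust() below lowers fault->goal_level so the walk continues and installs a smaller mapping further down.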
*/ if (fault->nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(fault, *it.sptep, it.level); base_gfn = gfn_round_for_level(fault->gfn, it.level); if (it.level == fault->goal_level) break; sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL); if (sp == ERR_PTR(-EEXIST)) continue; link_shadow_page(vcpu, it.sptep, sp); if (fault->huge_page_disallowed) account_nx_huge_page(vcpu->kvm, sp, fault->req_level >= it.level); } if (WARN_ON_ONCE(it.level != fault->goal_level)) return -EFAULT; ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL, base_gfn, fault->pfn, fault); if (ret == RET_PF_SPURIOUS) return ret; direct_pte_prefetch(vcpu, it.sptep); return ret; } static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) { unsigned long hva = gfn_to_hva_memslot(slot, gfn); send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current); } static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { if (is_sigpending_pfn(fault->pfn)) { kvm_handle_signal_exit(vcpu); return -EINTR; } /* * Do not cache the mmio info caused by writing the readonly gfn * into the spte otherwise read access on readonly gfn also can * caused mmio page fault and treat it as mmio access. */ if (fault->pfn == KVM_PFN_ERR_RO_FAULT) return RET_PF_EMULATE; if (fault->pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(fault->slot, fault->gfn); return RET_PF_RETRY; } return -EFAULT; } static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access) { gva_t gva = fault->is_tdp ? 0 : fault->addr; if (fault->is_private) { kvm_mmu_prepare_memory_fault_exit(vcpu, fault); return -EFAULT; } vcpu_cache_mmio_info(vcpu, gva, fault->gfn, access & shadow_mmio_access_mask); fault->slot = NULL; fault->pfn = KVM_PFN_NOSLOT; fault->map_writable = false; /* * If MMIO caching is disabled, emulate immediately without * touching the shadow page tables as attempting to install an * MMIO SPTE will just be an expensive nop. */ if (unlikely(!enable_mmio_caching)) return RET_PF_EMULATE; /* * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR, * any guest that generates such gfns is running nested and is being * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and * only if L1's MAXPHYADDR is inaccurate with respect to the * hardware's). */ if (unlikely(fault->gfn > kvm_mmu_max_gfn())) return RET_PF_EMULATE; return RET_PF_CONTINUE; } static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault) { /* * Page faults with reserved bits set, i.e. faults on MMIO SPTEs, only * reach the common page fault handler if the SPTE has an invalid MMIO * generation number. Refreshing the MMIO generation needs to go down * the slow path. Note, EPT Misconfigs do NOT set the PRESENT flag! */ if (fault->rsvd) return false; /* * For hardware-protected VMs, certain conditions like attempting to * perform a write to a page which is not in the state that the guest * expects it to be in can result in a nested/extended #PF. In this * case, the below code might misconstrue this situation as being the * result of a write-protected access, and treat it as a spurious case * rather than taking any action to satisfy the real source of the #PF * such as generating a KVM_EXIT_MEMORY_FAULT. This can lead to the * guest spinning on a #PF indefinitely, so don't attempt the fast path * in this case. 
* * Note that the kvm_mem_is_private() check might race with an * attribute update, but this will either result in the guest spinning * on RET_PF_SPURIOUS until the update completes, or an actual spurious * case might go down the slow path. Either case will resolve itself. */ if (kvm->arch.has_private_mem && fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) return false; /* * #PF can be fast if: * * 1. The shadow page table entry is not present and A/D bits are * disabled _by KVM_, which could mean that the fault is potentially * caused by access tracking (if enabled). If A/D bits are enabled * by KVM, but disabled by L1 for L2, KVM is forced to disable A/D * bits for L2 and employ access tracking, but the fast page fault * mechanism only supports direct MMUs. * 2. The shadow page table entry is present, the access is a write, * and no reserved bits are set (MMIO SPTEs cannot be "fixed"), i.e. * the fault was caused by a write-protection violation. If the * SPTE is MMU-writable (determined later), the fault can be fixed * by setting the Writable bit, which can be done out of mmu_lock. */ if (!fault->present) return !kvm_ad_enabled; /* * Note, instruction fetches and writes are mutually exclusive, ignore * the "exec" flag. */ return fault->write; } /* * Returns true if the SPTE was fixed successfully. Otherwise, * someone else modified the SPTE from its original value. */ static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, u64 *sptep, u64 old_spte, u64 new_spte) { /* * Theoretically we could also set dirty bit (and flush TLB) here in * order to eliminate unnecessary PML logging. See comments in * set_spte. But fast_page_fault is very unlikely to happen with PML * enabled, so we do not do this. This might result in the same GPA * being logged in the PML buffer again when the write really happens, and * in mark_page_dirty eventually being called twice. But it's also no * harm. This also avoids the TLB flush needed after setting dirty bit * so non-PML cases won't be impacted. * * Compare with make_spte() where instead shadow_dirty_mask is set. */ if (!try_cmpxchg64(sptep, &old_spte, new_spte)) return false; if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn); return true; } /* * Returns the last level spte pointer of the shadow page walk for the given * gpa, and sets *spte to the spte value. This spte may be non-present. If no * walk could be performed, returns NULL and *spte does not contain valid data. * * Contract: * - Must be called between walk_shadow_page_lockless_{begin,end}. * - The returned sptep must not be used after walk_shadow_page_lockless_end. */ static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte) { struct kvm_shadow_walk_iterator iterator; u64 old_spte; u64 *sptep = NULL; for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) { sptep = iterator.sptep; *spte = old_spte; } return sptep; } /* * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
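* * Roughly: walk to the last SPTE locklessly, check whether the access is already allowed (spurious fault), otherwise compute a new SPTE (restore access-tracked bits and/or set the Writable bit) and publish it with a cmpxchg; on contention the loop below retries a handful of times before giving up.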
*/ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_mmu_page *sp; int ret = RET_PF_INVALID; u64 spte; u64 *sptep; uint retry_count = 0; if (!page_fault_can_be_fast(vcpu->kvm, fault)) return ret; walk_shadow_page_lockless_begin(vcpu); do { u64 new_spte; if (tdp_mmu_enabled) sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte); else sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); /* * It's entirely possible for the mapping to have been zapped * by a different task, but the root page should always be * available as the vCPU holds a reference to its root(s). */ if (WARN_ON_ONCE(!sptep)) spte = FROZEN_SPTE; if (!is_shadow_present_pte(spte)) break; sp = sptep_to_sp(sptep); if (!is_last_spte(spte, sp->role.level)) break; /* * Check whether the memory access that caused the fault would * still cause it if it were to be performed right now. If not, * then this is a spurious fault caused by TLB lazily flushed, * or some other CPU has already fixed the PTE after the * current CPU took the fault. * * Need not check the access of upper level table entries since * they are always ACC_ALL. */ if (is_access_allowed(fault, spte)) { ret = RET_PF_SPURIOUS; break; } new_spte = spte; /* * KVM only supports fixing page faults outside of MMU lock for * direct MMUs, nested MMUs are always indirect, and KVM always * uses A/D bits for non-nested MMUs. Thus, if A/D bits are * enabled, the SPTE can't be an access-tracked SPTE. */ if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte)) new_spte = restore_acc_track_spte(new_spte) | shadow_accessed_mask; /* * To keep things simple, only SPTEs that are MMU-writable can * be made fully writable outside of mmu_lock, e.g. only SPTEs * that were write-protected for dirty-logging or access * tracking are handled here. Don't bother checking if the * SPTE is writable to prioritize running with A/D bits enabled. * The is_access_allowed() check above handles the common case * of the fault being spurious, and the SPTE is known to be * shadow-present, i.e. except for access tracking restoration * making the new SPTE writable, the check is wasteful. */ if (fault->write && is_mmu_writable_spte(spte)) { new_spte |= PT_WRITABLE_MASK; /* * Do not fix write-permission on the large spte when * dirty logging is enabled. Since we only dirty the * first page into the dirty-bitmap in * fast_pf_fix_direct_spte(), other pages are missed * if its slot has dirty logging enabled. * * Instead, we let the slow page fault path create a * normal spte to fix the access. */ if (sp->role.level > PG_LEVEL_4K && kvm_slot_dirty_track_enabled(fault->slot)) break; } /* Verify that the fault can be handled in the fast path */ if (new_spte == spte || !is_access_allowed(fault, new_spte)) break; /* * Currently, fast page fault only works for direct mapping * since the gfn is not stable for indirect shadow page. See * Documentation/virt/kvm/locking.rst to get more detail. 
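* * If the try_cmpxchg64() in fast_pf_fix_direct_spte() loses a race, some other task changed the SPTE in the meantime; the surrounding loop simply re-reads the SPTE and retries, up to the retry limit below.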
*/ if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) { ret = RET_PF_FIXED; break; } if (++retry_count > 4) { pr_warn_once("Fast #PF retrying more than 4 times.\n"); break; } } while (true); trace_fast_page_fault(vcpu, fault, sptep, spte, ret); walk_shadow_page_lockless_end(vcpu); if (ret != RET_PF_INVALID) vcpu->stat.pf_fast++; return ret; } static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, struct list_head *invalid_list) { struct kvm_mmu_page *sp; if (!VALID_PAGE(*root_hpa)) return; sp = root_to_sp(*root_hpa); if (WARN_ON_ONCE(!sp)) return; if (is_tdp_mmu_page(sp)) { lockdep_assert_held_read(&kvm->mmu_lock); kvm_tdp_mmu_put_root(kvm, sp); } else { lockdep_assert_held_write(&kvm->mmu_lock); if (!--sp->root_count && sp->role.invalid) kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); } *root_hpa = INVALID_PAGE; } /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, ulong roots_to_free) { bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct; int i; LIST_HEAD(invalid_list); bool free_active_root; WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL); BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); /* Before acquiring the MMU lock, see if we need to do any real work. */ free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT) && VALID_PAGE(mmu->root.hpa); if (!free_active_root) { for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) && VALID_PAGE(mmu->prev_roots[i].hpa)) break; if (i == KVM_MMU_NUM_PREV_ROOTS) return; } if (is_tdp_mmu) read_lock(&kvm->mmu_lock); else write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, &invalid_list); if (free_active_root) { if (kvm_mmu_is_dummy_root(mmu->root.hpa)) { /* Nothing to cleanup for dummy roots. */ } else if (root_to_sp(mmu->root.hpa)) { mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list); } else if (mmu->pae_root) { for (i = 0; i < 4; ++i) { if (!IS_VALID_PAE_ROOT(mmu->pae_root[i])) continue; mmu_free_root_page(kvm, &mmu->pae_root[i], &invalid_list); mmu->pae_root[i] = INVALID_PAE_ROOT; } } mmu->root.hpa = INVALID_PAGE; mmu->root.pgd = 0; } if (is_tdp_mmu) { read_unlock(&kvm->mmu_lock); WARN_ON_ONCE(!list_empty(&invalid_list)); } else { kvm_mmu_commit_zap_page(kvm, &invalid_list); write_unlock(&kvm->mmu_lock); } } EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu) { unsigned long roots_to_free = 0; struct kvm_mmu_page *sp; hpa_t root_hpa; int i; /* * This should not be called while L2 is active, L2 can't invalidate * _only_ its own roots, e.g. INVVPID unconditionally exits. 
*/ WARN_ON_ONCE(mmu->root_role.guest_mode); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { root_hpa = mmu->prev_roots[i].hpa; if (!VALID_PAGE(root_hpa)) continue; sp = root_to_sp(root_hpa); if (!sp || sp->role.guest_mode) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); } kvm_mmu_free_roots(kvm, mmu, roots_to_free); } EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots); static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, u8 level) { union kvm_mmu_page_role role = vcpu->arch.mmu->root_role; struct kvm_mmu_page *sp; role.level = level; role.quadrant = quadrant; WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte); WARN_ON_ONCE(role.direct && role.has_4_byte_gpte); sp = kvm_mmu_get_shadow_page(vcpu, gfn, role); ++sp->root_count; return __pa(sp->spt); } static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.mmu; u8 shadow_root_level = mmu->root_role.level; hpa_t root; unsigned i; int r; if (tdp_mmu_enabled) { if (kvm_has_mirrored_tdp(vcpu->kvm) && !VALID_PAGE(mmu->mirror_root_hpa)) kvm_tdp_mmu_alloc_root(vcpu, true); kvm_tdp_mmu_alloc_root(vcpu, false); return 0; } write_lock(&vcpu->kvm->mmu_lock); r = make_mmu_pages_available(vcpu); if (r < 0) goto out_unlock; if (shadow_root_level >= PT64_ROOT_4LEVEL) { root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level); mmu->root.hpa = root; } else if (shadow_root_level == PT32E_ROOT_LEVEL) { if (WARN_ON_ONCE(!mmu->pae_root)) { r = -EIO; goto out_unlock; } for (i = 0; i < 4; ++i) { WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0, PT32_ROOT_LEVEL); mmu->pae_root[i] = root | PT_PRESENT_MASK | shadow_me_value; } mmu->root.hpa = __pa(mmu->pae_root); } else { WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level); r = -EIO; goto out_unlock; } /* root.pgd is ignored for direct MMUs. */ mmu->root.pgd = 0; out_unlock: write_unlock(&vcpu->kvm->mmu_lock); return r; } static int mmu_first_shadow_root_alloc(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; int r = 0, i, bkt; /* * Check if this is the first shadow root being allocated before * taking the lock. */ if (kvm_shadow_root_allocated(kvm)) return 0; mutex_lock(&kvm->slots_arch_lock); /* Recheck, under the lock, whether this is the first shadow root. */ if (kvm_shadow_root_allocated(kvm)) goto out_unlock; /* * Check if anything actually needs to be allocated, e.g. all metadata * will be allocated upfront if TDP is disabled. */ if (kvm_memslots_have_rmaps(kvm) && kvm_page_track_write_tracking_enabled(kvm)) goto out_success; for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { slots = __kvm_memslots(kvm, i); kvm_for_each_memslot(slot, bkt, slots) { /* * Both of these functions are no-ops if the target is * already allocated, so unconditionally calling both * is safe. Intentionally do NOT free allocations on * failure to avoid having to track which allocations * were made now versus when the memslot was created. * The metadata is guaranteed to be freed when the slot * is freed, and will be kept/used if userspace retries * KVM_RUN instead of killing the VM. */ r = memslot_rmap_alloc(slot, slot->npages); if (r) goto out_unlock; r = kvm_page_track_write_tracking_alloc(slot); if (r) goto out_unlock; } } /* * Ensure that shadow_root_allocated becomes true strictly after * all the related pointers are set. 
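* The smp_store_release() below is presumably paired with an acquire load in kvm_shadow_root_allocated(), so that a reader which observes the flag set also observes the rmap and write-tracking allocations made above.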
*/ out_success: smp_store_release(&kvm->arch.shadow_root_allocated, true); out_unlock: mutex_unlock(&kvm->slots_arch_lock); return r; } static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.mmu; u64 pdptrs[4], pm_mask; gfn_t root_gfn, root_pgd; int quadrant, i, r; hpa_t root; root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu); root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT; if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) { mmu->root.hpa = kvm_mmu_get_dummy_root(); return 0; } /* * On SVM, reading PDPTRs might access guest memory, which might fault * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock. */ if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) { for (i = 0; i < 4; ++i) { pdptrs[i] = mmu->get_pdptr(vcpu, i); if (!(pdptrs[i] & PT_PRESENT_MASK)) continue; if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT)) pdptrs[i] = 0; } } r = mmu_first_shadow_root_alloc(vcpu->kvm); if (r) return r; write_lock(&vcpu->kvm->mmu_lock); r = make_mmu_pages_available(vcpu); if (r < 0) goto out_unlock; /* * Do we shadow a long mode page table? If so we need to * write-protect the guests page table root. */ if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { root = mmu_alloc_root(vcpu, root_gfn, 0, mmu->root_role.level); mmu->root.hpa = root; goto set_root_pgd; } if (WARN_ON_ONCE(!mmu->pae_root)) { r = -EIO; goto out_unlock; } /* * We shadow a 32 bit page table. This may be a legacy 2-level * or a PAE 3-level page table. In either case we need to be aware that * the shadow page table may be a PAE or a long mode page table. */ pm_mask = PT_PRESENT_MASK | shadow_me_value; if (mmu->root_role.level >= PT64_ROOT_4LEVEL) { pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; if (WARN_ON_ONCE(!mmu->pml4_root)) { r = -EIO; goto out_unlock; } mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask; if (mmu->root_role.level == PT64_ROOT_5LEVEL) { if (WARN_ON_ONCE(!mmu->pml5_root)) { r = -EIO; goto out_unlock; } mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask; } } for (i = 0; i < 4; ++i) { WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) { if (!(pdptrs[i] & PT_PRESENT_MASK)) { mmu->pae_root[i] = INVALID_PAE_ROOT; continue; } root_gfn = pdptrs[i] >> PAGE_SHIFT; } /* * If shadowing 32-bit non-PAE page tables, each PAE page * directory maps one quarter of the guest's non-PAE page * directory. Othwerise each PAE page direct shadows one guest * PAE page directory so that quadrant should be 0. */ quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0; root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL); mmu->pae_root[i] = root | pm_mask; } if (mmu->root_role.level == PT64_ROOT_5LEVEL) mmu->root.hpa = __pa(mmu->pml5_root); else if (mmu->root_role.level == PT64_ROOT_4LEVEL) mmu->root.hpa = __pa(mmu->pml4_root); else mmu->root.hpa = __pa(mmu->pae_root); set_root_pgd: mmu->root.pgd = root_pgd; out_unlock: write_unlock(&vcpu->kvm->mmu_lock); return r; } static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.mmu; bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL; u64 *pml5_root = NULL; u64 *pml4_root = NULL; u64 *pae_root; /* * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP * tables are allocated and initialized at root creation as there is no * equivalent level in the guest's NPT to shadow. Allocate the tables * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare. 
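* The checks below return early whenever no special roots are needed: the MMU is direct, the guest itself already runs 4-level (or 5-level) paging, or the shadow MMU is not a 4/5-level MMU.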
*/ if (mmu->root_role.direct || mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL || mmu->root_role.level < PT64_ROOT_4LEVEL) return 0; /* * NPT, the only paging mode that uses this horror, uses a fixed number * of levels for the shadow page tables, e.g. all MMUs are 4-level or * all MMus are 5-level. Thus, this can safely require that pml5_root * is allocated if the other roots are valid and pml5 is needed, as any * prior MMU would also have required pml5. */ if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root)) return 0; /* * The special roots should always be allocated in concert. Yell and * bail if KVM ends up in a state where only one of the roots is valid. */ if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root || (need_pml5 && mmu->pml5_root))) return -EIO; /* * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and * doesn't need to be decrypted. */ pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); if (!pae_root) return -ENOMEM; #ifdef CONFIG_X86_64 pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); if (!pml4_root) goto err_pml4; if (need_pml5) { pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); if (!pml5_root) goto err_pml5; } #endif mmu->pae_root = pae_root; mmu->pml4_root = pml4_root; mmu->pml5_root = pml5_root; return 0; #ifdef CONFIG_X86_64 err_pml5: free_page((unsigned long)pml4_root); err_pml4: free_page((unsigned long)pae_root); return -ENOMEM; #endif } static bool is_unsync_root(hpa_t root) { struct kvm_mmu_page *sp; if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root)) return false; /* * The read barrier orders the CPU's read of SPTE.W during the page table * walk before the reads of sp->unsync/sp->unsync_children here. * * Even if another CPU was marking the SP as unsync-ed simultaneously, * any guest page table changes are not guaranteed to be visible anyway * until this VCPU issues a TLB flush strictly after those changes are * made. We only need to ensure that the other CPU sets these flags * before any actual changes to the page tables are made. The comments * in mmu_try_to_unsync_pages() describe what could go wrong if this * requirement isn't satisfied. */ smp_rmb(); sp = root_to_sp(root); /* * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the * PDPTEs for a given PAE root need to be synchronized individually. 
*/ if (WARN_ON_ONCE(!sp)) return false; if (sp->unsync || sp->unsync_children) return true; return false; } void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) { int i; struct kvm_mmu_page *sp; if (vcpu->arch.mmu->root_role.direct) return; if (!VALID_PAGE(vcpu->arch.mmu->root.hpa)) return; vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { hpa_t root = vcpu->arch.mmu->root.hpa; if (!is_unsync_root(root)) return; sp = root_to_sp(root); write_lock(&vcpu->kvm->mmu_lock); mmu_sync_children(vcpu, sp, true); write_unlock(&vcpu->kvm->mmu_lock); return; } write_lock(&vcpu->kvm->mmu_lock); for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu->pae_root[i]; if (IS_VALID_PAE_ROOT(root)) { sp = spte_to_child_sp(root); mmu_sync_children(vcpu, sp, true); } } write_unlock(&vcpu->kvm->mmu_lock); } void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu) { unsigned long roots_to_free = 0; int i; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa)) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); /* sync prev_roots by simply freeing them */ kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free); } static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t vaddr, u64 access, struct x86_exception *exception) { if (exception) exception->error_code = 0; return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception); } static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) { /* * A nested guest cannot use the MMIO cache if it is using nested * page tables, because cr2 is a nGPA while the cache stores GPAs. */ if (mmu_is_nested(vcpu)) return false; if (direct) return vcpu_match_mmio_gpa(vcpu, addr); return vcpu_match_mmio_gva(vcpu, addr); } /* * Return the level of the lowest level SPTE added to sptes. * That SPTE may be non-present. * * Must be called between walk_shadow_page_lockless_{begin,end}. */ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level) { struct kvm_shadow_walk_iterator iterator; int leaf = -1; u64 spte; for (shadow_walk_init(&iterator, vcpu, addr), *root_level = iterator.level; shadow_walk_okay(&iterator); __shadow_walk_next(&iterator, spte)) { leaf = iterator.level; spte = mmu_spte_get_lockless(iterator.sptep); sptes[leaf] = spte; } return leaf; } static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level) { int leaf; walk_shadow_page_lockless_begin(vcpu); if (is_tdp_mmu_active(vcpu)) leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level); else leaf = get_walk(vcpu, addr, sptes, root_level); walk_shadow_page_lockless_end(vcpu); return leaf; } /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) { u64 sptes[PT64_ROOT_MAX_LEVEL + 1]; struct rsvd_bits_validate *rsvd_check; int root, leaf, level; bool reserved = false; leaf = get_sptes_lockless(vcpu, addr, sptes, &root); if (unlikely(leaf < 0)) { *sptep = 0ull; return reserved; } *sptep = sptes[leaf]; /* * Skip reserved bits checks on the terminal leaf if it's not a valid * SPTE. Note, this also (intentionally) skips MMIO SPTEs, which, by * design, always have reserved bits set. The purpose of the checks is * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs. 
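* Bumping leaf below simply excludes the non-present terminal entry from the reserved-bits loop; the remaining, present entries are still checked.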
*/ if (!is_shadow_present_pte(sptes[leaf])) leaf++; rsvd_check = &vcpu->arch.mmu->shadow_zero_check; for (level = root; level >= leaf; level--) reserved |= is_rsvd_spte(rsvd_check, sptes[level], level); if (reserved) { pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n", __func__, addr); for (level = root; level >= leaf; level--) pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx", sptes[level], level, get_rsvd_bits(rsvd_check, sptes[level], level)); } return reserved; } static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) { u64 spte; bool reserved; if (mmio_info_in_cache(vcpu, addr, direct)) return RET_PF_EMULATE; reserved = get_mmio_spte(vcpu, addr, &spte); if (WARN_ON_ONCE(reserved)) return -EINVAL; if (is_mmio_spte(vcpu->kvm, spte)) { gfn_t gfn = get_mmio_spte_gfn(spte); unsigned int access = get_mmio_spte_access(spte); if (!check_mmio_spte(vcpu, spte)) return RET_PF_INVALID; if (direct) addr = 0; trace_handle_mmio_page_fault(addr, gfn, access); vcpu_cache_mmio_info(vcpu, addr, gfn, access); return RET_PF_EMULATE; } /* * If the page table is zapped by other cpus, let CPU fault again on * the address. */ return RET_PF_RETRY; } static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { if (unlikely(fault->rsvd)) return false; if (!fault->present || !fault->write) return false; /* * guest is writing the page which is write tracked which can * not be fixed by page fault handler. */ if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn)) return true; return false; } static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) { struct kvm_shadow_walk_iterator iterator; u64 spte; walk_shadow_page_lockless_begin(vcpu); for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) clear_sp_write_flooding_count(iterator.sptep); walk_shadow_page_lockless_end(vcpu); } static u32 alloc_apf_token(struct kvm_vcpu *vcpu) { /* make sure the token value is not 0 */ u32 id = vcpu->arch.apf.id; if (id << 12 == 0) vcpu->arch.apf.id = 1; return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; } static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_arch_async_pf arch; arch.token = alloc_apf_token(vcpu); arch.gfn = fault->gfn; arch.error_code = fault->error_code; arch.direct_map = vcpu->arch.mmu->root_role.direct; arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu); return kvm_setup_async_pf(vcpu, fault->addr, kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch); } void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { int r; if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS)) return; if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) || work->wakeup_all) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; if (!vcpu->arch.mmu->root_role.direct && work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu)) return; r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL, NULL); /* * Account fixed page faults, otherwise they'll never be counted, but * ignore stats for all other return times. 
Page-ready "faults" aren't * truly spurious and never trigger emulation */ if (r == RET_PF_FIXED) vcpu->stat.pf_fixed++; } static inline u8 kvm_max_level_for_order(int order) { BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G); KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) && order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) && order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K)); if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G)) return PG_LEVEL_1G; if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M)) return PG_LEVEL_2M; return PG_LEVEL_4K; } static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, u8 max_level, int gmem_order) { u8 req_max_level; if (max_level == PG_LEVEL_4K) return PG_LEVEL_4K; max_level = min(kvm_max_level_for_order(gmem_order), max_level); if (max_level == PG_LEVEL_4K) return PG_LEVEL_4K; req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn); if (req_max_level) max_level = min(max_level, req_max_level); return max_level; } static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int r) { kvm_release_faultin_page(vcpu->kvm, fault->refcounted_page, r == RET_PF_RETRY, fault->map_writable); } static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { int max_order, r; if (!kvm_slot_can_be_private(fault->slot)) { kvm_mmu_prepare_memory_fault_exit(vcpu, fault); return -EFAULT; } r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn, &fault->refcounted_page, &max_order); if (r) { kvm_mmu_prepare_memory_fault_exit(vcpu, fault); return r; } fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY); fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn, fault->max_level, max_order); return RET_PF_CONTINUE; } static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { unsigned int foll = fault->write ? FOLL_WRITE : 0; if (fault->is_private) return kvm_mmu_faultin_pfn_private(vcpu, fault); foll |= FOLL_NOWAIT; fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll, &fault->map_writable, &fault->refcounted_page); /* * If resolving the page failed because I/O is needed to fault-in the * page, then either set up an asynchronous #PF to do the I/O, or if * doing an async #PF isn't possible, retry with I/O allowed. All * other failures are terminal, i.e. retrying won't help. */ if (fault->pfn != KVM_PFN_ERR_NEEDS_IO) return RET_PF_CONTINUE; if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) { trace_kvm_try_async_get_page(fault->addr, fault->gfn); if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) { trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn); kvm_make_request(KVM_REQ_APF_HALT, vcpu); return RET_PF_RETRY; } else if (kvm_arch_setup_async_pf(vcpu, fault)) { return RET_PF_RETRY; } } /* * Allow gup to bail on pending non-fatal signals when it's also allowed * to wait for IO. Note, gup always bails if it is unable to quickly * get a page and a fatal signal, i.e. SIGKILL, is pending. */ foll |= FOLL_INTERRUPTIBLE; foll &= ~FOLL_NOWAIT; fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll, &fault->map_writable, &fault->refcounted_page); return RET_PF_CONTINUE; } static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access) { struct kvm_memory_slot *slot = fault->slot; struct kvm *kvm = vcpu->kvm; int ret; if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm)) return -EFAULT; /* * Note that the mmu_invalidate_seq also serves to detect a concurrent * change in attributes. 
is_page_fault_stale() will detect an * invalidation related to fault->gfn and resume the guest without * installing a mapping in the page tables. */ fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq; smp_rmb(); /* * Now that we have a snapshot of mmu_invalidate_seq we can check for a * private vs. shared mismatch. */ if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) { kvm_mmu_prepare_memory_fault_exit(vcpu, fault); return -EFAULT; } if (unlikely(!slot)) return kvm_handle_noslot_fault(vcpu, fault, access); /* * Retry the page fault if the gfn hit a memslot that is being deleted * or moved. This ensures any existing SPTEs for the old memslot will * be zapped before KVM inserts a new MMIO SPTE for the gfn. */ if (slot->flags & KVM_MEMSLOT_INVALID) return RET_PF_RETRY; if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) { /* * Don't map L1's APIC access page into L2, KVM doesn't support * using APICv/AVIC to accelerate L2 accesses to L1's APIC, * i.e. the access needs to be emulated. Emulating access to * L1's APIC is also correct if L1 is accelerating L2's own * virtual APIC, but for some reason L1 also maps _L1's_ APIC * into L2. Note, vcpu_is_mmio_gpa() always treats access to * the APIC as MMIO. Allow an MMIO SPTE to be created, as KVM * uses different roots for L1 vs. L2, i.e. there is no danger * of breaking APICv/AVIC for L1. */ if (is_guest_mode(vcpu)) return kvm_handle_noslot_fault(vcpu, fault, access); /* * If the APIC access page exists but is disabled, go directly * to emulation without caching the MMIO access or creating a * MMIO SPTE. That way the cache doesn't need to be purged * when the AVIC is re-enabled. */ if (!kvm_apicv_activated(vcpu->kvm)) return RET_PF_EMULATE; } /* * Check for a relevant mmu_notifier invalidation event before getting * the pfn from the primary MMU, and before acquiring mmu_lock. * * For mmu_lock, if there is an in-progress invalidation and the kernel * allows preemption, the invalidation task may drop mmu_lock and yield * in response to mmu_lock being contended, which is *very* counter- * productive as this vCPU can't actually make forward progress until * the invalidation completes. * * Retrying now can also avoid unnecessary lock contention in the primary * MMU, as the primary MMU doesn't necessarily hold a single lock for * the duration of the invalidation, i.e. faulting in a conflicting pfn * can cause the invalidation to take longer by holding locks that are * needed to complete the invalidation. * * Do the pre-check even for non-preemptible kernels, i.e. even if KVM * will never yield mmu_lock in response to contention, as this vCPU is * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held * to detect retry guarantees the worst case latency for the vCPU. */ if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) return RET_PF_RETRY; ret = __kvm_mmu_faultin_pfn(vcpu, fault); if (ret != RET_PF_CONTINUE) return ret; if (unlikely(is_error_pfn(fault->pfn))) return kvm_handle_error_pfn(vcpu, fault); if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn))) return kvm_handle_noslot_fault(vcpu, fault, access); /* * Check again for a relevant mmu_notifier invalidation event purely to * avoid contending mmu_lock. Most invalidations will be detected by * the previous check, but checking is extremely cheap relative to the * overall cost of failing to detect the invalidation until after * mmu_lock is acquired.
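* * A retry taken at this point must also release the page reference obtained by __kvm_mmu_faultin_pfn(), which is why the code below calls kvm_mmu_finish_page_fault(..., RET_PF_RETRY) before returning.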
*/ if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) { kvm_mmu_finish_page_fault(vcpu, fault, RET_PF_RETRY); return RET_PF_RETRY; } return RET_PF_CONTINUE; } /* * Returns true if the page fault is stale and needs to be retried, i.e. if the * root was invalidated by a memslot update or a relevant mmu_notifier fired. */ static bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); /* Special roots, e.g. pae_root, are not backed by shadow pages. */ if (sp && is_obsolete_sp(vcpu->kvm, sp)) return true; /* * Roots without an associated shadow page are considered invalid if * there is a pending request to free obsolete roots. The request is * only a hint that the current root _may_ be obsolete and needs to be * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs * to reload even if no vCPU is actively using the root. */ if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) return true; /* * Check for a relevant mmu_notifier invalidation event one last time * now that mmu_lock is held, as the "unsafe" checks performed without * holding mmu_lock can get false negatives. */ return fault->slot && mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn); } static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { int r; /* Dummy roots are used only for shadowing bad guest roots. */ if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) return RET_PF_RETRY; if (page_fault_handle_page_track(vcpu, fault)) return RET_PF_WRITE_PROTECTED; r = fast_page_fault(vcpu, fault); if (r != RET_PF_INVALID) return r; r = mmu_topup_memory_caches(vcpu, false); if (r) return r; r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL); if (r != RET_PF_CONTINUE) return r; r = RET_PF_RETRY; write_lock(&vcpu->kvm->mmu_lock); if (is_page_fault_stale(vcpu, fault)) goto out_unlock; r = make_mmu_pages_available(vcpu); if (r) goto out_unlock; r = direct_map(vcpu, fault); out_unlock: kvm_mmu_finish_page_fault(vcpu, fault, r); write_unlock(&vcpu->kvm->mmu_lock); return r; } static int nonpaging_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */ fault->max_level = PG_LEVEL_2M; return direct_page_fault(vcpu, fault); } int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len) { int r = 1; u32 flags = vcpu->arch.apf.host_apf_flags; #ifndef CONFIG_X86_64 /* A 64-bit CR2 should be impossible on 32-bit KVM. */ if (WARN_ON_ONCE(fault_address >> 32)) return -EFAULT; #endif /* * Legacy #PF exception only have a 32-bit error code. Simply drop the * upper bits as KVM doesn't use them for #PF (because they are never * set), and to ensure there are no collisions with KVM-defined bits. */ if (WARN_ON_ONCE(error_code >> 32)) error_code = lower_32_bits(error_code); /* * Restrict KVM-defined flags to bits 63:32 so that it's impossible for * them to conflict with #PF error codes, which are limited to 32 bits. 
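* The BUILD_BUG_ON() below enforces this at compile time: PFERR_SYNTHETIC_MASK must have no bits set in its lower 32 bits.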
*/ BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK)); vcpu->arch.l1tf_flush_l1d = true; if (!flags) { trace_kvm_page_fault(vcpu, fault_address, error_code); r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, insn_len); } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { vcpu->arch.apf.host_apf_flags = 0; local_irq_disable(); kvm_async_pf_task_wait_schedule(fault_address); local_irq_enable(); } else { WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags); } return r; } EXPORT_SYMBOL_GPL(kvm_handle_page_fault); #ifdef CONFIG_X86_64 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { int r; if (page_fault_handle_page_track(vcpu, fault)) return RET_PF_WRITE_PROTECTED; r = fast_page_fault(vcpu, fault); if (r != RET_PF_INVALID) return r; r = mmu_topup_memory_caches(vcpu, false); if (r) return r; r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL); if (r != RET_PF_CONTINUE) return r; r = RET_PF_RETRY; read_lock(&vcpu->kvm->mmu_lock); if (is_page_fault_stale(vcpu, fault)) goto out_unlock; r = kvm_tdp_mmu_map(vcpu, fault); out_unlock: kvm_mmu_finish_page_fault(vcpu, fault, r); read_unlock(&vcpu->kvm->mmu_lock); return r; } #endif bool kvm_mmu_may_ignore_guest_pat(void) { /* * When EPT is enabled (shadow_memtype_mask is non-zero), and the VM * has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to * honor the memtype from the guest's PAT so that guest accesses to * memory that is DMA'd aren't cached against the guest's wishes. As a * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA, * KVM _always_ ignores guest PAT (when EPT is enabled). */ return shadow_memtype_mask; } int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { #ifdef CONFIG_X86_64 if (tdp_mmu_enabled) return kvm_tdp_mmu_page_fault(vcpu, fault); #endif return direct_page_fault(vcpu, fault); } static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level) { int r; /* * Restrict to TDP page fault, since that's the only case where the MMU * is indexed by GPA. */ if (vcpu->arch.mmu->page_fault != kvm_tdp_page_fault) return -EOPNOTSUPP; do { if (signal_pending(current)) return -EINTR; cond_resched(); r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, true, NULL, level); } while (r == RET_PF_RETRY); if (r < 0) return r; switch (r) { case RET_PF_FIXED: case RET_PF_SPURIOUS: case RET_PF_WRITE_PROTECTED: return 0; case RET_PF_EMULATE: return -ENOENT; case RET_PF_RETRY: case RET_PF_CONTINUE: case RET_PF_INVALID: default: WARN_ONCE(1, "could not fix page fault during prefault"); return -EIO; } } long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu, struct kvm_pre_fault_memory *range) { u64 error_code = PFERR_GUEST_FINAL_MASK; u8 level = PG_LEVEL_4K; u64 end; int r; if (!vcpu->kvm->arch.pre_fault_allowed) return -EOPNOTSUPP; /* * reload is efficient when called repeatedly, so we can do it on * every iteration. */ r = kvm_mmu_reload(vcpu); if (r) return r; if (kvm_arch_has_private_mem(vcpu->kvm) && kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa))) error_code |= PFERR_PRIVATE_ACCESS; /* * Shadow paging uses GVA for kvm page fault, so restrict to * two-dimensional paging. */ r = kvm_tdp_map_page(vcpu, range->gpa, error_code, &level); if (r < 0) return r; /* * If the mapping that covers range->gpa can use a huge page, it * may start below it or end after range->gpa + range->size. 
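* * For example (illustrative numbers, assuming the usual 2MiB alignment helpers): with level == PG_LEVEL_2M and range->gpa == 0x201000, the huge page spans [0x200000, 0x400000), so end computes to 0x400000 and the function reports min(range->size, 0x1ff000) bytes as pre-faulted.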
*/ end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level); return min(range->size, end - range->gpa); } static void nonpaging_init_context(struct kvm_mmu *context) { context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; context->sync_spte = NULL; } static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, union kvm_mmu_page_role role) { struct kvm_mmu_page *sp; if (!VALID_PAGE(root->hpa)) return false; if (!role.direct && pgd != root->pgd) return false; sp = root_to_sp(root->hpa); if (WARN_ON_ONCE(!sp)) return false; return role.word == sp->role.word; } /* * Find out if a previously cached root matching the new pgd/role is available, * and insert the current root as the MRU in the cache. * If a matching root is found, it is assigned to kvm_mmu->root and * true is returned. * If no match is found, kvm_mmu->root is left invalid, the LRU root is * evicted to make room for the current root, and false is returned. */ static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role) { uint i; if (is_root_usable(&mmu->root, new_pgd, new_role)) return true; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { /* * The swaps end up rotating the cache like this: * C 0 1 2 3 (on entry to the function) * 0 C 1 2 3 * 1 C 0 2 3 * 2 C 0 1 3 * 3 C 0 1 2 (on exit from the loop) */ swap(mmu->root, mmu->prev_roots[i]); if (is_root_usable(&mmu->root, new_pgd, new_role)) return true; } kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT); return false; } /* * Find out if a previously cached root matching the new pgd/role is available. * On entry, mmu->root is invalid. * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry * of the cache becomes invalid, and true is returned. * If no match is found, kvm_mmu->root is left invalid and false is returned. */ static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role) { uint i; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role)) goto hit; return false; hit: swap(mmu->root, mmu->prev_roots[i]); /* Bubble up the remaining roots. */ for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++) mmu->prev_roots[i] = mmu->prev_roots[i + 1]; mmu->prev_roots[i].hpa = INVALID_PAGE; return true; } static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role) { /* * Limit reuse to 64-bit hosts+VMs without "special" roots in order to * avoid having to deal with PDPTEs and other complexities. */ if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa)) kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT); if (VALID_PAGE(mmu->root.hpa)) return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role); else return cached_root_find_without_current(kvm, mmu, new_pgd, new_role); } void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd) { struct kvm_mmu *mmu = vcpu->arch.mmu; union kvm_mmu_page_role new_role = mmu->root_role; /* * Return immediately if no usable root was found, kvm_mmu_reload() * will establish a valid root prior to the next VM-Enter. */ if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) return; /* * It's possible that the cached previous root page is obsolete because * of a change in the MMU generation number. 
However, changing the * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, * which will free the root set here and allocate a new one. */ kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); if (force_flush_and_sync_on_reuse) { kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } /* * The last MMIO access's GVA and GPA are cached in the VCPU. When * switching to a new CR3, that GVA->GPA mapping may no longer be * valid. So clear any cached MMIO info even when we don't need to sync * the shadow page tables. */ vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); /* * If this is a direct root page, it doesn't have a write flooding * count. Otherwise, clear the write flooding count. */ if (!new_role.direct) { struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); if (!WARN_ON_ONCE(!sp)) __clear_sp_write_flooding_count(sp); } } EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, unsigned int access) { if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) { if (gfn != get_mmio_spte_gfn(*sptep)) { mmu_spte_clear_no_track(sptep); return true; } mark_mmio_spte(vcpu, sptep, gfn, access); return true; } return false; } #define PTTYPE_EPT 18 /* arbitrary */ #define PTTYPE PTTYPE_EPT #include "paging_tmpl.h" #undef PTTYPE #define PTTYPE 64 #include "paging_tmpl.h" #undef PTTYPE #define PTTYPE 32 #include "paging_tmpl.h" #undef PTTYPE static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, int level, bool nx, bool gbpages, bool pse, bool amd) { u64 gbpages_bit_rsvd = 0; u64 nonleaf_bit8_rsvd = 0; u64 high_bits_rsvd; rsvd_check->bad_mt_xwr = 0; if (!gbpages) gbpages_bit_rsvd = rsvd_bits(7, 7); if (level == PT32E_ROOT_LEVEL) high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62); else high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); /* Note, NX doesn't exist in PDPTEs, this is handled below. */ if (!nx) high_bits_rsvd |= rsvd_bits(63, 63); /* * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for * leaf entries) on AMD CPUs only. 
*/ if (amd) nonleaf_bit8_rsvd = rsvd_bits(8, 8); switch (level) { case PT32_ROOT_LEVEL: /* no rsvd bits for 2 level 4K page table entries */ rsvd_check->rsvd_bits_mask[0][1] = 0; rsvd_check->rsvd_bits_mask[0][0] = 0; rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; if (!pse) { rsvd_check->rsvd_bits_mask[1][1] = 0; break; } if (is_cpuid_PSE36()) /* 36bits PSE 4MB page */ rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); else /* 32 bits PSE 4MB page */ rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); break; case PT32E_ROOT_LEVEL: rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) | high_bits_rsvd | rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */ rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */ rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(13, 20); /* large page */ rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; break; case PT64_ROOT_5LEVEL: rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | nonleaf_bit8_rsvd | rsvd_bits(7, 7); rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; fallthrough; case PT64_ROOT_4LEVEL: rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | nonleaf_bit8_rsvd | rsvd_bits(7, 7); rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | gbpages_bit_rsvd; rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | gbpages_bit_rsvd | rsvd_bits(13, 29); rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(13, 20); /* large page */ rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; break; } } static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { __reset_rsvds_bits_mask(&context->guest_rsvd_check, vcpu->arch.reserved_gpa_bits, context->cpu_role.base.level, is_efer_nx(context), guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES), is_cr4_pse(context), guest_cpuid_is_amd_compatible(vcpu)); } static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, bool execonly, int huge_page_level) { u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); u64 large_1g_rsvd = 0, large_2m_rsvd = 0; u64 bad_mt_xwr; if (huge_page_level < PG_LEVEL_1G) large_1g_rsvd = rsvd_bits(7, 7); if (huge_page_level < PG_LEVEL_2M) large_2m_rsvd = rsvd_bits(7, 7); rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7); rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7); rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd; rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd; rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* large page */ rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd; rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd; rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */ bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 
must not be 110 */ if (!execonly) { /* bits 0..2 must not be 100 unless VMX capabilities allow it */ bad_mt_xwr |= REPEAT_BYTE(1ull << 4); } rsvd_check->bad_mt_xwr = bad_mt_xwr; } static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly, int huge_page_level) { __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, vcpu->arch.reserved_gpa_bits, execonly, huge_page_level); } static inline u64 reserved_hpa_bits(void) { return rsvd_bits(kvm_host.maxphyaddr, 63); } /* * the page table on host is the shadow page table for the page * table in guest or amd nested guest, its mmu features completely * follow the features in guest. */ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { /* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */ bool is_amd = true; /* KVM doesn't use 2-level page tables for the shadow MMU. */ bool is_pse = false; struct rsvd_bits_validate *shadow_zero_check; int i; WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL); shadow_zero_check = &context->shadow_zero_check; __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(), context->root_role.level, context->root_role.efer_nx, guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES), is_pse, is_amd); if (!shadow_me_mask) return; for (i = context->root_role.level; --i >= 0;) { /* * So far shadow_me_value is a constant during KVM's life * time. Bits in shadow_me_value are allowed to be set. * Bits in shadow_me_mask but not in shadow_me_value are * not allowed to be set. */ shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask; shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask; shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value; shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value; } } static inline bool boot_cpu_is_amd(void) { WARN_ON_ONCE(!tdp_enabled); return shadow_x_mask == 0; } /* * the direct page table on host, use as much mmu features as * possible, however, kvm currently does not do execution-protection. */ static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context) { struct rsvd_bits_validate *shadow_zero_check; int i; shadow_zero_check = &context->shadow_zero_check; if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(), context->root_role.level, true, boot_cpu_has(X86_FEATURE_GBPAGES), false, true); else __reset_rsvds_bits_mask_ept(shadow_zero_check, reserved_hpa_bits(), false, max_huge_page_level); if (!shadow_me_mask) return; for (i = context->root_role.level; --i >= 0;) { shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; } } /* * as the comments in reset_shadow_zero_bits_mask() except it * is the shadow page table for intel nested guest. */ static void reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, reserved_hpa_bits(), execonly, max_huge_page_level); } #define BYTE_MASK(access) \ ((1 & (access) ? 2 : 0) | \ (2 & (access) ? 4 : 0) | \ (3 & (access) ? 8 : 0) | \ (4 & (access) ? 16 : 0) | \ (5 & (access) ? 32 : 0) | \ (6 & (access) ? 64 : 0) | \ (7 & (access) ? 
128 : 0)) static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept) { unsigned byte; const u8 x = BYTE_MASK(ACC_EXEC_MASK); const u8 w = BYTE_MASK(ACC_WRITE_MASK); const u8 u = BYTE_MASK(ACC_USER_MASK); bool cr4_smep = is_cr4_smep(mmu); bool cr4_smap = is_cr4_smap(mmu); bool cr0_wp = is_cr0_wp(mmu); bool efer_nx = is_efer_nx(mmu); for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { unsigned pfec = byte << 1; /* * Each "*f" variable has a 1 bit for each UWX value * that causes a fault with the given PFEC. */ /* Faults from writes to non-writable pages */ u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; /* Faults from user mode accesses to supervisor pages */ u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; /* Faults from fetches of non-executable pages*/ u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; /* Faults from kernel mode fetches of user pages */ u8 smepf = 0; /* Faults from kernel mode accesses of user pages */ u8 smapf = 0; if (!ept) { /* Faults from kernel mode accesses to user pages */ u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; /* Not really needed: !nx will cause pte.nx to fault */ if (!efer_nx) ff = 0; /* Allow supervisor writes if !cr0.wp */ if (!cr0_wp) wf = (pfec & PFERR_USER_MASK) ? wf : 0; /* Disallow supervisor fetches of user code if cr4.smep */ if (cr4_smep) smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; /* * SMAP:kernel-mode data accesses from user-mode * mappings should fault. A fault is considered * as a SMAP violation if all of the following * conditions are true: * - X86_CR4_SMAP is set in CR4 * - A user page is accessed * - The access is not a fetch * - The access is supervisor mode * - If implicit supervisor access or X86_EFLAGS_AC is clear * * Here, we cover the first four conditions. * The fifth is computed dynamically in permission_fault(); * PFERR_RSVD_MASK bit will be set in PFEC if the access is * *not* subject to SMAP restrictions. */ if (cr4_smap) smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; } mmu->permissions[byte] = ff | uf | wf | smepf | smapf; } } /* * PKU is an additional mechanism by which the paging controls access to * user-mode addresses based on the value in the PKRU register. Protection * key violations are reported through a bit in the page fault error code. * Unlike other bits of the error code, the PK bit is not known at the * call site of e.g. gva_to_gpa; it must be computed directly in * permission_fault based on two bits of PKRU, on some machine state (CR4, * CR0, EFER, CPL), and on other bits of the error code and the page tables. * * In particular the following conditions come from the error code, the * page tables and the machine state: * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) * - PK is always zero if U=0 in the page tables * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. * * The PKRU bitmask caches the result of these four conditions. The error * code (minus the P bit) and the page table's U bit form an index into the * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed * with the two bits of the PKRU register corresponding to the protection key. * For the first three conditions above the bits will be 00, thus masking * away both AD and WD. For all reads or if the last condition holds, WD * only will be masked away. 
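* For example: a supervisor-mode write to a user page with CR0.WP=1 sets both check_pkey and check_write below, so 11b is cached for that index and either PKRU.AD or PKRU.WD of the page's protection key can flag the access as a PK fault; a plain read of a user page caches 01b (only PKRU.AD applies); an instruction fetch caches 00b and can never generate a PK fault.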
*/ static void update_pkru_bitmask(struct kvm_mmu *mmu) { unsigned bit; bool wp; mmu->pkru_mask = 0; if (!is_cr4_pke(mmu)) return; wp = is_cr0_wp(mmu); for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { unsigned pfec, pkey_bits; bool check_pkey, check_write, ff, uf, wf, pte_user; pfec = bit << 1; ff = pfec & PFERR_FETCH_MASK; uf = pfec & PFERR_USER_MASK; wf = pfec & PFERR_WRITE_MASK; /* PFEC.RSVD is replaced by ACC_USER_MASK. */ pte_user = pfec & PFERR_RSVD_MASK; /* * Only need to check the access which is not an * instruction fetch and is to a user page. */ check_pkey = (!ff && pte_user); /* * write access is controlled by PKRU if it is a * user access or CR0.WP = 1. */ check_write = check_pkey && wf && (uf || wp); /* PKRU.AD stops both read and write access. */ pkey_bits = !!check_pkey; /* PKRU.WD stops write access. */ pkey_bits |= (!!check_write) << 1; mmu->pkru_mask |= (pkey_bits & 3) << pfec; } } static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) { if (!is_cr0_pg(mmu)) return; reset_guest_rsvds_bits_mask(vcpu, mmu); update_permission_bitmask(mmu, false); update_pkru_bitmask(mmu); } static void paging64_init_context(struct kvm_mmu *context) { context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; context->sync_spte = paging64_sync_spte; } static void paging32_init_context(struct kvm_mmu *context) { context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; context->sync_spte = paging32_sync_spte; } static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) { union kvm_cpu_role role = {0}; role.base.access = ACC_ALL; role.base.smm = is_smm(vcpu); role.base.guest_mode = is_guest_mode(vcpu); role.ext.valid = 1; if (!____is_cr0_pg(regs)) { role.base.direct = 1; return role; } role.base.efer_nx = ____is_efer_nx(regs); role.base.cr0_wp = ____is_cr0_wp(regs); role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs); role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs); role.base.has_4_byte_gpte = !____is_cr4_pae(regs); if (____is_efer_lma(regs)) role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; else if (____is_cr4_pae(regs)) role.base.level = PT32E_ROOT_LEVEL; else role.base.level = PT32_ROOT_LEVEL; role.ext.cr4_smep = ____is_cr4_smep(regs); role.ext.cr4_smap = ____is_cr4_smap(regs); role.ext.cr4_pse = ____is_cr4_pse(regs); /* PKEY and LA57 are active iff long mode is active. */ role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs); role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs); role.ext.efer_lma = ____is_efer_lma(regs); return role; } void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) { const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP); BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP); BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS)); if (is_cr0_wp(mmu) == cr0_wp) return; mmu->cpu_role.base.cr0_wp = cr0_wp; reset_guest_paging_metadata(vcpu, mmu); } static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu) { /* tdp_root_level is architecture forced level, use it if nonzero */ if (tdp_root_level) return tdp_root_level; /* Use 5-level TDP if and only if it's useful/necessary. */ if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48) return 4; return max_tdp_level; } u8 kvm_mmu_get_max_tdp_level(void) { return tdp_root_level ? 
tdp_root_level : max_tdp_level; } static union kvm_mmu_page_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role) { union kvm_mmu_page_role role = {0}; role.access = ACC_ALL; role.cr0_wp = true; role.efer_nx = true; role.smm = cpu_role.base.smm; role.guest_mode = cpu_role.base.guest_mode; role.ad_disabled = !kvm_ad_enabled; role.level = kvm_mmu_get_tdp_level(vcpu); role.direct = true; role.has_4_byte_gpte = false; return role; } static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role) { struct kvm_mmu *context = &vcpu->arch.root_mmu; union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role); if (cpu_role.as_u64 == context->cpu_role.as_u64 && root_role.word == context->root_role.word) return; context->cpu_role.as_u64 = cpu_role.as_u64; context->root_role.word = root_role.word; context->page_fault = kvm_tdp_page_fault; context->sync_spte = NULL; context->get_guest_pgd = get_guest_cr3; context->get_pdptr = kvm_pdptr_read; context->inject_page_fault = kvm_inject_page_fault; if (!is_cr0_pg(context)) context->gva_to_gpa = nonpaging_gva_to_gpa; else if (is_cr4_pae(context)) context->gva_to_gpa = paging64_gva_to_gpa; else context->gva_to_gpa = paging32_gva_to_gpa; reset_guest_paging_metadata(vcpu, context); reset_tdp_shadow_zero_bits_mask(context); } static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context, union kvm_cpu_role cpu_role, union kvm_mmu_page_role root_role) { if (cpu_role.as_u64 == context->cpu_role.as_u64 && root_role.word == context->root_role.word) return; context->cpu_role.as_u64 = cpu_role.as_u64; context->root_role.word = root_role.word; if (!is_cr0_pg(context)) nonpaging_init_context(context); else if (is_cr4_pae(context)) paging64_init_context(context); else paging32_init_context(context); reset_guest_paging_metadata(vcpu, context); reset_shadow_zero_bits_mask(vcpu, context); } static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role) { struct kvm_mmu *context = &vcpu->arch.root_mmu; union kvm_mmu_page_role root_role; root_role = cpu_role.base; /* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */ root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL); /* * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role. * KVM uses NX when TDP is disabled to handle a variety of scenarios, * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0. * The iTLB multi-hit workaround can be toggled at any time, so assume * NX can be used by any non-nested shadow MMU to avoid having to reset * MMU contexts. */ root_role.efer_nx = true; shadow_mmu_init_context(vcpu, context, cpu_role, root_role); } void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, unsigned long cr4, u64 efer, gpa_t nested_cr3) { struct kvm_mmu *context = &vcpu->arch.guest_mmu; struct kvm_mmu_role_regs regs = { .cr0 = cr0, .cr4 = cr4 & ~X86_CR4_PKE, .efer = efer, }; union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, ®s); union kvm_mmu_page_role root_role; /* NPT requires CR0.PG=1. 
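* Shadow NPT is only used to run an L2, so the computed role can be neither direct (the guest is paging) nor outside guest mode, which is what the WARN_ON_ONCE() below asserts.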
*/ WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode); root_role = cpu_role.base; root_role.level = kvm_mmu_get_tdp_level(vcpu); if (root_role.level == PT64_ROOT_5LEVEL && cpu_role.base.level == PT64_ROOT_4LEVEL) root_role.passthrough = 1; shadow_mmu_init_context(vcpu, context, cpu_role, root_role); kvm_mmu_new_pgd(vcpu, nested_cr3); } EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); static union kvm_cpu_role kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, bool execonly, u8 level) { union kvm_cpu_role role = {0}; /* * KVM does not support SMM transfer monitors, and consequently does not * support the "entry to SMM" control either. role.base.smm is always 0. */ WARN_ON_ONCE(is_smm(vcpu)); role.base.level = level; role.base.has_4_byte_gpte = false; role.base.direct = false; role.base.ad_disabled = !accessed_dirty; role.base.guest_mode = true; role.base.access = ACC_ALL; role.ext.word = 0; role.ext.execonly = execonly; role.ext.valid = 1; return role; } void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, int huge_page_level, bool accessed_dirty, gpa_t new_eptp) { struct kvm_mmu *context = &vcpu->arch.guest_mmu; u8 level = vmx_eptp_page_walk_level(new_eptp); union kvm_cpu_role new_mode = kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, execonly, level); if (new_mode.as_u64 != context->cpu_role.as_u64) { /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */ context->cpu_role.as_u64 = new_mode.as_u64; context->root_role.word = new_mode.base.word; context->page_fault = ept_page_fault; context->gva_to_gpa = ept_gva_to_gpa; context->sync_spte = ept_sync_spte; update_permission_bitmask(context, true); context->pkru_mask = 0; reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level); reset_ept_shadow_zero_bits_mask(context, execonly); } kvm_mmu_new_pgd(vcpu, new_eptp); } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); static void init_kvm_softmmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role) { struct kvm_mmu *context = &vcpu->arch.root_mmu; kvm_init_shadow_mmu(vcpu, cpu_role); context->get_guest_pgd = get_guest_cr3; context->get_pdptr = kvm_pdptr_read; context->inject_page_fault = kvm_inject_page_fault; } static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role new_mode) { struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; if (new_mode.as_u64 == g_context->cpu_role.as_u64) return; g_context->cpu_role.as_u64 = new_mode.as_u64; g_context->get_guest_pgd = get_guest_cr3; g_context->get_pdptr = kvm_pdptr_read; g_context->inject_page_fault = kvm_inject_page_fault; /* * L2 page tables are never shadowed, so there is no need to sync * SPTEs. */ g_context->sync_spte = NULL; /* * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using * L1's nested page tables (e.g. EPT12). The nested translation * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using * L2's page tables as the first level of translation and L1's * nested page tables as the second level of translation. Basically * the gva_to_gpa functions between mmu and nested_mmu are swapped. 
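* In short, resolving an L2 virtual address is a two-stage walk: L2's own page tables first (nested_mmu), then L1's nested page tables (mmu) for the guest-physical addresses produced along the way.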
*/ if (!is_paging(vcpu)) g_context->gva_to_gpa = nonpaging_gva_to_gpa; else if (is_long_mode(vcpu)) g_context->gva_to_gpa = paging64_gva_to_gpa; else if (is_pae(vcpu)) g_context->gva_to_gpa = paging64_gva_to_gpa; else g_context->gva_to_gpa = paging32_gva_to_gpa; reset_guest_paging_metadata(vcpu, g_context); } void kvm_init_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu); union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, ®s); if (mmu_is_nested(vcpu)) init_kvm_nested_mmu(vcpu, cpu_role); else if (tdp_enabled) init_kvm_tdp_mmu(vcpu, cpu_role); else init_kvm_softmmu(vcpu, cpu_role); } EXPORT_SYMBOL_GPL(kvm_init_mmu); void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) { /* * Invalidate all MMU roles to force them to reinitialize as CPUID * information is factored into reserved bit calculations. * * Correctly handling multiple vCPU models with respect to paging and * physical address properties) in a single VM would require tracking * all relevant CPUID information in kvm_mmu_page_role. That is very * undesirable as it would increase the memory requirements for * gfn_write_track (see struct kvm_mmu_page_role comments). For now * that problem is swept under the rug; KVM's CPUID API is horrific and * it's all but impossible to solve it without introducing a new API. */ vcpu->arch.root_mmu.root_role.invalid = 1; vcpu->arch.guest_mmu.root_role.invalid = 1; vcpu->arch.nested_mmu.root_role.invalid = 1; vcpu->arch.root_mmu.cpu_role.ext.valid = 0; vcpu->arch.guest_mmu.cpu_role.ext.valid = 0; vcpu->arch.nested_mmu.cpu_role.ext.valid = 0; kvm_mmu_reset_context(vcpu); /* * Changing guest CPUID after KVM_RUN is forbidden, see the comment in * kvm_arch_vcpu_ioctl(). */ KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm); } void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) { kvm_mmu_unload(vcpu); kvm_init_mmu(vcpu); } EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); int kvm_mmu_load(struct kvm_vcpu *vcpu) { int r; r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct); if (r) goto out; r = mmu_alloc_special_roots(vcpu); if (r) goto out; if (vcpu->arch.mmu->root_role.direct) r = mmu_alloc_direct_roots(vcpu); else r = mmu_alloc_shadow_roots(vcpu); if (r) goto out; kvm_mmu_sync_roots(vcpu); kvm_mmu_load_pgd(vcpu); /* * Flush any TLB entries for the new root, the provenance of the root * is unknown. Even if KVM ensures there are no stale TLB entries * for a freed root, in theory another hypervisor could have left * stale entries. Flushing on alloc also allows KVM to skip the TLB * flush when freeing a root (see kvm_tdp_mmu_put_root()). */ kvm_x86_call(flush_tlb_current)(vcpu); out: return r; } void kvm_mmu_unload(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa)); kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa)); vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); } static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa) { struct kvm_mmu_page *sp; if (!VALID_PAGE(root_hpa)) return false; /* * When freeing obsolete roots, treat roots as obsolete if they don't * have an associated shadow page, as it's impossible to determine if * such roots are fresh or stale. 
This does mean KVM will get false * positives and free roots that don't strictly need to be freed, but * such false positives are relatively rare: * * (a) only PAE paging and nested NPT have roots without shadow pages * (or any shadow paging flavor with a dummy root, see note below) * (b) remote reloads due to a memslot update obsoletes _all_ roots * (c) KVM doesn't track previous roots for PAE paging, and the guest * is unlikely to zap an in-use PGD. * * Note! Dummy roots are unique in that they are obsoleted by memslot * _creation_! See also FNAME(fetch). */ sp = root_to_sp(root_hpa); return !sp || is_obsolete_sp(kvm, sp); } static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu) { unsigned long roots_to_free = 0; int i; if (is_obsolete_root(kvm, mmu->root.hpa)) roots_to_free |= KVM_MMU_ROOT_CURRENT; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa)) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); } if (roots_to_free) kvm_mmu_free_roots(kvm, mmu, roots_to_free); } void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu) { __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu); __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu); } EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots); static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, int *bytes) { u64 gentry = 0; int r; /* * Assume that the pte write on a page table of the same type * as the current vcpu paging mode since we update the sptes only * when they have the same mode. */ if (is_pae(vcpu) && *bytes == 4) { /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ *gpa &= ~(gpa_t)7; *bytes = 8; } if (*bytes == 4 || *bytes == 8) { r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); if (r) gentry = 0; } return gentry; } /* * If we're seeing too many writes to a page, it may no longer be a page table, * or we may be forking, in which case it is better to unmap the page. */ static bool detect_write_flooding(struct kvm_mmu_page *sp) { /* * Skip write-flooding detected for the sp whose level is 1, because * it can become unsync, then the guest page is not write-protected. */ if (sp->role.level == PG_LEVEL_4K) return false; atomic_inc(&sp->write_flooding_count); return atomic_read(&sp->write_flooding_count) >= 3; } /* * Misaligned accesses are too much trouble to fix up; also, they usually * indicate a page is not used as a page table. */ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, int bytes) { unsigned offset, pte_size, misaligned; offset = offset_in_page(gpa); pte_size = sp->role.has_4_byte_gpte ? 4 : 8; /* * Sometimes, the OS only writes the last one bytes to update status * bits, for example, in linux, andb instruction is used in clear_bit(). */ if (!(offset & (pte_size - 1)) && bytes == 1) return false; misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); misaligned |= bytes < 4; return misaligned; } static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) { unsigned page_offset, quadrant; u64 *spte; int level; page_offset = offset_in_page(gpa); level = sp->role.level; *nspte = 1; if (sp->role.has_4_byte_gpte) { page_offset <<= 1; /* 32->64 */ /* * A 32-bit pde maps 4MB while the shadow pdes map * only 2MB. So we need to double the offset again * and zap two pdes instead of one. 
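* For example, a write to the 4-byte guest pde at byte offset 0x10 of its page table lands at byte offset 0x40 of the shadow page and covers two 8-byte shadow pdes.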
*/ if (level == PT32_ROOT_LEVEL) { page_offset &= ~7; /* kill rounding error */ page_offset <<= 1; *nspte = 2; } quadrant = page_offset >> PAGE_SHIFT; page_offset &= ~PAGE_MASK; if (quadrant != sp->role.quadrant) return NULL; } spte = &sp->spt[page_offset / sizeof(*spte)]; return spte; } void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes) { gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm_mmu_page *sp; LIST_HEAD(invalid_list); u64 entry, gentry, *spte; int npte; bool flush = false; /* * When emulating guest writes, ensure the written value is visible to * any task that is handling page faults before checking whether or not * KVM is shadowing a guest PTE. This ensures either KVM will create * the correct SPTE in the page fault handler, or this task will see * a non-zero indirect_shadow_pages. Pairs with the smp_mb() in * account_shadowed(). */ smp_mb(); if (!vcpu->kvm->arch.indirect_shadow_pages) return; write_lock(&vcpu->kvm->mmu_lock); gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); ++vcpu->kvm->stat.mmu_pte_write; for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) { if (detect_write_misaligned(sp, gpa, bytes) || detect_write_flooding(sp)) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); ++vcpu->kvm->stat.mmu_flooded; continue; } spte = get_written_sptes(sp, gpa, &npte); if (!spte) continue; while (npte--) { entry = *spte; mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); if (gentry && sp->role.level != PG_LEVEL_4K) ++vcpu->kvm->stat.mmu_pde_zapped; if (is_shadow_present_pte(entry)) flush = true; ++spte; } } kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); write_unlock(&vcpu->kvm->mmu_lock); } static bool is_write_to_guest_page_table(u64 error_code) { const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK; return (error_code & mask) == mask; } static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, int *emulation_type) { bool direct = vcpu->arch.mmu->root_role.direct; /* * Do not try to unprotect and retry if the vCPU re-faulted on the same * RIP with the same address that was previously unprotected, as doing * so will likely put the vCPU into an infinite loop. E.g. if the vCPU uses * a non-page-table modifying instruction on the PDE that points to the * instruction, then unprotecting the gfn will unmap the instruction's * code, i.e. make it impossible for the instruction to ever complete. */ if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) && vcpu->arch.last_retry_addr == cr2_or_gpa) return RET_PF_EMULATE; /* * Reset the unprotect+retry values that guard against infinite loops. * The values will be refreshed if KVM explicitly unprotects a gfn and * retries, in all other cases it's safe to retry in the future even if * the next page fault happens on the same RIP+address. */ vcpu->arch.last_retry_eip = 0; vcpu->arch.last_retry_addr = 0; /* * It should be impossible to reach this point with an MMIO cache hit, * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid, * writable memslot, and creating a memslot should invalidate the MMIO * cache by way of changing the memslot generation. WARN and disallow * retry if MMIO is detected, as retrying MMIO emulation is pointless * and could put the vCPU into an infinite loop because the processor * will keep faulting on the non-existent MMIO address.
*/ if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct))) return RET_PF_EMULATE; /* * Before emulating the instruction, check to see if the access was due * to a read-only violation while the CPU was walking non-nested NPT * page tables, i.e. for a direct MMU, for _guest_ page tables in L1. * If L1 is sharing (a subset of) its page tables with L2, e.g. by * having nCR3 share lower level page tables with hCR3, then when KVM * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also * unknowingly write-protecting L1's guest page tables, which KVM isn't * shadowing. * * Because the CPU (by default) walks NPT page tables using a write * access (to ensure the CPU can do A/D updates), page walks in L1 can * trigger write faults for the above case even when L1 isn't modifying * PTEs. As a result, KVM will unnecessarily emulate (or at least, try * to emulate) an excessive number of L1 instructions; because L1's MMU * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs * and thus no need to emulate in order to guarantee forward progress. * * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can * proceed without triggering emulation. If one or more shadow pages * was zapped, skip emulation and resume L1 to let it natively execute * the instruction. If no shadow pages were zapped, then the write- * fault is due to something else entirely, i.e. KVM needs to emulate, * as resuming the guest will put it into an infinite loop. * * Note, this code also applies to Intel CPUs, even though it is *very* * unlikely that an L1 will share its page tables (IA32/PAE/paging64 * format) with L2's page tables (EPT format). * * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to * unprotect the gfn and retry if an event is awaiting reinjection. If * KVM emulates multiple instructions before completing event injection, * the event could be delayed beyond what is architecturally allowed, * e.g. KVM could inject an IRQ after the TPR has been raised. */ if (((direct && is_write_to_guest_page_table(error_code)) || (!direct && kvm_event_needs_reinjection(vcpu))) && kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa)) return RET_PF_RETRY; /* * The gfn is write-protected, but if KVM detects it's emulating an * instruction that is unlikely to be used to modify page tables, or if * emulation fails, KVM can try to unprotect the gfn and let the CPU * re-execute the instruction that caused the page fault. Do not allow * retrying an instruction from a nested guest as KVM is only explicitly * shadowing L1's page tables, i.e. unprotecting something for L1 isn't * going to magically fix whatever issue caused L2 to fail. */ if (!is_guest_mode(vcpu)) *emulation_type |= EMULTYPE_ALLOW_RETRY_PF; return RET_PF_EMULATE; } int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len) { int r, emulation_type = EMULTYPE_PF; bool direct = vcpu->arch.mmu->root_role.direct; if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) return RET_PF_RETRY; /* * Except for reserved faults (emulated MMIO is shared-only), set the * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's * current attributes, which are the source of truth for such VMs. Note, * this is wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't * currently support nested virtualization (among many other things) * for software-protected VMs.
*/ if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && !(error_code & PFERR_RSVD_MASK) && vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM && kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa))) error_code |= PFERR_PRIVATE_ACCESS; r = RET_PF_INVALID; if (unlikely(error_code & PFERR_RSVD_MASK)) { if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS)) return -EFAULT; r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct); if (r == RET_PF_EMULATE) goto emulate; } if (r == RET_PF_INVALID) { vcpu->stat.pf_taken++; r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false, &emulation_type, NULL); if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm)) return -EIO; } if (r < 0) return r; if (r == RET_PF_WRITE_PROTECTED) r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code, &emulation_type); if (r == RET_PF_FIXED) vcpu->stat.pf_fixed++; else if (r == RET_PF_EMULATE) vcpu->stat.pf_emulate++; else if (r == RET_PF_SPURIOUS) vcpu->stat.pf_spurious++; /* * None of handle_mmio_page_fault(), kvm_mmu_do_page_fault(), or * kvm_mmu_write_protect_fault() return RET_PF_CONTINUE. * kvm_mmu_do_page_fault() only uses RET_PF_CONTINUE internally to * indicate continuing the page fault handling until to the final * page table mapping phase. */ WARN_ON_ONCE(r == RET_PF_CONTINUE); if (r != RET_PF_EMULATE) return r; emulate: return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, insn_len); } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg) { u64 sptes[PT64_ROOT_MAX_LEVEL + 1]; int root_level, leaf, level; leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level); if (unlikely(leaf < 0)) return; pr_err("%s %llx", msg, gpa); for (level = root_level; level >= leaf; level--) pr_cont(", spte[%d] = 0x%llx", level, sptes[level]); pr_cont("\n"); } EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes); static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, hpa_t root_hpa) { struct kvm_shadow_walk_iterator iterator; vcpu_clear_mmio_info(vcpu, addr); /* * Walking and synchronizing SPTEs both assume they are operating in * the context of the current MMU, and would need to be reworked if * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT. */ if (WARN_ON_ONCE(mmu != vcpu->arch.mmu)) return; if (!VALID_PAGE(root_hpa)) return; write_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep); if (sp->unsync) { int ret = kvm_sync_spte(vcpu, sp, iterator.index); if (ret < 0) mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL); if (ret) kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep); } if (!sp->unsync_children) break; } write_unlock(&vcpu->kvm->mmu_lock); } void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots) { int i; WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL); /* It's actually a GPA for vcpu->arch.guest_mmu. */ if (mmu != &vcpu->arch.guest_mmu) { /* INVLPG on a non-canonical address is a NOP according to the SDM. 
*/ if (is_noncanonical_invlpg_address(addr, vcpu)) return; kvm_x86_call(flush_tlb_gva)(vcpu, addr); } if (!mmu->sync_spte) return; if (roots & KVM_MMU_ROOT_CURRENT) __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (roots & KVM_MMU_ROOT_PREVIOUS(i)) __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); } } EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { /* * INVLPG is required to invalidate any global mappings for the VA, * irrespective of PCID. Blindly sync all roots as it would take * roughly the same amount of work/time to determine whether any of the * previous roots have a global mapping. * * Mappings not reachable via the current or previous cached roots will * be synced when switching to that new cr3, so nothing needs to be * done here for them. */ kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL); ++vcpu->stat.invlpg; } EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) { struct kvm_mmu *mmu = vcpu->arch.mmu; unsigned long roots = 0; uint i; if (pcid == kvm_get_active_pcid(vcpu)) roots |= KVM_MMU_ROOT_CURRENT; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (VALID_PAGE(mmu->prev_roots[i].hpa) && pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) roots |= KVM_MMU_ROOT_PREVIOUS(i); } if (roots) kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots); ++vcpu->stat.invlpg; /* * Mappings not reachable via the current cr3 or the prev_roots will be * synced when switching to that cr3, so nothing needs to be done here * for them. */ } void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, int tdp_max_root_level, int tdp_huge_page_level) { tdp_enabled = enable_tdp; tdp_root_level = tdp_forced_root_level; max_tdp_level = tdp_max_root_level; #ifdef CONFIG_X86_64 tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled; #endif /* * max_huge_page_level reflects KVM's MMU capabilities irrespective * of kernel support, e.g. KVM may be capable of using 1GB pages when * the kernel is not. But, KVM never creates a page size greater than * what is used by the kernel for any given HVA, i.e. the kernel's * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust(). */ if (tdp_enabled) max_huge_page_level = tdp_huge_page_level; else if (boot_cpu_has(X86_FEATURE_GBPAGES)) max_huge_page_level = PG_LEVEL_1G; else max_huge_page_level = PG_LEVEL_2M; } EXPORT_SYMBOL_GPL(kvm_configure_mmu); static void free_mmu_pages(struct kvm_mmu *mmu) { if (!tdp_enabled && mmu->pae_root) set_memory_encrypted((unsigned long)mmu->pae_root, 1); free_page((unsigned long)mmu->pae_root); free_page((unsigned long)mmu->pml4_root); free_page((unsigned long)mmu->pml5_root); } static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) { struct page *page; int i; mmu->root.hpa = INVALID_PAGE; mmu->root.pgd = 0; mmu->mirror_root_hpa = INVALID_PAGE; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */ if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu) return 0; /* * When using PAE paging, the four PDPTEs are treated as 'root' pages, * while the PDP table is a per-vCPU construct that's allocated at MMU * creation. When emulating 32-bit mode, cr3 is only 32 bits even on * x86_64. Therefore we need to allocate the PDP table in the first * 4GB of memory, which happens to fit the DMA32 zone. 
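* Hence the __GFP_DMA32 flag on the allocation below.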
TDP paging * generally doesn't use PAE paging and can skip allocating the PDP * table. The main exception, handled here, is SVM's 32-bit NPT. The * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit * KVM; that horror is handled on-demand by mmu_alloc_special_roots(). */ if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) return 0; page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32); if (!page) return -ENOMEM; mmu->pae_root = page_address(page); /* * CR3 is only 32 bits when PAE paging is used, thus it's impossible to * get the CPU to treat the PDPTEs as encrypted. Decrypt the page so * that KVM's writes and the CPU's reads get along. Note, this is * only necessary when using shadow paging, as 64-bit NPT can get at * the C-bit even when shadowing 32-bit NPT, and SME isn't supported * by 32-bit kernels (when KVM itself uses 32-bit NPT). */ if (!tdp_enabled) set_memory_decrypted((unsigned long)mmu->pae_root, 1); else WARN_ON_ONCE(shadow_me_value); for (i = 0; i < 4; ++i) mmu->pae_root[i] = INVALID_PAE_ROOT; return 0; } int kvm_mmu_create(struct kvm_vcpu *vcpu) { int ret; vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache; vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO; vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache; vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO; vcpu->arch.mmu_shadow_page_cache.init_value = SHADOW_NONPRESENT_VALUE; if (!vcpu->arch.mmu_shadow_page_cache.init_value) vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO; vcpu->arch.mmu = &vcpu->arch.root_mmu; vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu); if (ret) return ret; ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu); if (ret) goto fail_allocate_root; return ret; fail_allocate_root: free_mmu_pages(&vcpu->arch.guest_mmu); return ret; } #define BATCH_ZAP_PAGES 10 static void kvm_zap_obsolete_pages(struct kvm *kvm) { struct kvm_mmu_page *sp, *node; int nr_zapped, batch = 0; LIST_HEAD(invalid_list); bool unstable; lockdep_assert_held(&kvm->slots_lock); restart: list_for_each_entry_safe_reverse(sp, node, &kvm->arch.active_mmu_pages, link) { /* * No obsolete valid page exists before a newly created page * since active_mmu_pages is a FIFO list. */ if (!is_obsolete_sp(kvm, sp)) break; /* * Invalid pages should never land back on the list of active * pages. Skip the bogus page, otherwise we'll get stuck in an * infinite loop if the page gets put back on the list (again). */ if (WARN_ON_ONCE(sp->role.invalid)) continue; /* * No need to flush the TLB since we're only zapping shadow * pages with an obsolete generation number and all vCPUS have * loaded a new root, i.e. the shadow pages being zapped cannot * be in active use by the guest. */ if (batch >= BATCH_ZAP_PAGES && cond_resched_rwlock_write(&kvm->mmu_lock)) { batch = 0; goto restart; } unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &nr_zapped); batch += nr_zapped; if (unstable) goto restart; } /* * Kick all vCPUs (via remote TLB flush) before freeing the page tables * to ensure KVM is not in the middle of a lockless shadow page table * walk, which may reference the pages. The remote TLB flush itself is * not required and is simply a convenient way to kick vCPUs as needed. * KVM performs a local TLB flush when allocating a new root (see * kvm_mmu_load()), and the reload in the caller ensure no vCPUs are * running with an obsolete MMU. 
*/ kvm_mmu_commit_zap_page(kvm, &invalid_list); } /* * Fast invalidate all shadow pages and use lock-break technique * to zap obsolete pages. * * It's required when memslot is being deleted or VM is being * destroyed, in these cases, we should ensure that KVM MMU does * not use any resource of the being-deleted slot or all slots * after calling the function. */ static void kvm_mmu_zap_all_fast(struct kvm *kvm) { lockdep_assert_held(&kvm->slots_lock); write_lock(&kvm->mmu_lock); trace_kvm_mmu_zap_all_fast(kvm); /* * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is * held for the entire duration of zapping obsolete pages, it's * impossible for there to be multiple invalid generations associated * with *valid* shadow pages at any given time, i.e. there is exactly * one valid generation and (at most) one invalid generation. */ kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; /* * In order to ensure all vCPUs drop their soon-to-be invalid roots, * invalidating TDP MMU roots must be done while holding mmu_lock for * write and in the same critical section as making the reload request, * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield. */ if (tdp_mmu_enabled) { /* * External page tables don't support fast zapping, therefore * their mirrors must be invalidated separately by the caller. */ kvm_tdp_mmu_invalidate_roots(kvm, KVM_DIRECT_ROOTS); } /* * Notify all vcpus to reload its shadow page table and flush TLB. * Then all vcpus will switch to new shadow page table with the new * mmu_valid_gen. * * Note: we need to do this under the protection of mmu_lock, * otherwise, vcpu would purge shadow page but miss tlb flush. */ kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS); kvm_zap_obsolete_pages(kvm); write_unlock(&kvm->mmu_lock); /* * Zap the invalidated TDP MMU roots, all SPTEs must be dropped before * returning to the caller, e.g. if the zap is in response to a memslot * deletion, mmu_notifier callbacks will be unable to reach the SPTEs * associated with the deleted memslot once the update completes, and * Deferring the zap until the final reference to the root is put would * lead to use-after-free. 
*/ if (tdp_mmu_enabled) kvm_tdp_mmu_zap_invalidated_roots(kvm, true); } void kvm_mmu_init_vm(struct kvm *kvm) { kvm->arch.shadow_mmio_value = shadow_mmio_value; INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages); spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); if (tdp_mmu_enabled) kvm_mmu_init_tdp_mmu(kvm); kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache; kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO; kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO; kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache; kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO; } static void mmu_free_vm_memory_caches(struct kvm *kvm) { kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache); kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache); kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache); } void kvm_mmu_uninit_vm(struct kvm *kvm) { if (tdp_mmu_enabled) kvm_mmu_uninit_tdp_mmu(kvm); mmu_free_vm_memory_caches(kvm); } static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) { const struct kvm_memory_slot *memslot; struct kvm_memslots *slots; struct kvm_memslot_iter iter; bool flush = false; gfn_t start, end; int i; if (!kvm_memslots_have_rmaps(kvm)) return flush; for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { slots = __kvm_memslots(kvm, i); kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) { memslot = iter.slot; start = max(gfn_start, memslot->base_gfn); end = min(gfn_end, memslot->base_gfn + memslot->npages); if (WARN_ON_ONCE(start >= end)) continue; flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start, end, true, flush); } } return flush; } /* * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end * (not including it) */ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) { bool flush; if (WARN_ON_ONCE(gfn_end <= gfn_start)) return; write_lock(&kvm->mmu_lock); kvm_mmu_invalidate_begin(kvm); kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end); flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end); if (tdp_mmu_enabled) flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush); if (flush) kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start); kvm_mmu_invalidate_end(kvm); write_unlock(&kvm->mmu_lock); } static bool slot_rmap_write_protect(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) { return rmap_write_protect(rmap_head, false); } void kvm_mmu_slot_remove_write_access(struct kvm *kvm, const struct kvm_memory_slot *memslot, int start_level) { if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect, start_level, KVM_MAX_HUGEPAGE_LEVEL, false); write_unlock(&kvm->mmu_lock); } if (tdp_mmu_enabled) { read_lock(&kvm->mmu_lock); kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level); read_unlock(&kvm->mmu_lock); } } static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min) { return kvm_mmu_memory_cache_nr_free_objects(cache) < min; } static bool need_topup_split_caches_or_resched(struct kvm *kvm) { if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) return true; /* * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are needed * to split a single huge page. Calculating how many are actually needed * is possible but not worth the complexity. 
return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) || need_topup(&kvm->arch.split_page_header_cache, 1) || need_topup(&kvm->arch.split_shadow_page_cache, 1); } static int topup_split_caches(struct kvm *kvm) { /* * Allocating rmap list entries when splitting huge pages for nested * MMUs is uncommon as KVM needs to use a list if and only if there is * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be * aliased by multiple L2 gfns and/or from multiple nested roots with * different roles. Aliasing gfns when using TDP is atypical for VMMs; * a few gfns are often aliased during boot, e.g. when remapping BIOS, * but aliasing rarely occurs post-boot or for many gfns. If there is * only one rmap entry, rmap->val points directly at that one entry and * doesn't need to allocate a list. Buffer the cache by the default * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM * encounters an aliased gfn or two. */ const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS + KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE; int r; lockdep_assert_held(&kvm->slots_lock); r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity, SPLIT_DESC_CACHE_MIN_NR_OBJECTS); if (r) return r; r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1); if (r) return r; return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1); } static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep) { struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep); struct shadow_page_caches caches = {}; union kvm_mmu_page_role role; unsigned int access; gfn_t gfn; gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep)); access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep)); /* * Note, huge page splitting always uses direct shadow pages, regardless * of whether the huge page itself is mapped by a direct or indirect * shadow page, since the huge page region itself is being directly * mapped with smaller pages. */ role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access); /* Direct SPs do not require a shadowed_info_cache. */ caches.page_header_cache = &kvm->arch.split_page_header_cache; caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache; /* Safe to pass NULL for vCPU since requesting a direct SP. */ return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role); } static void shadow_mmu_split_huge_page(struct kvm *kvm, const struct kvm_memory_slot *slot, u64 *huge_sptep) { struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache; u64 huge_spte = READ_ONCE(*huge_sptep); struct kvm_mmu_page *sp; bool flush = false; u64 *sptep, spte; gfn_t gfn; int index; sp = shadow_mmu_get_sp_for_split(kvm, huge_sptep); for (index = 0; index < SPTE_ENT_PER_PAGE; index++) { sptep = &sp->spt[index]; gfn = kvm_mmu_page_get_gfn(sp, index); /* * The SP may already have populated SPTEs, e.g. if this huge * page is aliased by multiple sptes with the same access * permissions. These entries are guaranteed to map the same * gfn-to-pfn translation since the SP is direct, so no need to * modify them. * * However, if a given SPTE points to a lower level page table, * that lower level page table may only be partially populated. * Installing such SPTEs would effectively unmap a portion of the * huge page. Unmapping guest memory always requires a TLB flush * since a subsequent operation on the unmapped regions would * fail to detect the need to flush.
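* That is why 'flush' is tracked below: a pre-existing non-leaf SPTE may map only part of what the huge page mapped, so a TLB flush is forced when the new table is linked in via __link_shadow_page().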
*/ if (is_shadow_present_pte(*sptep)) { flush |= !is_last_spte(*sptep, sp->role.level); continue; } spte = make_small_spte(kvm, huge_spte, sp->role, index); mmu_spte_set(sptep, spte); __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access); } __link_shadow_page(kvm, cache, huge_sptep, sp, flush); } static int shadow_mmu_try_split_huge_page(struct kvm *kvm, const struct kvm_memory_slot *slot, u64 *huge_sptep) { struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep); int level, r = 0; gfn_t gfn; u64 spte; /* Grab information for the tracepoint before dropping the MMU lock. */ gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep)); level = huge_sp->role.level; spte = *huge_sptep; if (kvm_mmu_available_pages(kvm) <= KVM_MIN_FREE_MMU_PAGES) { r = -ENOSPC; goto out; } if (need_topup_split_caches_or_resched(kvm)) { write_unlock(&kvm->mmu_lock); cond_resched(); /* * If the topup succeeds, return -EAGAIN to indicate that the * rmap iterator should be restarted because the MMU lock was * dropped. */ r = topup_split_caches(kvm) ?: -EAGAIN; write_lock(&kvm->mmu_lock); goto out; } shadow_mmu_split_huge_page(kvm, slot, huge_sptep); out: trace_kvm_mmu_split_huge_page(gfn, spte, level, r); return r; } static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) { struct rmap_iterator iter; struct kvm_mmu_page *sp; u64 *huge_sptep; int r; restart: for_each_rmap_spte(rmap_head, &iter, huge_sptep) { sp = sptep_to_sp(huge_sptep); /* TDP MMU is enabled, so rmap only contains nested MMU SPs. */ if (WARN_ON_ONCE(!sp->role.guest_mode)) continue; /* The rmaps should never contain non-leaf SPTEs. */ if (WARN_ON_ONCE(!is_large_pte(*huge_sptep))) continue; /* SPs with level >PG_LEVEL_4K should never by unsync. */ if (WARN_ON_ONCE(sp->unsync)) continue; /* Don't bother splitting huge pages on invalid SPs. */ if (sp->role.invalid) continue; r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep); /* * The split succeeded or needs to be retried because the MMU * lock was dropped. Either way, restart the iterator to get it * back into a consistent state. */ if (!r || r == -EAGAIN) goto restart; /* The split failed and shouldn't be retried (e.g. -ENOMEM). */ break; } return false; } static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level) { int level; /* * Split huge pages starting with KVM_MAX_HUGEPAGE_LEVEL and working * down to the target level. This ensures pages are recursively split * all the way to the target level. There's no need to split pages * already at the target level. */ for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--) __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages, level, level, start, end - 1, true, true, false); } /* Must be called with the mmu_lock held in write-mode. */ void kvm_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *memslot, u64 start, u64 end, int target_level) { if (!tdp_mmu_enabled) return; if (kvm_memslots_have_rmaps(kvm)) kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level); kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false); /* * A TLB flush is unnecessary at this point for the same reasons as in * kvm_mmu_slot_try_split_huge_pages(). 
*/ } void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *memslot, int target_level) { u64 start = memslot->base_gfn; u64 end = start + memslot->npages; if (!tdp_mmu_enabled) return; if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level); write_unlock(&kvm->mmu_lock); } read_lock(&kvm->mmu_lock); kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true); read_unlock(&kvm->mmu_lock); /* * No TLB flush is necessary here. KVM will flush TLBs after * write-protecting and/or clearing dirty on the newly split SPTEs to * ensure that guest writes are reflected in the dirty log before the * ioctl to enable dirty logging on this memslot completes. Since the * split SPTEs retain the write and dirty bits of the huge SPTE, it is * safe for KVM to decide if a TLB flush is necessary based on the split * SPTEs. */ } static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) { u64 *sptep; struct rmap_iterator iter; int need_tlb_flush = 0; struct kvm_mmu_page *sp; restart: for_each_rmap_spte(rmap_head, &iter, sptep) { sp = sptep_to_sp(sptep); /* * We cannot do huge page mapping for indirect shadow pages, * which are found on the last rmap (level = 1) when not using * tdp; such shadow pages are synced with the page table in * the guest, and the guest page table is using 4K page size * mapping if the indirect sp has level = 1. */ if (sp->role.direct && sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn)) { kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); if (kvm_available_flush_remote_tlbs_range()) kvm_flush_remote_tlbs_sptep(kvm, sptep); else need_tlb_flush = 1; goto restart; } } return need_tlb_flush; } EXPORT_SYMBOL_GPL(kvm_zap_gfn_range); static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot) { /* * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap * pages that are already mapped at the maximum hugepage level. */ if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true)) kvm_flush_remote_tlbs_memslot(kvm, slot); } void kvm_mmu_recover_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot) { if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); kvm_rmap_zap_collapsible_sptes(kvm, slot); write_unlock(&kvm->mmu_lock); } if (tdp_mmu_enabled) { read_lock(&kvm->mmu_lock); kvm_tdp_mmu_recover_huge_pages(kvm, slot); read_unlock(&kvm->mmu_lock); } } void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, const struct kvm_memory_slot *memslot) { if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); /* * Clear dirty bits only on 4k SPTEs since the legacy MMU only * support dirty logging at a 4k granularity. */ walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false); write_unlock(&kvm->mmu_lock); } if (tdp_mmu_enabled) { read_lock(&kvm->mmu_lock); kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); read_unlock(&kvm->mmu_lock); } /* * The caller will flush the TLBs after this function returns. * * It's also safe to flush TLBs out of mmu lock here as currently this * function is only used for dirty logging, in which case flushing TLB * out of mmu lock also guarantees no dirty pages will be lost in * dirty_bitmap. 
*/ } static void kvm_mmu_zap_all(struct kvm *kvm) { struct kvm_mmu_page *sp, *node; LIST_HEAD(invalid_list); int ign; write_lock(&kvm->mmu_lock); restart: list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { if (WARN_ON_ONCE(sp->role.invalid)) continue; if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) goto restart; if (cond_resched_rwlock_write(&kvm->mmu_lock)) goto restart; } kvm_mmu_commit_zap_page(kvm, &invalid_list); if (tdp_mmu_enabled) kvm_tdp_mmu_zap_all(kvm); write_unlock(&kvm->mmu_lock); } void kvm_arch_flush_shadow_all(struct kvm *kvm) { kvm_mmu_zap_all(kvm); } static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm, struct kvm_memory_slot *slot, bool flush) { LIST_HEAD(invalid_list); unsigned long i; if (list_empty(&kvm->arch.active_mmu_pages)) goto out_flush; /* * Since accounting information is stored in struct kvm_arch_memory_slot, * all MMU pages that are shadowing guest PTEs must be zapped before the * memslot is deleted, as freeing such pages after the memslot is freed * will result in use-after-free, e.g. in unaccount_shadowed(). */ for (i = 0; i < slot->npages; i++) { struct kvm_mmu_page *sp; gfn_t gfn = slot->base_gfn + i; for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); flush = false; cond_resched_rwlock_write(&kvm->mmu_lock); } } out_flush: kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); } static void kvm_mmu_zap_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { struct kvm_gfn_range range = { .slot = slot, .start = slot->base_gfn, .end = slot->base_gfn + slot->npages, .may_block = true, }; bool flush; write_lock(&kvm->mmu_lock); flush = kvm_unmap_gfn_range(kvm, &range); kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush); write_unlock(&kvm->mmu_lock); } static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm) { return kvm->arch.vm_type == KVM_X86_DEFAULT_VM && kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { if (kvm_memslot_flush_zap_all(kvm)) kvm_mmu_zap_all_fast(kvm); else kvm_mmu_zap_memslot(kvm, slot); } void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) { WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); gen &= MMIO_SPTE_GEN_MASK; /* * Generation numbers are incremented in multiples of the number of * address spaces in order to provide unique generations across all * address spaces. Strip what is effectively the address space * modifier prior to checking for a wrap of the MMIO generation so * that a wrap in any address space is detected. */ gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1); /* * The very rare case: if the MMIO generation number has wrapped, * zap all shadow pages. */ if (unlikely(gen == 0)) { kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n"); kvm_mmu_zap_all_fast(kvm); } } static void mmu_destroy_caches(void) { kmem_cache_destroy(pte_list_desc_cache); kmem_cache_destroy(mmu_page_header_cache); } static void kvm_wake_nx_recovery_thread(struct kvm *kvm) { /* * The NX recovery thread is spawned on-demand at the first KVM_RUN and * may not be valid even though the VM is globally visible. Do nothing, * as such a VM can't have any possible NX huge pages. 
*/ struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread); if (nx_thread) vhost_task_wake(nx_thread); } static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp) { if (nx_hugepage_mitigation_hard_disabled) return sysfs_emit(buffer, "never\n"); return param_get_bool(buffer, kp); } static bool get_nx_auto_mode(void) { /* Return true when CPU has the bug, and mitigations are ON */ return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); } static void __set_nx_huge_pages(bool val) { nx_huge_pages = itlb_multihit_kvm_mitigation = val; } static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) { bool old_val = nx_huge_pages; bool new_val; if (nx_hugepage_mitigation_hard_disabled) return -EPERM; /* In "auto" mode deploy workaround only if CPU has the bug. */ if (sysfs_streq(val, "off")) { new_val = 0; } else if (sysfs_streq(val, "force")) { new_val = 1; } else if (sysfs_streq(val, "auto")) { new_val = get_nx_auto_mode(); } else if (sysfs_streq(val, "never")) { new_val = 0; mutex_lock(&kvm_lock); if (!list_empty(&vm_list)) { mutex_unlock(&kvm_lock); return -EBUSY; } nx_hugepage_mitigation_hard_disabled = true; mutex_unlock(&kvm_lock); } else if (kstrtobool(val, &new_val) < 0) { return -EINVAL; } __set_nx_huge_pages(new_val); if (new_val != old_val) { struct kvm *kvm; mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { mutex_lock(&kvm->slots_lock); kvm_mmu_zap_all_fast(kvm); mutex_unlock(&kvm->slots_lock); kvm_wake_nx_recovery_thread(kvm); } mutex_unlock(&kvm_lock); } return 0; } /* * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as * its default value of -1 is technically undefined behavior for a boolean. * Forward the module init call to SPTE code so that it too can handle module * params that need to be resolved/snapshot. */ void __init kvm_mmu_x86_module_init(void) { if (nx_huge_pages == -1) __set_nx_huge_pages(get_nx_auto_mode()); /* * Snapshot userspace's desire to enable the TDP MMU. Whether or not the * TDP MMU is actually enabled is determined in kvm_configure_mmu() * when the vendor module is loaded. */ tdp_mmu_allowed = tdp_mmu_enabled; kvm_mmu_spte_module_init(); } /* * The bulk of the MMU initialization is deferred until the vendor module is * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need * to be reset when a potentially different vendor module is loaded. */ int kvm_mmu_vendor_module_init(void) { int ret = -ENOMEM; /* * MMU roles use union aliasing which is, generally speaking, an * undefined behavior. However, we supposedly know how compilers behave * and the current status quo is unlikely to change. Guardians below are * supposed to let us know if the assumption becomes false. 
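* Concretely, the BUILD_BUG_ON()s below pin kvm_mmu_page_role and kvm_mmu_extended_role to 32 bits and kvm_cpu_role to 64 bits.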
*/ BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64)); kvm_mmu_reset_all_pte_masks(); pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT); if (!pte_list_desc_cache) goto out; mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", sizeof(struct kvm_mmu_page), 0, SLAB_ACCOUNT, NULL); if (!mmu_page_header_cache) goto out; return 0; out: mmu_destroy_caches(); return ret; } void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { kvm_mmu_unload(vcpu); if (tdp_mmu_enabled) { read_lock(&vcpu->kvm->mmu_lock); mmu_free_root_page(vcpu->kvm, &vcpu->arch.mmu->mirror_root_hpa, NULL); read_unlock(&vcpu->kvm->mmu_lock); } free_mmu_pages(&vcpu->arch.root_mmu); free_mmu_pages(&vcpu->arch.guest_mmu); mmu_free_memory_caches(vcpu); } void kvm_mmu_vendor_module_exit(void) { mmu_destroy_caches(); } /* * Calculate the effective recovery period, accounting for '0' meaning "let KVM * select a halving time of 1 hour". Returns true if recovery is enabled. */ static bool calc_nx_huge_pages_recovery_period(uint *period) { /* * Use READ_ONCE to get the params, this may be called outside of the * param setters, e.g. by the kthread to compute its next timeout. */ bool enabled = READ_ONCE(nx_huge_pages); uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio); if (!enabled || !ratio) return false; *period = READ_ONCE(nx_huge_pages_recovery_period_ms); if (!*period) { /* Make sure the period is not less than one second. */ ratio = min(ratio, 3600u); *period = 60 * 60 * 1000 / ratio; } return true; } static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp) { bool was_recovery_enabled, is_recovery_enabled; uint old_period, new_period; int err; if (nx_hugepage_mitigation_hard_disabled) return -EPERM; was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period); err = param_set_uint(val, kp); if (err) return err; is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period); if (is_recovery_enabled && (!was_recovery_enabled || old_period > new_period)) { struct kvm *kvm; mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_wake_nx_recovery_thread(kvm); mutex_unlock(&kvm_lock); } return err; } static void kvm_recover_nx_huge_pages(struct kvm *kvm) { unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits; struct kvm_memory_slot *slot; int rcu_idx; struct kvm_mmu_page *sp; unsigned int ratio; LIST_HEAD(invalid_list); bool flush = false; ulong to_zap; rcu_idx = srcu_read_lock(&kvm->srcu); write_lock(&kvm->mmu_lock); /* * Zapping TDP MMU shadow pages, including the remote TLB flush, must * be done under RCU protection, because the pages are freed via RCU * callback. */ rcu_read_lock(); ratio = READ_ONCE(nx_huge_pages_recovery_ratio); to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0; for ( ; to_zap; --to_zap) { if (list_empty(&kvm->arch.possible_nx_huge_pages)) break; /* * We use a separate list instead of just using active_mmu_pages * because the number of shadow pages that be replaced with an * NX huge page is expected to be relatively small compared to * the total number of shadow pages. And because the TDP MMU * doesn't use active_mmu_pages. 
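 * Each recovery pass visits at most DIV_ROUND_UP(nx_lpage_splits, ratio) entries from the head of that list, so the work done under mmu_lock per invocation stays bounded.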
*/ sp = list_first_entry(&kvm->arch.possible_nx_huge_pages, struct kvm_mmu_page, possible_nx_huge_page_link); WARN_ON_ONCE(!sp->nx_huge_page_disallowed); WARN_ON_ONCE(!sp->role.direct); /* * Unaccount and do not attempt to recover any NX Huge Pages * that are being dirty tracked, as they would just be faulted * back in as 4KiB pages. The NX Huge Pages in this slot will be * recovered, along with all the other huge pages in the slot, * when dirty logging is disabled. * * Since gfn_to_memslot() is relatively expensive, it helps to * skip it if it the test cannot possibly return true. On the * other hand, if any memslot has logging enabled, chances are * good that all of them do, in which case unaccount_nx_huge_page() * is much cheaper than zapping the page. * * If a memslot update is in progress, reading an incorrect value * of kvm->nr_memslots_dirty_logging is not a problem: if it is * becoming zero, gfn_to_memslot() will be done unnecessarily; if * it is becoming nonzero, the page will be zapped unnecessarily. * Either way, this only affects efficiency in racy situations, * and not correctness. */ slot = NULL; if (atomic_read(&kvm->nr_memslots_dirty_logging)) { struct kvm_memslots *slots; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, sp->gfn); WARN_ON_ONCE(!slot); } if (slot && kvm_slot_dirty_track_enabled(slot)) unaccount_nx_huge_page(kvm, sp); else if (is_tdp_mmu_page(sp)) flush |= kvm_tdp_mmu_zap_sp(kvm, sp); else kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); WARN_ON_ONCE(sp->nx_huge_page_disallowed); if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); rcu_read_unlock(); cond_resched_rwlock_write(&kvm->mmu_lock); flush = false; rcu_read_lock(); } } kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); rcu_read_unlock(); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, rcu_idx); } static void kvm_nx_huge_page_recovery_worker_kill(void *data) { } static bool kvm_nx_huge_page_recovery_worker(void *data) { struct kvm *kvm = data; bool enabled; uint period; long remaining_time; enabled = calc_nx_huge_pages_recovery_period(&period); if (!enabled) return false; remaining_time = kvm->arch.nx_huge_page_last + msecs_to_jiffies(period) - get_jiffies_64(); if (remaining_time > 0) { schedule_timeout(remaining_time); /* check for signals and come back */ return true; } __set_current_state(TASK_RUNNING); kvm_recover_nx_huge_pages(kvm); kvm->arch.nx_huge_page_last = get_jiffies_64(); return true; } static int kvm_mmu_start_lpage_recovery(struct once *once) { struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once); struct kvm *kvm = container_of(ka, struct kvm, arch); struct vhost_task *nx_thread; kvm->arch.nx_huge_page_last = get_jiffies_64(); nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill, kvm, "kvm-nx-lpage-recovery"); if (IS_ERR(nx_thread)) return PTR_ERR(nx_thread); vhost_task_start(nx_thread); /* Make the task visible only once it is fully started. 
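kvm_wake_nx_recovery_thread() loads the pointer with READ_ONCE() and simply does nothing while it is still NULL, so the WRITE_ONCE() below is the only publication step required.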
*/ WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread); return 0; } int kvm_mmu_post_init_vm(struct kvm *kvm) { if (nx_hugepage_mitigation_hard_disabled) return 0; return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery); } void kvm_mmu_pre_destroy_vm(struct kvm *kvm) { if (kvm->arch.nx_huge_page_recovery_thread) vhost_task_stop(kvm->arch.nx_huge_page_recovery_thread); } #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) { return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG; } static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) { lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG; } static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) { lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG; } bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, struct kvm_gfn_range *range) { struct kvm_memory_slot *slot = range->slot; int level; /* * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM * can simply ignore such slots. But if userspace is making memory * PRIVATE, then KVM must prevent the guest from accessing the memory * as shared. And if userspace is making memory SHARED and this point * is reached, then at least one page within the range was previously * PRIVATE, i.e. the slot's possible hugepage ranges are changing. * Zapping SPTEs in this case ensures KVM will reassess whether or not * a hugepage can be used for affected ranges. */ if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm))) return false; if (WARN_ON_ONCE(range->end <= range->start)) return false; /* * If the head and tail pages of the range currently allow a hugepage, * i.e. reside fully in the slot and don't have mixed attributes, then * add each corresponding hugepage range to the ongoing invalidation, * e.g. to prevent KVM from creating a hugepage in response to a fault * for a gfn whose attributes aren't changing. Note, only the range * of gfns whose attributes are being modified needs to be explicitly * unmapped, as that will unmap any existing hugepages. */ for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) { gfn_t start = gfn_round_for_level(range->start, level); gfn_t end = gfn_round_for_level(range->end - 1, level); gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level); if ((start != range->start || start + nr_pages > range->end) && start >= slot->base_gfn && start + nr_pages <= slot->base_gfn + slot->npages && !hugepage_test_mixed(slot, start, level)) kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages); if (end == start) continue; if ((end + nr_pages) > range->end && (end + nr_pages) <= (slot->base_gfn + slot->npages) && !hugepage_test_mixed(slot, end, level)) kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages); } /* Unmap the old attribute page. 
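Only mappings whose current attribute is being changed away are zapped: converting a range to PRIVATE unmaps its existing SHARED mappings, and converting to SHARED unmaps its existing PRIVATE mappings (hence the attr_filter selection below).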
*/ if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE) range->attr_filter = KVM_FILTER_SHARED; else range->attr_filter = KVM_FILTER_PRIVATE; return kvm_unmap_gfn_range(kvm, range); } static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long attrs) { const unsigned long start = gfn; const unsigned long end = start + KVM_PAGES_PER_HPAGE(level); if (level == PG_LEVEL_2M) return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs); for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) { if (hugepage_test_mixed(slot, gfn, level - 1) || attrs != kvm_get_memory_attributes(kvm, gfn)) return false; } return true; } bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, struct kvm_gfn_range *range) { unsigned long attrs = range->arg.attributes; struct kvm_memory_slot *slot = range->slot; int level; lockdep_assert_held_write(&kvm->mmu_lock); lockdep_assert_held(&kvm->slots_lock); /* * Calculate which ranges can be mapped with hugepages even if the slot * can't map memory PRIVATE. KVM mustn't create a SHARED hugepage over * a range that has PRIVATE GFNs, and conversely converting a range to * SHARED may now allow hugepages. */ if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm))) return false; /* * The sequence matters here: upper levels consume the result of lower * level's scanning. */ for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) { gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level); gfn_t gfn = gfn_round_for_level(range->start, level); /* Process the head page if it straddles the range. */ if (gfn != range->start || gfn + nr_pages > range->end) { /* * Skip mixed tracking if the aligned gfn isn't covered * by the memslot, KVM can't use a hugepage due to the * misaligned address regardless of memory attributes. */ if (gfn >= slot->base_gfn && gfn + nr_pages <= slot->base_gfn + slot->npages) { if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) hugepage_clear_mixed(slot, gfn, level); else hugepage_set_mixed(slot, gfn, level); } gfn += nr_pages; } /* * Pages entirely covered by the range are guaranteed to have * only the attributes which were just set. */ for ( ; gfn + nr_pages <= range->end; gfn += nr_pages) hugepage_clear_mixed(slot, gfn, level); /* * Process the last tail page if it straddles the range and is * contained by the memslot. Like the head page, KVM can't * create a hugepage if the slot size is misaligned. */ if (gfn < range->end && (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) { if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) hugepage_clear_mixed(slot, gfn, level); else hugepage_set_mixed(slot, gfn, level); } } return false; } void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm, struct kvm_memory_slot *slot) { int level; if (!kvm_arch_has_private_mem(kvm)) return; for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) { /* * Don't bother tracking mixed attributes for pages that can't * be huge due to alignment, i.e. process only pages that are * entirely contained by the memslot. */ gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level); gfn_t start = gfn_round_for_level(slot->base_gfn, level); gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level); gfn_t gfn; if (start < slot->base_gfn) start += nr_pages; /* * Unlike setting attributes, every potential hugepage needs to * be manually checked as the attributes may already be mixed. 
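 * (When attributes are set via kvm_arch_post_set_memory_attributes(), by contrast, pages fully covered by the update are known to be uniform and the mixed flag can be cleared without rescanning.)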
*/ for (gfn = start; gfn < end; gfn += nr_pages) { unsigned long attrs = kvm_get_memory_attributes(kvm, gfn); if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) hugepage_clear_mixed(slot, gfn, level); else hugepage_set_mixed(slot, gfn, level); } } } #endif |
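/*
 * Illustrative sketch, not part of the kernel source above (the helper name
 * is ours): the fallback period computed by calc_nx_huge_pages_recovery_period()
 * when nx_huge_pages_recovery_period_ms is 0.  The ratio is clamped to 3600 so
 * the result never drops below one second; e.g. a ratio of 60 yields
 * 60 * 60 * 1000 / 60 = 60000 ms between recovery passes.  Assumes ratio != 0,
 * which the caller has already checked.
 */
static unsigned int nx_recovery_fallback_period_ms(unsigned int ratio)
{
	if (ratio > 3600u)			/* clamp exactly as the kernel does */
		ratio = 3600u;
	return 60u * 60u * 1000u / ratio;	/* one hour split into 'ratio' slices */
}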
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _KBD_KERN_H #define _KBD_KERN_H #include <linux/tty.h> #include <linux/interrupt.h> #include <linux/keyboard.h> extern char *func_table[MAX_NR_FUNC]; /* * kbd->xxx contains the VC-local things (flag settings etc..) * * Note: externally visible are LED_SCR, LED_NUM, LED_CAP defined in kd.h * The code in KDGETLED / KDSETLED depends on the internal and * external order being the same. * * Note: lockstate is used as index in the array key_map. */ struct kbd_struct { unsigned char lockstate; /* 8 modifiers - the names do not have any meaning at all; they can be associated to arbitrarily chosen keys */ #define VC_SHIFTLOCK KG_SHIFT /* shift lock mode */ #define VC_ALTGRLOCK KG_ALTGR /* altgr lock mode */ #define VC_CTRLLOCK KG_CTRL /* control lock mode */ #define VC_ALTLOCK KG_ALT /* alt lock mode */ #define VC_SHIFTLLOCK KG_SHIFTL /* shiftl lock mode */ #define VC_SHIFTRLOCK KG_SHIFTR /* shiftr lock mode */ #define VC_CTRLLLOCK KG_CTRLL /* ctrll lock mode */ #define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */ unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. */ unsigned char ledmode:1; #define LED_SHOW_FLAGS 0 /* traditional state */ #define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */ unsigned char ledflagstate:4; /* flags, not lights */ unsigned char default_ledflagstate:4; #define VC_SCROLLOCK 0 /* scroll-lock mode */ #define VC_NUMLOCK 1 /* numeric lock mode */ #define VC_CAPSLOCK 2 /* capslock mode */ #define VC_KANALOCK 3 /* kanalock mode */ unsigned char kbdmode:3; /* one 3-bit value */ #define VC_XLATE 0 /* translate keycodes using keymap */ #define VC_MEDIUMRAW 1 /* medium raw (keycode) mode */ #define VC_RAW 2 /* raw (scancode) mode */ #define VC_UNICODE 3 /* Unicode mode */ #define VC_OFF 4 /* disabled mode */ unsigned char modeflags:5; #define VC_APPLIC 0 /* application key mode */ #define VC_CKMODE 1 /* cursor key mode */ #define VC_REPEAT 2 /* keyboard repeat */ #define VC_CRLF 3 /* 0 - enter sends CR, 1 - enter sends CRLF */ #define VC_META 4 /* 0 - meta, 1 - meta=prefix with ESC */ }; extern int kbd_init(void); extern void setledstate(struct kbd_struct *kbd, unsigned int led); extern int do_poke_blanked_console; extern void (*kbd_ledfunc)(unsigned int led); extern int set_console(int nr); extern void schedule_console_callback(void); static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) { return ((kbd->modeflags >> flag) & 1); } static inline int vc_kbd_led(struct kbd_struct * kbd, int flag) { return ((kbd->ledflagstate >> flag) & 1); } static inline void set_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags |= 1 << flag; } static inline void set_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate |= 1 << flag; } static inline void clr_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags &= ~(1 << flag); } static inline void clr_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate &= ~(1 << flag); } static inline void chg_vc_kbd_lock(struct kbd_struct * kbd, int flag) { kbd->lockstate ^= 1 << flag; } static
inline void chg_vc_kbd_slock(struct kbd_struct * kbd, int flag) { kbd->slockstate ^= 1 << flag; } static inline void chg_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags ^= 1 << flag; } static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate ^= 1 << flag; } #define U(x) ((x) ^ 0xf000) #define BRL_UC_ROW 0x2800 /* keyboard.c */ struct console; void vt_set_leds_compute_shiftstate(void); /* defkeymap.c */ extern unsigned int keymap_count; #endif |
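/*
 * Usage sketch (illustrative only, not part of the header above): the helpers
 * are plain bit operations on the per-console state.  Given a valid
 * struct kbd_struct pointer 'kbd', caps-lock handling looks like:
 *
 *	set_vc_kbd_led(kbd, VC_CAPSLOCK);	// set the caps-lock flag
 *	if (vc_kbd_led(kbd, VC_CAPSLOCK))	// now reads back as 1
 *		chg_vc_kbd_led(kbd, VC_CAPSLOCK); // toggle it off again
 */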
// SPDX-License-Identifier: GPL-2.0-or-later /* * Subdriver for Scopium astro-camera (DTCS033, 0547:7303) * * Copyright (C) 2014 Robert Butora (robert.butora.fi@gmail.com) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "dtcs033" #include "gspca.h" MODULE_AUTHOR("Robert Butora <robert.butora.fi@gmail.com>"); MODULE_DESCRIPTION("Scopium DTCS033 astro-cam USB Camera Driver"); MODULE_LICENSE("GPL"); struct dtcs033_usb_requests { u8 bRequestType; u8 bRequest; u16 wValue; u16 wIndex; u16 wLength; }; /* send a usb request */ static void reg_rw(struct gspca_dev *gspca_dev, u8 bRequestType, u8 bRequest, u16 wValue, u16 wIndex, u16 wLength) { struct usb_device *udev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), bRequest, bRequestType, wValue, wIndex, gspca_dev->usb_buf, wLength, 500); if (ret < 0) { gspca_dev->usb_err = ret; pr_err("usb_control_msg error %d\n", ret); } return; } /* send several usb in/out requests */ static int reg_reqs(struct gspca_dev *gspca_dev, const struct dtcs033_usb_requests *preqs, int n_reqs) { int i = 0; const struct dtcs033_usb_requests *preq; while ((i < n_reqs) && (gspca_dev->usb_err >= 0)) { preq = &preqs[i]; reg_rw(gspca_dev, preq->bRequestType, preq->bRequest, preq->wValue, preq->wIndex, preq->wLength); if (gspca_dev->usb_err < 0) { gspca_err(gspca_dev, "usb error request no: %d / %d\n", i, n_reqs); } else if (preq->bRequestType & USB_DIR_IN) { gspca_dbg(gspca_dev, D_STREAM, "USB IN (%d) returned[%d] %3ph %s", i, preq->wLength, gspca_dev->usb_buf, preq->wLength > 3 ?
"...\n" : "\n"); } i++; } return gspca_dev->usb_err; } /* -- subdriver interface implementation -- */ #define DT_COLS (640) static const struct v4l2_pix_format dtcs033_mode[] = { /* raw Bayer patterned output */ {DT_COLS, 480, V4L2_PIX_FMT_GREY, V4L2_FIELD_NONE, .bytesperline = DT_COLS, .sizeimage = DT_COLS*480, .colorspace = V4L2_COLORSPACE_SRGB, }, /* this mode will demosaic the Bayer pattern */ {DT_COLS, 480, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE, .bytesperline = DT_COLS, .sizeimage = DT_COLS*480, .colorspace = V4L2_COLORSPACE_SRGB, } }; /* config called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { gspca_dev->cam.cam_mode = dtcs033_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(dtcs033_mode); gspca_dev->cam.bulk = 1; gspca_dev->cam.bulk_nurbs = 1; gspca_dev->cam.bulk_size = DT_COLS*512; return 0; } /* init called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } /* start stop the camera */ static int dtcs033_start(struct gspca_dev *gspca_dev); static void dtcs033_stopN(struct gspca_dev *gspca_dev); /* intercept camera image data */ static void dtcs033_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* packet data */ int len) /* packet data length */ { /* drop incomplete frames */ if (len != DT_COLS*512) { gspca_dev->last_packet_type = DISCARD_PACKET; /* gspca.c: discard invalidates the whole frame. */ return; } /* forward complete frames */ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, INTER_PACKET, data + 16*DT_COLS, len - 32*DT_COLS); /* skip first & last 16 lines */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); return; } /* -- controls: exposure and gain -- */ static void dtcs033_setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 gain) { /* gain [dB] encoding */ u16 sGain = (u16)gain; u16 gainVal = 224+(sGain-14)*(768-224)/(33-14); u16 wIndex = 0x0100|(0x00FF&gainVal); u16 wValue = (0xFF00&gainVal)>>8; /* exposure time [msec] encoding */ u16 sXTime = (u16)expo; u16 xtimeVal = (524*(150-(sXTime-1)))/150; const u8 bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; const u8 bRequest = 0x18; reg_rw(gspca_dev, bRequestType, bRequest, wValue, wIndex, 0); if (gspca_dev->usb_err < 0) gspca_err(gspca_dev, "usb error in setexposure(gain) sequence\n"); reg_rw(gspca_dev, bRequestType, bRequest, (xtimeVal<<4), 0x6300, 0); if (gspca_dev->usb_err < 0) gspca_err(gspca_dev, "usb error in setexposure(time) sequence\n"); } /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev;/* !! 
must be the first item */ /* exposure & gain controls */ struct { struct v4l2_ctrl *exposure; struct v4l2_ctrl *gain; }; }; static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *) gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: dtcs033_setexposure(gspca_dev, ctrl->val, sd->gain->val); break; case V4L2_CID_GAIN: dtcs033_setexposure(gspca_dev, sd->exposure->val, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int dtcs033_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; struct sd *sd = (struct sd *) gspca_dev; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 2); /* min max step default */ sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 1, 150, 1, 75);/* [msec] */ sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 14, 33, 1, 24);/* [dB] */ if (hdl->error) { gspca_err(gspca_dev, "Could not initialize controls: %d\n", hdl->error); return hdl->error; } v4l2_ctrl_cluster(2, &sd->exposure); return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = dtcs033_start, .stopN = dtcs033_stopN, .pkt_scan = dtcs033_pkt_scan, .init_controls = dtcs033_init_controls, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0547, 0x7303)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* device connect */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); /* --------------------------------------------------------- USB requests to start/stop the camera [USB 2.0 spec Ch.9]. 
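Each entry in the tables below is replayed in order by reg_reqs(), which issues it as a single usb_control_msg() transfer via reg_rw().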
bRequestType : 0x40 = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0xC0 = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, */ static const struct dtcs033_usb_requests dtcs033_start_reqs[] = { /* -- bRequest,wValue,wIndex,wLength */ { 0x40, 0x01, 0x0001, 0x000F, 0x0000 }, { 0x40, 0x01, 0x0000, 0x000F, 0x0000 }, { 0x40, 0x01, 0x0001, 0x000F, 0x0000 }, { 0x40, 0x18, 0x0000, 0x7F00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1001, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0004, 0x0000 }, { 0x40, 0x18, 0x0000, 0x7F01, 0x0000 }, { 0x40, 0x18, 0x30E0, 0x0009, 0x0000 }, { 0x40, 0x18, 0x0500, 0x012C, 0x0000 }, { 0x40, 0x18, 0x0380, 0x0200, 0x0000 }, { 0x40, 0x18, 0x0000, 0x035C, 0x0000 }, { 0x40, 0x18, 0x05C0, 0x0438, 0x0000 }, { 0x40, 0x18, 0x0440, 0x0500, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0668, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0700, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0800, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0900, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0A00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0B00, 0x0000 }, { 0x40, 0x18, 0x30E0, 0x6009, 0x0000 }, { 0x40, 0x18, 0x0500, 0x612C, 0x0000 }, { 0x40, 0x18, 0x2090, 0x6274, 0x0000 }, { 0x40, 0x18, 0x05C0, 0x6338, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6400, 0x0000 }, { 0x40, 0x18, 0x05C0, 0x6538, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6600, 0x0000 }, { 0x40, 0x18, 0x0680, 0x6744, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6800, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6900, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6A00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6B00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6C00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6D00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6E00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x808C, 0x0000 }, { 0x40, 0x18, 0x0010, 0x8101, 0x0000 }, { 0x40, 0x18, 0x30E0, 0x8200, 0x0000 }, { 0x40, 0x18, 0x0810, 0x832C, 0x0000 }, { 0x40, 0x18, 0x0680, 0x842B, 0x0000 }, { 0x40, 0x18, 0x0000, 0x8500, 0x0000 }, { 0x40, 0x18, 0x0000, 0x8600, 0x0000 }, { 0x40, 0x18, 0x0280, 0x8715, 0x0000 }, { 0x40, 0x18, 0x0000, 0x880C, 0x0000 }, { 0x40, 0x18, 0x0010, 0x8901, 0x0000 }, { 0x40, 0x18, 0x30E0, 0x8A00, 0x0000 }, { 0x40, 0x18, 0x0810, 0x8B2C, 0x0000 }, { 0x40, 0x18, 0x0680, 0x8C2B, 0x0000 }, { 0x40, 0x18, 0x0000, 0x8D00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x8E00, 0x0000 }, { 0x40, 0x18, 0x0280, 0x8F15, 0x0000 }, { 0x40, 0x18, 0x0010, 0xD040, 0x0000 }, { 0x40, 0x18, 0x0000, 0xD100, 0x0000 }, { 0x40, 0x18, 0x00B0, 0xD20A, 0x0000 }, { 0x40, 0x18, 0x0000, 0xD300, 0x0000 }, { 0x40, 0x18, 0x30E2, 0xD40D, 0x0000 }, { 0x40, 0x18, 0x0001, 0xD5C0, 0x0000 }, { 0x40, 0x18, 0x00A0, 0xD60A, 0x0000 }, { 0x40, 0x18, 0x0000, 0xD700, 0x0000 }, { 0x40, 0x18, 0x0000, 0x7F00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1501, 0x0000 }, { 0x40, 0x18, 0x0001, 0x01FF, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0200, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0304, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1101, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1201, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1300, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1400, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1601, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1800, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1900, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1A00, 0x0000 }, { 0x40, 0x18, 0x2000, 0x1B00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1C00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x2100, 0x0000 }, { 0x40, 0x18, 0x00C0, 0x228E, 0x0000 }, { 0x40, 0x18, 0x0000, 0x3001, 0x0000 }, { 0x40, 0x18, 0x0010, 0x3101, 0x0000 }, { 0x40, 0x18, 0x0008, 0x3301, 0x0000 }, { 0x40, 0x18, 0x0000, 0x3400, 0x0000 }, { 0x40, 0x18, 0x0012, 0x3549, 0x0000 }, { 0x40, 0x18, 0x0000, 0x3620, 0x0000 }, { 0x40, 0x18, 0x0001, 0x3700, 0x0000 }, { 0x40, 0x18, 0x0000, 0x4000, 0x0000 
}, { 0x40, 0x18, 0xFFFF, 0x41FF, 0x0000 }, { 0x40, 0x18, 0xFFFF, 0x42FF, 0x0000 }, { 0x40, 0x18, 0x0000, 0x500F, 0x0000 }, { 0x40, 0x18, 0x2272, 0x5108, 0x0000 }, { 0x40, 0x18, 0x2272, 0x5208, 0x0000 }, { 0x40, 0x18, 0xFFFF, 0x53FF, 0x0000 }, { 0x40, 0x18, 0xFFFF, 0x54FF, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6000, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6102, 0x0000 }, { 0x40, 0x18, 0x0010, 0x6214, 0x0000 }, { 0x40, 0x18, 0x0C80, 0x6300, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6401, 0x0000 }, { 0x40, 0x18, 0x0680, 0x6551, 0x0000 }, { 0x40, 0x18, 0xFFFF, 0x66FF, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6702, 0x0000 }, { 0x40, 0x18, 0x0010, 0x6800, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6900, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6A00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6B00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6C00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6D01, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6E00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x6F00, 0x0000 }, { 0x40, 0x18, 0x0000, 0x7000, 0x0000 }, { 0x40, 0x18, 0x0001, 0x7118, 0x0000 }, { 0x40, 0x18, 0x0000, 0x2001, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1101, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1301, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1300, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1501, 0x0000 }, { 0xC0, 0x11, 0x0000, 0x24C0, 0x0003 }, { 0x40, 0x18, 0x0000, 0x3000, 0x0000 }, { 0x40, 0x18, 0x0000, 0x3620, 0x0000 }, { 0x40, 0x18, 0x0000, 0x1501, 0x0000 }, { 0x40, 0x18, 0x0010, 0x6300, 0x0000 }, { 0x40, 0x18, 0x0002, 0x01F0, 0x0000 }, { 0x40, 0x01, 0x0003, 0x000F, 0x0000 } }; static const struct dtcs033_usb_requests dtcs033_stop_reqs[] = { /* -- bRequest,wValue,wIndex,wLength */ { 0x40, 0x01, 0x0001, 0x000F, 0x0000 }, { 0x40, 0x01, 0x0000, 0x000F, 0x0000 }, { 0x40, 0x18, 0x0000, 0x0003, 0x0000 } }; static int dtcs033_start(struct gspca_dev *gspca_dev) { return reg_reqs(gspca_dev, dtcs033_start_reqs, ARRAY_SIZE(dtcs033_start_reqs)); } static void dtcs033_stopN(struct gspca_dev *gspca_dev) { reg_reqs(gspca_dev, dtcs033_stop_reqs, ARRAY_SIZE(dtcs033_stop_reqs)); return; } |
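/*
 * Illustrative sketch, not part of the driver above (helper names are ours):
 * the register encodings used by dtcs033_setexposure(), written out as
 * standalone helpers.  For in-range inputs they reproduce the driver's
 * integer arithmetic; with the control defaults (gain = 24 dB,
 * exposure = 75 ms) they evaluate to 510 and 265 respectively.
 */
static unsigned int dtcs033_gain_to_reg(unsigned int gain_db)	/* 14..33 dB */
{
	return 224 + (gain_db - 14) * (768 - 224) / (33 - 14);
}

static unsigned int dtcs033_exposure_to_reg(unsigned int expo_ms)	/* 1..150 ms */
{
	return (524 * (150 - (expo_ms - 1))) / 150;
}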
/* * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> * Copyright (c) 2006 Sam Leffler, Errno Consulting * Copyright (c) 2007 Christoph Hellwig <hch@lst.de> * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org> * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * This driver is based on the uath driver written by Damien Bergamini for * OpenBSD, who did black-box analysis of the Windows binary driver to find * out how the hardware works. It contains a lot of magic numbers because of * that and only has minimal functionality. */ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> #include <linux/completion.h> #include <linux/firmware.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <net/mac80211.h> #include "ar5523.h" #include "ar5523_hw.h" /* * Various supported device vendors/products.
* UB51: AR5005UG 802.11b/g, UB52: AR5005UX 802.11a/b/g */ static int ar5523_submit_rx_cmd(struct ar5523 *ar); static void ar5523_data_tx_pkt_put(struct ar5523 *ar); static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr, struct ar5523_tx_cmd *cmd) { int dlen, olen; __be32 *rp; dlen = be32_to_cpu(hdr->len) - sizeof(*hdr); if (dlen < 0) { WARN_ON(1); goto out; } ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff, dlen); rp = (__be32 *)(hdr + 1); if (dlen >= sizeof(u32)) { olen = be32_to_cpu(rp[0]); dlen -= sizeof(u32); if (olen == 0) { /* convention is 0 =>'s one word */ olen = sizeof(u32); } } else olen = 0; if (cmd->odata) { if (cmd->olen < olen) { ar5523_err(ar, "olen too small %d < %d\n", cmd->olen, olen); cmd->olen = 0; cmd->res = -EOVERFLOW; } else { cmd->olen = olen; memcpy(cmd->odata, &rp[1], olen); cmd->res = 0; } } out: complete(&cmd->done); } static void ar5523_cmd_rx_cb(struct urb *urb) { struct ar5523 *ar = urb->context; struct ar5523_tx_cmd *cmd = &ar->tx_cmd; struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf; int dlen; u32 code, hdrlen; if (urb->status) { if (urb->status != -ESHUTDOWN) ar5523_err(ar, "RX USB error %d.\n", urb->status); goto skip; } if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) { ar5523_err(ar, "RX USB too short.\n"); goto skip; } ar5523_dbg(ar, "%s code %02x priv %d\n", __func__, be32_to_cpu(hdr->code) & 0xff, hdr->priv); code = be32_to_cpu(hdr->code); hdrlen = be32_to_cpu(hdr->len); switch (code & 0xff) { default: /* reply to a read command */ if (hdr->priv != AR5523_CMD_ID) { ar5523_err(ar, "Unexpected command id: %02x\n", code & 0xff); goto skip; } ar5523_read_reply(ar, hdr, cmd); break; case WDCMSG_DEVICE_AVAIL: ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n"); cmd->res = 0; cmd->olen = 0; complete(&cmd->done); break; case WDCMSG_SEND_COMPLETE: ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n", atomic_read(&ar->tx_nr_pending)); if (!test_bit(AR5523_HW_UP, &ar->flags)) ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n"); else { mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT); ar5523_data_tx_pkt_put(ar); } break; case WDCMSG_TARGET_START: /* This command returns a bogus id so it needs special handling */ dlen = hdrlen - sizeof(*hdr); if (dlen != (int)sizeof(u32)) { ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START"); return; } if (!cmd->odata) { ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply"); return; } memcpy(cmd->odata, hdr + 1, sizeof(u32)); cmd->olen = sizeof(u32); cmd->res = 0; complete(&cmd->done); break; case WDCMSG_STATS_UPDATE: ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n"); break; } skip: ar5523_submit_rx_cmd(ar); } static int ar5523_alloc_rx_cmd(struct ar5523 *ar) { ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL); if (!ar->rx_cmd_urb) return -ENOMEM; ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ, GFP_KERNEL, &ar->rx_cmd_urb->transfer_dma); if (!ar->rx_cmd_buf) { usb_free_urb(ar->rx_cmd_urb); return -ENOMEM; } return 0; } static void ar5523_cancel_rx_cmd(struct ar5523 *ar) { usb_kill_urb(ar->rx_cmd_urb); } static void ar5523_free_rx_cmd(struct ar5523 *ar) { usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ, ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma); usb_free_urb(ar->rx_cmd_urb); } static int ar5523_submit_rx_cmd(struct ar5523 *ar) { int error; usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev, ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf, AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar); ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC); 
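	/* -ENODEV just means the device has gone away; don't report that as an error. */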
if (error) { if (error != -ENODEV) ar5523_err(ar, "error %d when submitting rx urb\n", error); return error; } return 0; } /* * Command submitted cb */ static void ar5523_cmd_tx_cb(struct urb *urb) { struct ar5523_tx_cmd *cmd = urb->context; struct ar5523 *ar = cmd->ar; if (urb->status) { ar5523_err(ar, "Failed to TX command. Status = %d\n", urb->status); cmd->res = urb->status; complete(&cmd->done); return; } if (!(cmd->flags & AR5523_CMD_FLAG_READ)) { cmd->res = 0; complete(&cmd->done); } } static void ar5523_cancel_tx_cmd(struct ar5523 *ar) { usb_kill_urb(ar->tx_cmd.urb_tx); } static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, int ilen, void *odata, int olen, int flags) { struct ar5523_cmd_hdr *hdr; struct ar5523_tx_cmd *cmd = &ar->tx_cmd; int xferlen, error; /* always bulk-out a multiple of 4 bytes */ xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3; hdr = cmd->buf_tx; memset(hdr, 0, sizeof(struct ar5523_cmd_hdr)); hdr->len = cpu_to_be32(xferlen); hdr->code = cpu_to_be32(code); hdr->priv = AR5523_CMD_ID; if (flags & AR5523_CMD_FLAG_MAGIC) hdr->magic = cpu_to_be32(1 << 24); if (ilen) memcpy(hdr + 1, idata, ilen); cmd->odata = odata; cmd->olen = olen; cmd->flags = flags; ar5523_dbg(ar, "do cmd %02x\n", code); usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev), cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd); cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL); if (error) { ar5523_err(ar, "could not send command 0x%x, error=%d\n", code, error); return error; } if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) { ar5523_cancel_tx_cmd(ar); cmd->odata = NULL; ar5523_err(ar, "timeout waiting for command %02x reply\n", code); cmd->res = -ETIMEDOUT; } return cmd->res; } static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data, int len, int flags) { flags &= ~AR5523_CMD_FLAG_READ; return ar5523_cmd(ar, code, data, len, NULL, 0, flags); } static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata, int ilen, void *odata, int olen, int flags) { flags |= AR5523_CMD_FLAG_READ; return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags); } static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val) { struct ar5523_write_mac write; int error; write.reg = cpu_to_be32(reg); write.len = cpu_to_be32(0); /* 0 = single write */ *(__be32 *)write.data = cpu_to_be32(val); error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write, 3 * sizeof(u32), 0); if (error != 0) ar5523_err(ar, "could not write register 0x%02x\n", reg); return error; } static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data, int len) { struct ar5523_write_mac write; int error; write.reg = cpu_to_be32(reg); write.len = cpu_to_be32(len); memcpy(write.data, data, len); /* properly handle the case where len is zero (reset) */ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write, (len == 0) ? 
sizeof(u32) : 2 * sizeof(u32) + len, 0); if (error != 0) ar5523_err(ar, "could not write %d bytes to register 0x%02x\n", len, reg); return error; } static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata, int olen) { int error; __be32 which_be; which_be = cpu_to_be32(which); error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS, &which_be, sizeof(which_be), odata, olen, AR5523_CMD_FLAG_MAGIC); if (error != 0) ar5523_err(ar, "could not read EEPROM offset 0x%02x\n", which); return error; } static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val) { int error; __be32 cap_be, val_be; cap_be = cpu_to_be32(cap); error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY, &cap_be, sizeof(cap_be), &val_be, sizeof(__be32), AR5523_CMD_FLAG_MAGIC); if (error != 0) { ar5523_err(ar, "could not read capability %u\n", cap); return error; } *val = be32_to_cpu(val_be); return error; } static int ar5523_get_devcap(struct ar5523 *ar) { #define GETCAP(x) do { \ error = ar5523_get_capability(ar, x, &cap); \ if (error != 0) \ return error; \ ar5523_info(ar, "Cap: " \ "%s=0x%08x\n", #x, cap); \ } while (0) int error; u32 cap; /* collect device capabilities */ GETCAP(CAP_TARGET_VERSION); GETCAP(CAP_TARGET_REVISION); GETCAP(CAP_MAC_VERSION); GETCAP(CAP_MAC_REVISION); GETCAP(CAP_PHY_REVISION); GETCAP(CAP_ANALOG_5GHz_REVISION); GETCAP(CAP_ANALOG_2GHz_REVISION); GETCAP(CAP_REG_DOMAIN); GETCAP(CAP_REG_CAP_BITS); GETCAP(CAP_WIRELESS_MODES); GETCAP(CAP_CHAN_SPREAD_SUPPORT); GETCAP(CAP_COMPRESS_SUPPORT); GETCAP(CAP_BURST_SUPPORT); GETCAP(CAP_FAST_FRAMES_SUPPORT); GETCAP(CAP_CHAP_TUNING_SUPPORT); GETCAP(CAP_TURBOG_SUPPORT); GETCAP(CAP_TURBO_PRIME_SUPPORT); GETCAP(CAP_DEVICE_TYPE); GETCAP(CAP_WME_SUPPORT); GETCAP(CAP_TOTAL_QUEUES); GETCAP(CAP_CONNECTION_ID_MAX); GETCAP(CAP_LOW_5GHZ_CHAN); GETCAP(CAP_HIGH_5GHZ_CHAN); GETCAP(CAP_LOW_2GHZ_CHAN); GETCAP(CAP_HIGH_2GHZ_CHAN); GETCAP(CAP_TWICE_ANTENNAGAIN_5G); GETCAP(CAP_TWICE_ANTENNAGAIN_2G); GETCAP(CAP_CIPHER_AES_CCM); GETCAP(CAP_CIPHER_TKIP); GETCAP(CAP_MIC_TKIP); return 0; } static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode) { struct ar5523_cmd_ledsteady led; led.lednum = cpu_to_be32(lednum); led.ledmode = cpu_to_be32(ledmode); ar5523_dbg(ar, "set %s led %s (steady)\n", (lednum == UATH_LED_LINK) ? "link" : "activity", ledmode ? 
"on" : "off"); return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led), 0); } static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op) { struct ar5523_cmd_rx_filter rxfilter; rxfilter.bits = cpu_to_be32(bits); rxfilter.op = cpu_to_be32(op); ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op); return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter, sizeof(rxfilter), 0); } static int ar5523_reset_tx_queues(struct ar5523 *ar) { __be32 qid = cpu_to_be32(0); ar5523_dbg(ar, "resetting Tx queue\n"); return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE, &qid, sizeof(qid), 0); } static int ar5523_set_chan(struct ar5523 *ar) { struct ieee80211_conf *conf = &ar->hw->conf; struct ar5523_cmd_reset reset; memset(&reset, 0, sizeof(reset)); reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ); reset.flags |= cpu_to_be32(UATH_CHAN_OFDM); reset.freq = cpu_to_be32(conf->chandef.chan->center_freq); reset.maxrdpower = cpu_to_be32(50); /* XXX */ reset.channelchange = cpu_to_be32(1); reset.keeprccontent = cpu_to_be32(0); ar5523_dbg(ar, "set chan flags 0x%x freq %d\n", be32_to_cpu(reset.flags), conf->chandef.chan->center_freq); return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0); } static int ar5523_queue_init(struct ar5523 *ar) { struct ar5523_cmd_txq_setup qinfo; ar5523_dbg(ar, "setting up Tx queue\n"); qinfo.qid = cpu_to_be32(0); qinfo.len = cpu_to_be32(sizeof(qinfo.attr)); qinfo.attr.priority = cpu_to_be32(0); /* XXX */ qinfo.attr.aifs = cpu_to_be32(3); qinfo.attr.logcwmin = cpu_to_be32(4); qinfo.attr.logcwmax = cpu_to_be32(10); qinfo.attr.bursttime = cpu_to_be32(0); qinfo.attr.mode = cpu_to_be32(0); qinfo.attr.qflags = cpu_to_be32(1); /* XXX? */ return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo, sizeof(qinfo), 0); } static int ar5523_switch_chan(struct ar5523 *ar) { int error; error = ar5523_set_chan(ar); if (error) { ar5523_err(ar, "could not set chan, error %d\n", error); goto out_err; } /* reset Tx rings */ error = ar5523_reset_tx_queues(ar); if (error) { ar5523_err(ar, "could not reset Tx queues, error %d\n", error); goto out_err; } /* set Tx rings WME properties */ error = ar5523_queue_init(ar); if (error) ar5523_err(ar, "could not init wme, error %d\n", error); out_err: return error; } static void ar5523_rx_data_put(struct ar5523 *ar, struct ar5523_rx_data *data) { unsigned long flags; spin_lock_irqsave(&ar->rx_data_list_lock, flags); list_move(&data->list, &ar->rx_data_free); spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); } static void ar5523_data_rx_cb(struct urb *urb) { struct ar5523_rx_data *data = urb->context; struct ar5523 *ar = data->ar; struct ar5523_rx_desc *desc; struct ar5523_chunk *chunk; struct ieee80211_hw *hw = ar->hw; struct ieee80211_rx_status *rx_status; u32 rxlen; int usblen = urb->actual_length; int hdrlen, pad; ar5523_dbg(ar, "%s\n", __func__); /* sync/async unlink faults aren't errors */ if (urb->status) { if (urb->status != -ESHUTDOWN) ar5523_err(ar, "%s: USB err: %d\n", __func__, urb->status); goto skip; } if (usblen < AR5523_MIN_RXBUFSZ) { ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen); goto skip; } chunk = (struct ar5523_chunk *) data->skb->data; if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) || chunk->seqnum != 0) { ar5523_dbg(ar, "RX: No final flag. 
s: %d f: %02x l: %d\n", chunk->seqnum, chunk->flags, be16_to_cpu(chunk->length)); goto skip; } /* Rx descriptor is located at the end, 32-bit aligned */ desc = (struct ar5523_rx_desc *) (data->skb->data + usblen - sizeof(struct ar5523_rx_desc)); rxlen = be32_to_cpu(desc->len); if (rxlen > ar->rxbufsz) { ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n", be32_to_cpu(desc->len)); goto skip; } if (!rxlen) { ar5523_dbg(ar, "RX: rxlen is 0\n"); goto skip; } if (be32_to_cpu(desc->status) != 0) { ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n", be32_to_cpu(desc->status), be32_to_cpu(desc->len)); goto skip; } skb_reserve(data->skb, sizeof(*chunk)); skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc)); hdrlen = ieee80211_get_hdrlen_from_skb(data->skb); if (!IS_ALIGNED(hdrlen, 4)) { ar5523_dbg(ar, "eek, alignment workaround activated\n"); pad = ALIGN(hdrlen, 4) - hdrlen; memmove(data->skb->data + pad, data->skb->data, hdrlen); skb_pull(data->skb, pad); skb_put(data->skb, pad); } rx_status = IEEE80211_SKB_RXCB(data->skb); memset(rx_status, 0, sizeof(*rx_status)); rx_status->freq = be32_to_cpu(desc->channel); rx_status->band = hw->conf.chandef.chan->band; rx_status->signal = -95 + be32_to_cpu(desc->rssi); ieee80211_rx_irqsafe(hw, data->skb); data->skb = NULL; skip: if (data->skb) { dev_kfree_skb_irq(data->skb); data->skb = NULL; } ar5523_rx_data_put(ar, data); if (atomic_inc_return(&ar->rx_data_free_cnt) >= AR5523_RX_DATA_REFILL_COUNT && test_bit(AR5523_HW_UP, &ar->flags)) queue_work(ar->wq, &ar->rx_refill_work); } static void ar5523_rx_refill_work(struct work_struct *work) { struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work); struct ar5523_rx_data *data; unsigned long flags; int error; ar5523_dbg(ar, "%s\n", __func__); do { spin_lock_irqsave(&ar->rx_data_list_lock, flags); if (!list_empty(&ar->rx_data_free)) data = (struct ar5523_rx_data *) ar->rx_data_free.next; else data = NULL; spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); if (!data) goto done; data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL); if (!data->skb) { ar5523_err(ar, "could not allocate rx skbuff\n"); return; } usb_fill_bulk_urb(data->urb, ar->dev, ar5523_data_rx_pipe(ar->dev), data->skb->data, ar->rxbufsz, ar5523_data_rx_cb, data); spin_lock_irqsave(&ar->rx_data_list_lock, flags); list_move(&data->list, &ar->rx_data_used); spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); atomic_dec(&ar->rx_data_free_cnt); error = usb_submit_urb(data->urb, GFP_KERNEL); if (error) { kfree_skb(data->skb); if (error != -ENODEV) ar5523_err(ar, "Err sending rx data urb %d\n", error); ar5523_rx_data_put(ar, data); atomic_inc(&ar->rx_data_free_cnt); return; } } while (true); done: return; } static void ar5523_cancel_rx_bufs(struct ar5523 *ar) { struct ar5523_rx_data *data; unsigned long flags; do { spin_lock_irqsave(&ar->rx_data_list_lock, flags); if (!list_empty(&ar->rx_data_used)) data = (struct ar5523_rx_data *) ar->rx_data_used.next; else data = NULL; spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); if (!data) break; usb_kill_urb(data->urb); list_move(&data->list, &ar->rx_data_free); atomic_inc(&ar->rx_data_free_cnt); } while (data); } static void ar5523_free_rx_bufs(struct ar5523 *ar) { struct ar5523_rx_data *data; ar5523_cancel_rx_bufs(ar); while (!list_empty(&ar->rx_data_free)) { data = (struct ar5523_rx_data *) ar->rx_data_free.next; list_del(&data->list); usb_free_urb(data->urb); } } static int ar5523_alloc_rx_bufs(struct ar5523 *ar) { int i; for (i = 0; i < AR5523_RX_DATA_COUNT; i++) { struct 
ar5523_rx_data *data = &ar->rx_data[i]; data->ar = ar; data->urb = usb_alloc_urb(0, GFP_KERNEL); if (!data->urb) goto err; list_add_tail(&data->list, &ar->rx_data_free); atomic_inc(&ar->rx_data_free_cnt); } return 0; err: ar5523_free_rx_bufs(ar); return -ENOMEM; } static void ar5523_data_tx_pkt_put(struct ar5523 *ar) { atomic_dec(&ar->tx_nr_total); if (!atomic_dec_return(&ar->tx_nr_pending)) { timer_delete(&ar->tx_wd_timer); wake_up(&ar->tx_flush_waitq); } if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) { ar5523_dbg(ar, "restart tx queue\n"); ieee80211_wake_queues(ar->hw); } } static void ar5523_data_tx_cb(struct urb *urb) { struct sk_buff *skb = urb->context; struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); struct ar5523_tx_data *data = (struct ar5523_tx_data *) txi->driver_data; struct ar5523 *ar = data->ar; unsigned long flags; ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status); spin_lock_irqsave(&ar->tx_data_list_lock, flags); list_del(&data->list); spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); if (urb->status) { ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status); ar5523_data_tx_pkt_put(ar); ieee80211_free_txskb(ar->hw, skb); } else { skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32)); ieee80211_tx_status_irqsafe(ar->hw, skb); } usb_free_urb(urb); } static void ar5523_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); struct ar5523_tx_data *data = (struct ar5523_tx_data *) txi->driver_data; struct ar5523 *ar = hw->priv; unsigned long flags; ar5523_dbg(ar, "tx called\n"); if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) { ar5523_dbg(ar, "tx queue full\n"); ar5523_dbg(ar, "stop queues (tot %d pend %d)\n", atomic_read(&ar->tx_nr_total), atomic_read(&ar->tx_nr_pending)); ieee80211_stop_queues(hw); } spin_lock_irqsave(&ar->tx_data_list_lock, flags); list_add_tail(&data->list, &ar->tx_queue_pending); spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); ieee80211_queue_work(ar->hw, &ar->tx_work); } static void ar5523_tx_work_locked(struct ar5523 *ar) { struct ar5523_tx_data *data; struct ar5523_tx_desc *desc; struct ar5523_chunk *chunk; struct ieee80211_tx_info *txi; struct urb *urb; struct sk_buff *skb; int error = 0, paylen; u32 txqid; unsigned long flags; BUILD_BUG_ON(sizeof(struct ar5523_tx_data) > IEEE80211_TX_INFO_DRIVER_DATA_SIZE); ar5523_dbg(ar, "%s\n", __func__); do { spin_lock_irqsave(&ar->tx_data_list_lock, flags); if (!list_empty(&ar->tx_queue_pending)) { data = (struct ar5523_tx_data *) ar->tx_queue_pending.next; list_del(&data->list); } else data = NULL; spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); if (!data) break; txi = container_of((void *)data, struct ieee80211_tx_info, driver_data); txqid = 0; skb = container_of((void *)txi, struct sk_buff, cb); paylen = skb->len; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { ieee80211_free_txskb(ar->hw, skb); continue; } data->ar = ar; data->urb = urb; desc = skb_push(skb, sizeof(*desc)); chunk = skb_push(skb, sizeof(*chunk)); chunk->seqnum = 0; chunk->flags = UATH_CFLAGS_FINAL; chunk->length = cpu_to_be16(skb->len); desc->msglen = cpu_to_be32(skb->len); desc->msgid = AR5523_DATA_ID; desc->buflen = cpu_to_be32(paylen); desc->type = cpu_to_be32(WDCMSG_SEND); desc->flags = cpu_to_be32(UATH_TX_NOTIFY); if (test_bit(AR5523_CONNECTED, &ar->flags)) desc->connid = cpu_to_be32(AR5523_ID_BSS); else desc->connid = cpu_to_be32(AR5523_ID_BROADCAST); if (txi->flags & 
IEEE80211_TX_CTL_USE_MINRATE) txqid |= UATH_TXQID_MINRATE; desc->txqid = cpu_to_be32(txqid); urb->transfer_flags = URB_ZERO_PACKET; usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev), skb->data, skb->len, ar5523_data_tx_cb, skb); spin_lock_irqsave(&ar->tx_data_list_lock, flags); list_add_tail(&data->list, &ar->tx_queue_submitted); spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT); atomic_inc(&ar->tx_nr_pending); ar5523_dbg(ar, "TX Frame (%d pending)\n", atomic_read(&ar->tx_nr_pending)); error = usb_submit_urb(urb, GFP_KERNEL); if (error) { ar5523_err(ar, "error %d when submitting tx urb\n", error); spin_lock_irqsave(&ar->tx_data_list_lock, flags); list_del(&data->list); spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); atomic_dec(&ar->tx_nr_pending); ar5523_data_tx_pkt_put(ar); usb_free_urb(urb); ieee80211_free_txskb(ar->hw, skb); } } while (true); } static void ar5523_tx_work(struct work_struct *work) { struct ar5523 *ar = container_of(work, struct ar5523, tx_work); ar5523_dbg(ar, "%s\n", __func__); mutex_lock(&ar->mutex); ar5523_tx_work_locked(ar); mutex_unlock(&ar->mutex); } static void ar5523_tx_wd_timer(struct timer_list *t) { struct ar5523 *ar = from_timer(ar, t, tx_wd_timer); ar5523_dbg(ar, "TX watchdog timer triggered\n"); ieee80211_queue_work(ar->hw, &ar->tx_wd_work); } static void ar5523_tx_wd_work(struct work_struct *work) { struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work); /* Occasionally the TX queues stop responding. The only way to * recover seems to be to reset the dongle. */ mutex_lock(&ar->mutex); ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n", atomic_read(&ar->tx_nr_total), atomic_read(&ar->tx_nr_pending)); ar5523_err(ar, "Will restart dongle.\n"); ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0); mutex_unlock(&ar->mutex); } static void ar5523_flush_tx(struct ar5523 *ar) { ar5523_tx_work_locked(ar); /* Don't waste time trying to flush if USB is disconnected */ if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) return; if (!wait_event_timeout(ar->tx_flush_waitq, !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT)) ar5523_err(ar, "flush timeout (tot %d pend %d)\n", atomic_read(&ar->tx_nr_total), atomic_read(&ar->tx_nr_pending)); } static void ar5523_free_tx_cmd(struct ar5523 *ar) { struct ar5523_tx_cmd *cmd = &ar->tx_cmd; usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ, cmd->buf_tx, cmd->urb_tx->transfer_dma); usb_free_urb(cmd->urb_tx); } static int ar5523_alloc_tx_cmd(struct ar5523 *ar) { struct ar5523_tx_cmd *cmd = &ar->tx_cmd; cmd->ar = ar; init_completion(&cmd->done); cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL); if (!cmd->urb_tx) return -ENOMEM; cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ, GFP_KERNEL, &cmd->urb_tx->transfer_dma); if (!cmd->buf_tx) { usb_free_urb(cmd->urb_tx); return -ENOMEM; } return 0; } /* * This function is called periodically (every second) when associated to * query device statistics. */ static void ar5523_stat_work(struct work_struct *work) { struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work); int error; ar5523_dbg(ar, "%s\n", __func__); mutex_lock(&ar->mutex); /* * Send request for statistics asynchronously once a second. This * seems to be important. Throughput is a lot better if this is done. 
*/ error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0); if (error) ar5523_err(ar, "could not query stats, error %d\n", error); mutex_unlock(&ar->mutex); ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ); } /* * Interface routines to the mac80211 stack. */ static int ar5523_start(struct ieee80211_hw *hw) { struct ar5523 *ar = hw->priv; int error; __be32 val; ar5523_dbg(ar, "start called\n"); mutex_lock(&ar->mutex); val = cpu_to_be32(0); ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0); /* set MAC address */ ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr, ETH_ALEN); /* XXX honor net80211 state */ ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001); ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001); ar5523_config(ar, CFG_ABOLT, 0x0000003f); ar5523_config(ar, CFG_WME_ENABLED, 0x00000000); ar5523_config(ar, CFG_SERVICE_TYPE, 1); ar5523_config(ar, CFG_TP_SCALE, 0x00000000); ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c); ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c); ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000); ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000); ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003); ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000); ar5523_config(ar, CFG_MODE_CTS, 0x00000002); error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0, &val, sizeof(val), AR5523_CMD_FLAG_MAGIC); if (error) { ar5523_dbg(ar, "could not start target, error %d\n", error); goto err; } ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n", be32_to_cpu(val)); ar5523_switch_chan(ar); val = cpu_to_be32(TARGET_DEVICE_AWAKE); ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0); /* XXX? check */ ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0); set_bit(AR5523_HW_UP, &ar->flags); queue_work(ar->wq, &ar->rx_refill_work); /* enable Rx */ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT); ar5523_set_rxfilter(ar, UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST | UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON, UATH_FILTER_OP_SET); ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON); ar5523_dbg(ar, "start OK\n"); err: mutex_unlock(&ar->mutex); return error; } static void ar5523_stop(struct ieee80211_hw *hw, bool suspend) { struct ar5523 *ar = hw->priv; ar5523_dbg(ar, "stop called\n"); cancel_delayed_work_sync(&ar->stat_work); mutex_lock(&ar->mutex); clear_bit(AR5523_HW_UP, &ar->flags); ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF); ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF); ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0); timer_delete_sync(&ar->tx_wd_timer); cancel_work_sync(&ar->tx_wd_work); cancel_work_sync(&ar->rx_refill_work); ar5523_cancel_rx_bufs(ar); mutex_unlock(&ar->mutex); } static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct ar5523 *ar = hw->priv; int ret; ar5523_dbg(ar, "set_rts_threshold called\n"); mutex_lock(&ar->mutex); ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value); mutex_unlock(&ar->mutex); return ret; } static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct ar5523 *ar = hw->priv; ar5523_dbg(ar, "flush called\n"); ar5523_flush_tx(ar); } static int ar5523_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ar5523 *ar = hw->priv; ar5523_dbg(ar, "add interface called\n"); if (ar->vif) { ar5523_dbg(ar, "invalid add_interface\n"); return -EOPNOTSUPP; } switch (vif->type) { case NL80211_IFTYPE_STATION: ar->vif = vif; break; default: return -EOPNOTSUPP; } 
return 0; } static void ar5523_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ar5523 *ar = hw->priv; ar5523_dbg(ar, "remove interface called\n"); ar->vif = NULL; } static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed) { struct ar5523 *ar = hw->priv; ar5523_dbg(ar, "config called\n"); mutex_lock(&ar->mutex); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ar5523_dbg(ar, "Do channel switch\n"); ar5523_flush_tx(ar); ar5523_switch_chan(ar); } mutex_unlock(&ar->mutex); return 0; } static int ar5523_get_wlan_mode(struct ar5523 *ar, struct ieee80211_bss_conf *bss_conf) { struct ieee80211_supported_band *band; int bit; struct ieee80211_sta *sta; u32 sta_rate_set; band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; sta = ieee80211_find_sta(ar->vif, bss_conf->bssid); if (!sta) { ar5523_info(ar, "STA not found!\n"); return WLAN_MODE_11b; } sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band]; for (bit = 0; bit < band->n_bitrates; bit++) { if (sta_rate_set & 1) { int rate = band->bitrates[bit].bitrate; switch (rate) { case 60: case 90: case 120: case 180: case 240: case 360: case 480: case 540: return WLAN_MODE_11g; } } sta_rate_set >>= 1; } return WLAN_MODE_11b; } static void ar5523_create_rateset(struct ar5523 *ar, struct ieee80211_bss_conf *bss_conf, struct ar5523_cmd_rateset *rs, bool basic) { struct ieee80211_supported_band *band; struct ieee80211_sta *sta; int bit, i = 0; u32 sta_rate_set, basic_rate_set; sta = ieee80211_find_sta(ar->vif, bss_conf->bssid); basic_rate_set = bss_conf->basic_rates; if (!sta) { ar5523_info(ar, "STA not found. Cannot set rates\n"); sta_rate_set = bss_conf->basic_rates; } else sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band]; ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set); band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; for (bit = 0; bit < band->n_bitrates; bit++) { BUG_ON(i >= AR5523_MAX_NRATES); ar5523_dbg(ar, "Considering rate %d : %d\n", band->bitrates[bit].hw_value, sta_rate_set & 1); if (sta_rate_set & 1) { rs->set[i] = band->bitrates[bit].hw_value; if (basic_rate_set & 1 && basic) rs->set[i] |= 0x80; i++; } sta_rate_set >>= 1; basic_rate_set >>= 1; } rs->length = i; } static int ar5523_set_basic_rates(struct ar5523 *ar, struct ieee80211_bss_conf *bss) { struct ar5523_cmd_rates rates; memset(&rates, 0, sizeof(rates)); rates.connid = cpu_to_be32(2); /* XXX */ rates.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset)); ar5523_create_rateset(ar, bss, &rates.rateset, true); return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates, sizeof(rates), 0); } static int ar5523_create_connection(struct ar5523 *ar, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss) { struct ar5523_cmd_create_connection create; int wlan_mode; memset(&create, 0, sizeof(create)); create.connid = cpu_to_be32(2); create.bssid = cpu_to_be32(0); /* XXX packed or not? 
*/ create.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset)); ar5523_create_rateset(ar, bss, &create.connattr.rateset, false); wlan_mode = ar5523_get_wlan_mode(ar, bss); create.connattr.wlanmode = cpu_to_be32(wlan_mode); return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create, sizeof(create), 0); } static int ar5523_write_associd(struct ar5523 *ar, struct ieee80211_vif *vif) { struct ieee80211_bss_conf *bss = &vif->bss_conf; struct ar5523_cmd_set_associd associd; memset(&associd, 0, sizeof(associd)); associd.defaultrateix = cpu_to_be32(0); /* XXX */ associd.associd = cpu_to_be32(vif->cfg.aid); associd.timoffset = cpu_to_be32(0x3b); /* XXX */ memcpy(associd.bssid, bss->bssid, ETH_ALEN); return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd, sizeof(associd), 0); } static void ar5523_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss, u64 changed) { struct ar5523 *ar = hw->priv; int error; ar5523_dbg(ar, "bss_info_changed called\n"); mutex_lock(&ar->mutex); if (!(changed & BSS_CHANGED_ASSOC)) goto out_unlock; if (vif->cfg.assoc) { error = ar5523_create_connection(ar, vif, bss); if (error) { ar5523_err(ar, "could not create connection\n"); goto out_unlock; } error = ar5523_set_basic_rates(ar, bss); if (error) { ar5523_err(ar, "could not set negotiated rate set\n"); goto out_unlock; } error = ar5523_write_associd(ar, vif); if (error) { ar5523_err(ar, "could not set association\n"); goto out_unlock; } /* turn link LED on */ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON); set_bit(AR5523_CONNECTED, &ar->flags); ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ); } else { cancel_delayed_work(&ar->stat_work); clear_bit(AR5523_CONNECTED, &ar->flags); ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF); } out_unlock: mutex_unlock(&ar->mutex); } #define AR5523_SUPPORTED_FILTERS (FIF_ALLMULTI | \ FIF_FCSFAIL | \ FIF_OTHER_BSS) static void ar5523_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct ar5523 *ar = hw->priv; u32 filter = 0; ar5523_dbg(ar, "configure_filter called\n"); mutex_lock(&ar->mutex); ar5523_flush_tx(ar); *total_flags &= AR5523_SUPPORTED_FILTERS; /* The filters seems strange. UATH_FILTER_RX_BCAST and * UATH_FILTER_RX_MCAST does not result in those frames being RXed. * The only way I have found to get [mb]cast frames seems to be * to set UATH_FILTER_RX_PROM. 
*/ filter |= UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST | UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON | UATH_FILTER_RX_PROM; ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT); ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET); mutex_unlock(&ar->mutex); } static const struct ieee80211_ops ar5523_ops = { .add_chanctx = ieee80211_emulate_add_chanctx, .remove_chanctx = ieee80211_emulate_remove_chanctx, .change_chanctx = ieee80211_emulate_change_chanctx, .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .start = ar5523_start, .stop = ar5523_stop, .tx = ar5523_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .set_rts_threshold = ar5523_set_rts_threshold, .add_interface = ar5523_add_interface, .remove_interface = ar5523_remove_interface, .config = ar5523_hwconfig, .bss_info_changed = ar5523_bss_info_changed, .configure_filter = ar5523_configure_filter, .flush = ar5523_flush, }; static int ar5523_host_available(struct ar5523 *ar) { struct ar5523_cmd_host_available setup; /* inform target the host is available */ setup.sw_ver_major = cpu_to_be32(ATH_SW_VER_MAJOR); setup.sw_ver_minor = cpu_to_be32(ATH_SW_VER_MINOR); setup.sw_ver_patch = cpu_to_be32(ATH_SW_VER_PATCH); setup.sw_ver_build = cpu_to_be32(ATH_SW_VER_BUILD); return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE, &setup, sizeof(setup), NULL, 0, 0); } static int ar5523_get_devstatus(struct ar5523 *ar) { u8 macaddr[ETH_ALEN]; int error; /* retrieve MAC address */ error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN); if (error) { ar5523_err(ar, "could not read MAC address\n"); return error; } SET_IEEE80211_PERM_ADDR(ar->hw, macaddr); error = ar5523_get_status(ar, ST_SERIAL_NUMBER, &ar->serial[0], sizeof(ar->serial)); if (error) { ar5523_err(ar, "could not read device serial number\n"); return error; } return 0; } #define AR5523_SANE_RXBUFSZ 2000 static int ar5523_get_max_rxsz(struct ar5523 *ar) { int error; __be32 rxsize; /* Get max rx size */ error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize, sizeof(rxsize)); if (error != 0) { ar5523_err(ar, "could not read max RX size\n"); return error; } ar->rxbufsz = be32_to_cpu(rxsize); if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) { ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n", AR5523_SANE_RXBUFSZ); ar->rxbufsz = AR5523_SANE_RXBUFSZ; } ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz); return 0; } /* * This is copied from rtl818x, but we should probably move this * to common code as in OpenBSD. 
*/ static const struct ieee80211_rate ar5523_rates[] = { { .bitrate = 10, .hw_value = 2, }, { .bitrate = 20, .hw_value = 4 }, { .bitrate = 55, .hw_value = 11, }, { .bitrate = 110, .hw_value = 22, }, { .bitrate = 60, .hw_value = 12, }, { .bitrate = 90, .hw_value = 18, }, { .bitrate = 120, .hw_value = 24, }, { .bitrate = 180, .hw_value = 36, }, { .bitrate = 240, .hw_value = 48, }, { .bitrate = 360, .hw_value = 72, }, { .bitrate = 480, .hw_value = 96, }, { .bitrate = 540, .hw_value = 108, }, }; static const struct ieee80211_channel ar5523_channels[] = { { .center_freq = 2412 }, { .center_freq = 2417 }, { .center_freq = 2422 }, { .center_freq = 2427 }, { .center_freq = 2432 }, { .center_freq = 2437 }, { .center_freq = 2442 }, { .center_freq = 2447 }, { .center_freq = 2452 }, { .center_freq = 2457 }, { .center_freq = 2462 }, { .center_freq = 2467 }, { .center_freq = 2472 }, { .center_freq = 2484 }, }; static int ar5523_init_modes(struct ar5523 *ar) { BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels)); BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates)); memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels)); memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates)); ar->band.band = NL80211_BAND_2GHZ; ar->band.channels = ar->channels; ar->band.n_channels = ARRAY_SIZE(ar5523_channels); ar->band.bitrates = ar->rates; ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates); ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band; return 0; } /* * Load the MIPS R4000 microcode into the device. Once the image is loaded, * the device will detach itself from the bus and reattach later with a new * product Id (a la ezusb). */ static int ar5523_load_firmware(struct usb_device *dev) { struct ar5523_fwblock *txblock, *rxblock; const struct firmware *fw; void *fwbuf; int len, offset; int foolen; /* XXX(hch): handle short transfers */ int error = -ENXIO; if (request_firmware(&fw, AR5523_FIRMWARE_FILE, &dev->dev)) { dev_err(&dev->dev, "no firmware found: %s\n", AR5523_FIRMWARE_FILE); return -ENOENT; } txblock = kzalloc(sizeof(*txblock), GFP_KERNEL); if (!txblock) goto out; rxblock = kmalloc(sizeof(*rxblock), GFP_KERNEL); if (!rxblock) goto out_free_txblock; fwbuf = kmalloc(AR5523_MAX_FWBLOCK_SIZE, GFP_KERNEL); if (!fwbuf) goto out_free_rxblock; txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK); txblock->total = cpu_to_be32(fw->size); offset = 0; len = fw->size; while (len > 0) { int mlen = min(len, AR5523_MAX_FWBLOCK_SIZE); txblock->remain = cpu_to_be32(len - mlen); txblock->len = cpu_to_be32(mlen); /* send firmware block meta-data */ error = usb_bulk_msg(dev, ar5523_cmd_tx_pipe(dev), txblock, sizeof(*txblock), &foolen, AR5523_CMD_TIMEOUT); if (error) { dev_err(&dev->dev, "could not send firmware block info\n"); goto out_free_fwbuf; } /* send firmware block data */ memcpy(fwbuf, fw->data + offset, mlen); error = usb_bulk_msg(dev, ar5523_data_tx_pipe(dev), fwbuf, mlen, &foolen, AR5523_DATA_TIMEOUT); if (error) { dev_err(&dev->dev, "could not send firmware block data\n"); goto out_free_fwbuf; } /* wait for ack from firmware */ error = usb_bulk_msg(dev, ar5523_cmd_rx_pipe(dev), rxblock, sizeof(*rxblock), &foolen, AR5523_CMD_TIMEOUT); if (error) { dev_err(&dev->dev, "could not read firmware answer\n"); goto out_free_fwbuf; } len -= mlen; offset += mlen; } /* * Set the error to -ENXIO to make sure we continue probing for * a driver. 
*/ error = -ENXIO; out_free_fwbuf: kfree(fwbuf); out_free_rxblock: kfree(rxblock); out_free_txblock: kfree(txblock); out: release_firmware(fw); return error; } static int ar5523_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct ieee80211_hw *hw; struct ar5523 *ar; int error = -ENOMEM; static const u8 bulk_ep_addr[] = { AR5523_CMD_TX_PIPE | USB_DIR_OUT, AR5523_DATA_TX_PIPE | USB_DIR_OUT, AR5523_CMD_RX_PIPE | USB_DIR_IN, AR5523_DATA_RX_PIPE | USB_DIR_IN, 0}; if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) { dev_err(&dev->dev, "Could not find all expected endpoints\n"); error = -ENODEV; goto out; } /* * Load firmware if the device requires it. This will return * -ENXIO on success and we'll get called back afer the usb * id changes to indicate that the firmware is present. */ if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE) return ar5523_load_firmware(dev); hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops); if (!hw) goto out; SET_IEEE80211_DEV(hw, &intf->dev); ar = hw->priv; ar->hw = hw; ar->dev = dev; mutex_init(&ar->mutex); INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work); timer_setup(&ar->tx_wd_timer, ar5523_tx_wd_timer, 0); INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work); INIT_WORK(&ar->tx_work, ar5523_tx_work); INIT_LIST_HEAD(&ar->tx_queue_pending); INIT_LIST_HEAD(&ar->tx_queue_submitted); spin_lock_init(&ar->tx_data_list_lock); atomic_set(&ar->tx_nr_total, 0); atomic_set(&ar->tx_nr_pending, 0); init_waitqueue_head(&ar->tx_flush_waitq); atomic_set(&ar->rx_data_free_cnt, 0); INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work); INIT_LIST_HEAD(&ar->rx_data_free); INIT_LIST_HEAD(&ar->rx_data_used); spin_lock_init(&ar->rx_data_list_lock); ar->wq = create_singlethread_workqueue("ar5523"); if (!ar->wq) { ar5523_err(ar, "Could not create wq\n"); goto out_free_ar; } error = ar5523_alloc_rx_bufs(ar); if (error) { ar5523_err(ar, "Could not allocate rx buffers\n"); goto out_free_wq; } error = ar5523_alloc_rx_cmd(ar); if (error) { ar5523_err(ar, "Could not allocate rx command buffers\n"); goto out_free_rx_bufs; } error = ar5523_alloc_tx_cmd(ar); if (error) { ar5523_err(ar, "Could not allocate tx command buffers\n"); goto out_free_rx_cmd; } error = ar5523_submit_rx_cmd(ar); if (error) { ar5523_err(ar, "Failed to submit rx cmd\n"); goto out_free_tx_cmd; } /* * We're now ready to send/receive firmware commands. */ error = ar5523_host_available(ar); if (error) { ar5523_err(ar, "could not initialize adapter\n"); goto out_cancel_rx_cmd; } error = ar5523_get_max_rxsz(ar); if (error) { ar5523_err(ar, "could not get caps from adapter\n"); goto out_cancel_rx_cmd; } error = ar5523_get_devcap(ar); if (error) { ar5523_err(ar, "could not get caps from adapter\n"); goto out_cancel_rx_cmd; } error = ar5523_get_devstatus(ar); if (error != 0) { ar5523_err(ar, "could not get device status\n"); goto out_cancel_rx_cmd; } ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n", (id->driver_info & AR5523_FLAG_ABG) ? 
'5' : '2'); ar->vif = NULL; ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) + sizeof(struct ar5523_chunk); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); hw->queues = 1; error = ar5523_init_modes(ar); if (error) goto out_cancel_rx_cmd; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); usb_set_intfdata(intf, hw); error = ieee80211_register_hw(hw); if (error) { ar5523_err(ar, "could not register device\n"); goto out_cancel_rx_cmd; } ar5523_info(ar, "Found and initialized AR5523 device\n"); return 0; out_cancel_rx_cmd: ar5523_cancel_rx_cmd(ar); out_free_tx_cmd: ar5523_free_tx_cmd(ar); out_free_rx_cmd: ar5523_free_rx_cmd(ar); out_free_rx_bufs: ar5523_free_rx_bufs(ar); out_free_wq: destroy_workqueue(ar->wq); out_free_ar: ieee80211_free_hw(hw); out: return error; } static void ar5523_disconnect(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct ar5523 *ar = hw->priv; ar5523_dbg(ar, "detaching\n"); set_bit(AR5523_USB_DISCONNECTED, &ar->flags); ieee80211_unregister_hw(hw); ar5523_cancel_rx_cmd(ar); ar5523_free_tx_cmd(ar); ar5523_free_rx_cmd(ar); ar5523_free_rx_bufs(ar); destroy_workqueue(ar->wq); ieee80211_free_hw(hw); usb_set_intfdata(intf, NULL); } #define AR5523_DEVICE_UG(vendor, device) \ { USB_DEVICE((vendor), (device)) }, \ { USB_DEVICE((vendor), (device) + 1), \ .driver_info = AR5523_FLAG_PRE_FIRMWARE } #define AR5523_DEVICE_UX(vendor, device) \ { USB_DEVICE((vendor), (device)), \ .driver_info = AR5523_FLAG_ABG }, \ { USB_DEVICE((vendor), (device) + 1), \ .driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE } static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UG(0x168c, 0x0001), /* Atheros / AR5523 */ AR5523_DEVICE_UG(0x0cf3, 0x0001), /* Atheros2 / AR5523_1 */ AR5523_DEVICE_UG(0x0cf3, 0x0003), /* Atheros2 / AR5523_2 */ AR5523_DEVICE_UX(0x0cf3, 0x0005), /* Atheros2 / AR5523_3 */ AR5523_DEVICE_UG(0x0d8e, 0x7801), /* Conceptronic / AR5523_1 */ AR5523_DEVICE_UX(0x0d8e, 0x7811), /* Conceptronic / AR5523_2 */ AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */ AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */ AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */ AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */ AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108 (CyberTAN Technology) */ AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */ AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */ AR5523_DEVICE_UG(0x0d8e, 0x7802), /* Globalsun / AR5523_3 */ AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect SMCWUSBT-G2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1, TEW444UBEU*/ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */ AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */ AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */ AR5523_DEVICE_UG(0x1385, 0x4250), /* Netgear3 / WG111T (2) */ AR5523_DEVICE_UG(0x1385, 0x5f00), /* Netgear / WPN111 */ AR5523_DEVICE_UG(0x1385, 0x5f02), /* Netgear / WPN111 */ { } }; MODULE_DEVICE_TABLE(usb, ar5523_id_table); static struct 
usb_driver ar5523_driver = { .name = "ar5523", .id_table = ar5523_id_table, .probe = ar5523_probe, .disconnect = ar5523_disconnect, }; module_usb_driver(ar5523_driver); MODULE_DESCRIPTION("Atheros AR5523 wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_FIRMWARE(AR5523_FIRMWARE_FILE); |
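For readers following the probe path above, here is an illustrative sketch (not part of the driver source) of how one entry of ar5523_id_table expands under the AR5523_DEVICE_UG() macro defined earlier. The base product ID matches the device once its firmware is running, while product ID + 1 matches the pre-firmware device: for that ID, probe() only calls ar5523_load_firmware() and returns, and after the upload the dongle re-enumerates with the base ID so probe() runs again and performs the full initialization.

/* Illustrative expansion of AR5523_DEVICE_UG(0x168c, 0x0001): */
{ USB_DEVICE(0x168c, 0x0001) },                                          /* normal (post-firmware) device */
{ USB_DEVICE(0x168c, 0x0002), .driver_info = AR5523_FLAG_PRE_FIRMWARE }, /* pre-firmware device, triggers firmware load */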
/*
 * Cryptographic API.
 *
 * Anubis Algorithm
 *
 * The Anubis algorithm was developed by Paulo S. L. M. Barreto and
 * Vincent Rijmen.
 *
 * See
 *
 * P.S.L.M. Barreto, V. Rijmen,
 * ``The Anubis block cipher,''
 * NESSIE submission, 2000.
 *
 * This software implements the "tweaked" version of Anubis.
 * Only the S-box and (consequently) the round constants have been
 * changed.
 *
 * The original authors have disclaimed all copyright interest in this
 * code and thus put it in the public domain. The subsequent authors
 * have put this under the GNU General Public License.
* * By Aaron Grothe ajgrothe@yahoo.com, October 28, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/unaligned.h> #include <linux/types.h> #define ANUBIS_MIN_KEY_SIZE 16 #define ANUBIS_MAX_KEY_SIZE 40 #define ANUBIS_BLOCK_SIZE 16 #define ANUBIS_MAX_N 10 #define ANUBIS_MAX_ROUNDS (8 + ANUBIS_MAX_N) struct anubis_ctx { int key_len; // in bits int R; u32 E[ANUBIS_MAX_ROUNDS + 1][4]; u32 D[ANUBIS_MAX_ROUNDS + 1][4]; }; static const u32 T0[256] = { 0xba69d2bbU, 0x54a84de5U, 0x2f5ebce2U, 0x74e8cd25U, 0x53a651f7U, 0xd3bb6bd0U, 0xd2b96fd6U, 0x4d9a29b3U, 0x50a05dfdU, 0xac458acfU, 0x8d070e09U, 0xbf63c6a5U, 0x70e0dd3dU, 0x52a455f1U, 0x9a29527bU, 0x4c982db5U, 0xeac98f46U, 0xd5b773c4U, 0x97336655U, 0xd1bf63dcU, 0x3366ccaaU, 0x51a259fbU, 0x5bb671c7U, 0xa651a2f3U, 0xdea15ffeU, 0x48903dadU, 0xa84d9ad7U, 0x992f5e71U, 0xdbab4be0U, 0x3264c8acU, 0xb773e695U, 0xfce5d732U, 0xe3dbab70U, 0x9e214263U, 0x913f7e41U, 0x9b2b567dU, 0xe2d9af76U, 0xbb6bd6bdU, 0x4182199bU, 0x6edca579U, 0xa557aef9U, 0xcb8b0b80U, 0x6bd6b167U, 0x95376e59U, 0xa15fbee1U, 0xf3fbeb10U, 0xb17ffe81U, 0x0204080cU, 0xcc851792U, 0xc49537a2U, 0x1d3a744eU, 0x14285078U, 0xc39b2bb0U, 0x63c69157U, 0xdaa94fe6U, 0x5dba69d3U, 0x5fbe61dfU, 0xdca557f2U, 0x7dfae913U, 0xcd871394U, 0x7ffee11fU, 0x5ab475c1U, 0x6cd8ad75U, 0x5cb86dd5U, 0xf7f3fb08U, 0x264c98d4U, 0xffe3db38U, 0xedc79354U, 0xe8cd874aU, 0x9d274e69U, 0x6fdea17fU, 0x8e010203U, 0x19326456U, 0xa05dbae7U, 0xf0fde71aU, 0x890f1e11U, 0x0f1e3c22U, 0x070e1c12U, 0xaf4386c5U, 0xfbebcb20U, 0x08102030U, 0x152a547eU, 0x0d1a342eU, 0x04081018U, 0x01020406U, 0x64c88d45U, 0xdfa35bf8U, 0x76ecc529U, 0x79f2f90bU, 0xdda753f4U, 0x3d7af48eU, 0x162c5874U, 0x3f7efc82U, 0x376edcb2U, 0x6ddaa973U, 0x3870e090U, 0xb96fdeb1U, 0x73e6d137U, 0xe9cf834cU, 0x356ad4beU, 0x55aa49e3U, 0x71e2d93bU, 0x7bf6f107U, 0x8c050a0fU, 0x72e4d531U, 0x880d1a17U, 0xf6f1ff0eU, 0x2a54a8fcU, 0x3e7cf884U, 0x5ebc65d9U, 0x274e9cd2U, 0x468c0589U, 0x0c183028U, 0x65ca8943U, 0x68d0bd6dU, 0x61c2995bU, 0x03060c0aU, 0xc19f23bcU, 0x57ae41efU, 0xd6b17fceU, 0xd9af43ecU, 0x58b07dcdU, 0xd8ad47eaU, 0x66cc8549U, 0xd7b37bc8U, 0x3a74e89cU, 0xc88d078aU, 0x3c78f088U, 0xfae9cf26U, 0x96316253U, 0xa753a6f5U, 0x982d5a77U, 0xecc59752U, 0xb86ddab7U, 0xc7933ba8U, 0xae4182c3U, 0x69d2b96bU, 0x4b9631a7U, 0xab4b96ddU, 0xa94f9ed1U, 0x67ce814fU, 0x0a14283cU, 0x478e018fU, 0xf2f9ef16U, 0xb577ee99U, 0x224488ccU, 0xe5d7b364U, 0xeec19f5eU, 0xbe61c2a3U, 0x2b56acfaU, 0x811f3e21U, 0x1224486cU, 0x831b362dU, 0x1b366c5aU, 0x0e1c3824U, 0x23468ccaU, 0xf5f7f304U, 0x458a0983U, 0x214284c6U, 0xce811f9eU, 0x499239abU, 0x2c58b0e8U, 0xf9efc32cU, 0xe6d1bf6eU, 0xb671e293U, 0x2850a0f0U, 0x172e5c72U, 0x8219322bU, 0x1a34685cU, 0x8b0b161dU, 0xfee1df3eU, 0x8a09121bU, 0x09122436U, 0xc98f038cU, 0x87132635U, 0x4e9c25b9U, 0xe1dfa37cU, 0x2e5cb8e4U, 0xe4d5b762U, 0xe0dda77aU, 0xebcb8b40U, 0x903d7a47U, 0xa455aaffU, 0x1e3c7844U, 0x85172e39U, 0x60c09d5dU, 0x00000000U, 0x254a94deU, 0xf4f5f702U, 0xf1ffe31cU, 0x94356a5fU, 0x0b162c3aU, 0xe7d3bb68U, 0x75eac923U, 0xefc39b58U, 0x3468d0b8U, 0x3162c4a6U, 0xd4b577c2U, 0xd0bd67daU, 0x86112233U, 0x7efce519U, 0xad478ec9U, 0xfde7d334U, 0x2952a4f6U, 0x3060c0a0U, 0x3b76ec9aU, 0x9f234665U, 0xf8edc72aU, 0xc6913faeU, 0x13264c6aU, 0x060c1814U, 0x050a141eU, 0xc59733a4U, 
0x11224466U, 0x77eec12fU, 0x7cf8ed15U, 0x7af4f501U, 0x78f0fd0dU, 0x366cd8b4U, 0x1c387048U, 0x3972e496U, 0x59b279cbU, 0x18306050U, 0x56ac45e9U, 0xb37bf68dU, 0xb07dfa87U, 0x244890d8U, 0x204080c0U, 0xb279f28bU, 0x9239724bU, 0xa35bb6edU, 0xc09d27baU, 0x44880d85U, 0x62c49551U, 0x10204060U, 0xb475ea9fU, 0x84152a3fU, 0x43861197U, 0x933b764dU, 0xc2992fb6U, 0x4a9435a1U, 0xbd67cea9U, 0x8f030605U, 0x2d5ab4eeU, 0xbc65caafU, 0x9c254a6fU, 0x6ad4b561U, 0x40801d9dU, 0xcf831b98U, 0xa259b2ebU, 0x801d3a27U, 0x4f9e21bfU, 0x1f3e7c42U, 0xca890f86U, 0xaa4992dbU, 0x42841591U, }; static const u32 T1[256] = { 0x69babbd2U, 0xa854e54dU, 0x5e2fe2bcU, 0xe87425cdU, 0xa653f751U, 0xbbd3d06bU, 0xb9d2d66fU, 0x9a4db329U, 0xa050fd5dU, 0x45accf8aU, 0x078d090eU, 0x63bfa5c6U, 0xe0703dddU, 0xa452f155U, 0x299a7b52U, 0x984cb52dU, 0xc9ea468fU, 0xb7d5c473U, 0x33975566U, 0xbfd1dc63U, 0x6633aaccU, 0xa251fb59U, 0xb65bc771U, 0x51a6f3a2U, 0xa1defe5fU, 0x9048ad3dU, 0x4da8d79aU, 0x2f99715eU, 0xabdbe04bU, 0x6432acc8U, 0x73b795e6U, 0xe5fc32d7U, 0xdbe370abU, 0x219e6342U, 0x3f91417eU, 0x2b9b7d56U, 0xd9e276afU, 0x6bbbbdd6U, 0x82419b19U, 0xdc6e79a5U, 0x57a5f9aeU, 0x8bcb800bU, 0xd66b67b1U, 0x3795596eU, 0x5fa1e1beU, 0xfbf310ebU, 0x7fb181feU, 0x04020c08U, 0x85cc9217U, 0x95c4a237U, 0x3a1d4e74U, 0x28147850U, 0x9bc3b02bU, 0xc6635791U, 0xa9dae64fU, 0xba5dd369U, 0xbe5fdf61U, 0xa5dcf257U, 0xfa7d13e9U, 0x87cd9413U, 0xfe7f1fe1U, 0xb45ac175U, 0xd86c75adU, 0xb85cd56dU, 0xf3f708fbU, 0x4c26d498U, 0xe3ff38dbU, 0xc7ed5493U, 0xcde84a87U, 0x279d694eU, 0xde6f7fa1U, 0x018e0302U, 0x32195664U, 0x5da0e7baU, 0xfdf01ae7U, 0x0f89111eU, 0x1e0f223cU, 0x0e07121cU, 0x43afc586U, 0xebfb20cbU, 0x10083020U, 0x2a157e54U, 0x1a0d2e34U, 0x08041810U, 0x02010604U, 0xc864458dU, 0xa3dff85bU, 0xec7629c5U, 0xf2790bf9U, 0xa7ddf453U, 0x7a3d8ef4U, 0x2c167458U, 0x7e3f82fcU, 0x6e37b2dcU, 0xda6d73a9U, 0x703890e0U, 0x6fb9b1deU, 0xe67337d1U, 0xcfe94c83U, 0x6a35bed4U, 0xaa55e349U, 0xe2713bd9U, 0xf67b07f1U, 0x058c0f0aU, 0xe47231d5U, 0x0d88171aU, 0xf1f60effU, 0x542afca8U, 0x7c3e84f8U, 0xbc5ed965U, 0x4e27d29cU, 0x8c468905U, 0x180c2830U, 0xca654389U, 0xd0686dbdU, 0xc2615b99U, 0x06030a0cU, 0x9fc1bc23U, 0xae57ef41U, 0xb1d6ce7fU, 0xafd9ec43U, 0xb058cd7dU, 0xadd8ea47U, 0xcc664985U, 0xb3d7c87bU, 0x743a9ce8U, 0x8dc88a07U, 0x783c88f0U, 0xe9fa26cfU, 0x31965362U, 0x53a7f5a6U, 0x2d98775aU, 0xc5ec5297U, 0x6db8b7daU, 0x93c7a83bU, 0x41aec382U, 0xd2696bb9U, 0x964ba731U, 0x4babdd96U, 0x4fa9d19eU, 0xce674f81U, 0x140a3c28U, 0x8e478f01U, 0xf9f216efU, 0x77b599eeU, 0x4422cc88U, 0xd7e564b3U, 0xc1ee5e9fU, 0x61bea3c2U, 0x562bfaacU, 0x1f81213eU, 0x24126c48U, 0x1b832d36U, 0x361b5a6cU, 0x1c0e2438U, 0x4623ca8cU, 0xf7f504f3U, 0x8a458309U, 0x4221c684U, 0x81ce9e1fU, 0x9249ab39U, 0x582ce8b0U, 0xeff92cc3U, 0xd1e66ebfU, 0x71b693e2U, 0x5028f0a0U, 0x2e17725cU, 0x19822b32U, 0x341a5c68U, 0x0b8b1d16U, 0xe1fe3edfU, 0x098a1b12U, 0x12093624U, 0x8fc98c03U, 0x13873526U, 0x9c4eb925U, 0xdfe17ca3U, 0x5c2ee4b8U, 0xd5e462b7U, 0xdde07aa7U, 0xcbeb408bU, 0x3d90477aU, 0x55a4ffaaU, 0x3c1e4478U, 0x1785392eU, 0xc0605d9dU, 0x00000000U, 0x4a25de94U, 0xf5f402f7U, 0xfff11ce3U, 0x35945f6aU, 0x160b3a2cU, 0xd3e768bbU, 0xea7523c9U, 0xc3ef589bU, 0x6834b8d0U, 0x6231a6c4U, 0xb5d4c277U, 0xbdd0da67U, 0x11863322U, 0xfc7e19e5U, 0x47adc98eU, 0xe7fd34d3U, 0x5229f6a4U, 0x6030a0c0U, 0x763b9aecU, 0x239f6546U, 0xedf82ac7U, 0x91c6ae3fU, 0x26136a4cU, 0x0c061418U, 0x0a051e14U, 0x97c5a433U, 0x22116644U, 0xee772fc1U, 0xf87c15edU, 0xf47a01f5U, 0xf0780dfdU, 0x6c36b4d8U, 0x381c4870U, 0x723996e4U, 0xb259cb79U, 0x30185060U, 0xac56e945U, 0x7bb38df6U, 0x7db087faU, 0x4824d890U, 0x4020c080U, 
0x79b28bf2U, 0x39924b72U, 0x5ba3edb6U, 0x9dc0ba27U, 0x8844850dU, 0xc4625195U, 0x20106040U, 0x75b49feaU, 0x15843f2aU, 0x86439711U, 0x3b934d76U, 0x99c2b62fU, 0x944aa135U, 0x67bda9ceU, 0x038f0506U, 0x5a2deeb4U, 0x65bcafcaU, 0x259c6f4aU, 0xd46a61b5U, 0x80409d1dU, 0x83cf981bU, 0x59a2ebb2U, 0x1d80273aU, 0x9e4fbf21U, 0x3e1f427cU, 0x89ca860fU, 0x49aadb92U, 0x84429115U, }; static const u32 T2[256] = { 0xd2bbba69U, 0x4de554a8U, 0xbce22f5eU, 0xcd2574e8U, 0x51f753a6U, 0x6bd0d3bbU, 0x6fd6d2b9U, 0x29b34d9aU, 0x5dfd50a0U, 0x8acfac45U, 0x0e098d07U, 0xc6a5bf63U, 0xdd3d70e0U, 0x55f152a4U, 0x527b9a29U, 0x2db54c98U, 0x8f46eac9U, 0x73c4d5b7U, 0x66559733U, 0x63dcd1bfU, 0xccaa3366U, 0x59fb51a2U, 0x71c75bb6U, 0xa2f3a651U, 0x5ffedea1U, 0x3dad4890U, 0x9ad7a84dU, 0x5e71992fU, 0x4be0dbabU, 0xc8ac3264U, 0xe695b773U, 0xd732fce5U, 0xab70e3dbU, 0x42639e21U, 0x7e41913fU, 0x567d9b2bU, 0xaf76e2d9U, 0xd6bdbb6bU, 0x199b4182U, 0xa5796edcU, 0xaef9a557U, 0x0b80cb8bU, 0xb1676bd6U, 0x6e599537U, 0xbee1a15fU, 0xeb10f3fbU, 0xfe81b17fU, 0x080c0204U, 0x1792cc85U, 0x37a2c495U, 0x744e1d3aU, 0x50781428U, 0x2bb0c39bU, 0x915763c6U, 0x4fe6daa9U, 0x69d35dbaU, 0x61df5fbeU, 0x57f2dca5U, 0xe9137dfaU, 0x1394cd87U, 0xe11f7ffeU, 0x75c15ab4U, 0xad756cd8U, 0x6dd55cb8U, 0xfb08f7f3U, 0x98d4264cU, 0xdb38ffe3U, 0x9354edc7U, 0x874ae8cdU, 0x4e699d27U, 0xa17f6fdeU, 0x02038e01U, 0x64561932U, 0xbae7a05dU, 0xe71af0fdU, 0x1e11890fU, 0x3c220f1eU, 0x1c12070eU, 0x86c5af43U, 0xcb20fbebU, 0x20300810U, 0x547e152aU, 0x342e0d1aU, 0x10180408U, 0x04060102U, 0x8d4564c8U, 0x5bf8dfa3U, 0xc52976ecU, 0xf90b79f2U, 0x53f4dda7U, 0xf48e3d7aU, 0x5874162cU, 0xfc823f7eU, 0xdcb2376eU, 0xa9736ddaU, 0xe0903870U, 0xdeb1b96fU, 0xd13773e6U, 0x834ce9cfU, 0xd4be356aU, 0x49e355aaU, 0xd93b71e2U, 0xf1077bf6U, 0x0a0f8c05U, 0xd53172e4U, 0x1a17880dU, 0xff0ef6f1U, 0xa8fc2a54U, 0xf8843e7cU, 0x65d95ebcU, 0x9cd2274eU, 0x0589468cU, 0x30280c18U, 0x894365caU, 0xbd6d68d0U, 0x995b61c2U, 0x0c0a0306U, 0x23bcc19fU, 0x41ef57aeU, 0x7fced6b1U, 0x43ecd9afU, 0x7dcd58b0U, 0x47ead8adU, 0x854966ccU, 0x7bc8d7b3U, 0xe89c3a74U, 0x078ac88dU, 0xf0883c78U, 0xcf26fae9U, 0x62539631U, 0xa6f5a753U, 0x5a77982dU, 0x9752ecc5U, 0xdab7b86dU, 0x3ba8c793U, 0x82c3ae41U, 0xb96b69d2U, 0x31a74b96U, 0x96ddab4bU, 0x9ed1a94fU, 0x814f67ceU, 0x283c0a14U, 0x018f478eU, 0xef16f2f9U, 0xee99b577U, 0x88cc2244U, 0xb364e5d7U, 0x9f5eeec1U, 0xc2a3be61U, 0xacfa2b56U, 0x3e21811fU, 0x486c1224U, 0x362d831bU, 0x6c5a1b36U, 0x38240e1cU, 0x8cca2346U, 0xf304f5f7U, 0x0983458aU, 0x84c62142U, 0x1f9ece81U, 0x39ab4992U, 0xb0e82c58U, 0xc32cf9efU, 0xbf6ee6d1U, 0xe293b671U, 0xa0f02850U, 0x5c72172eU, 0x322b8219U, 0x685c1a34U, 0x161d8b0bU, 0xdf3efee1U, 0x121b8a09U, 0x24360912U, 0x038cc98fU, 0x26358713U, 0x25b94e9cU, 0xa37ce1dfU, 0xb8e42e5cU, 0xb762e4d5U, 0xa77ae0ddU, 0x8b40ebcbU, 0x7a47903dU, 0xaaffa455U, 0x78441e3cU, 0x2e398517U, 0x9d5d60c0U, 0x00000000U, 0x94de254aU, 0xf702f4f5U, 0xe31cf1ffU, 0x6a5f9435U, 0x2c3a0b16U, 0xbb68e7d3U, 0xc92375eaU, 0x9b58efc3U, 0xd0b83468U, 0xc4a63162U, 0x77c2d4b5U, 0x67dad0bdU, 0x22338611U, 0xe5197efcU, 0x8ec9ad47U, 0xd334fde7U, 0xa4f62952U, 0xc0a03060U, 0xec9a3b76U, 0x46659f23U, 0xc72af8edU, 0x3faec691U, 0x4c6a1326U, 0x1814060cU, 0x141e050aU, 0x33a4c597U, 0x44661122U, 0xc12f77eeU, 0xed157cf8U, 0xf5017af4U, 0xfd0d78f0U, 0xd8b4366cU, 0x70481c38U, 0xe4963972U, 0x79cb59b2U, 0x60501830U, 0x45e956acU, 0xf68db37bU, 0xfa87b07dU, 0x90d82448U, 0x80c02040U, 0xf28bb279U, 0x724b9239U, 0xb6eda35bU, 0x27bac09dU, 0x0d854488U, 0x955162c4U, 0x40601020U, 0xea9fb475U, 0x2a3f8415U, 0x11974386U, 0x764d933bU, 0x2fb6c299U, 0x35a14a94U, 0xcea9bd67U, 0x06058f03U, 
0xb4ee2d5aU, 0xcaafbc65U, 0x4a6f9c25U, 0xb5616ad4U, 0x1d9d4080U, 0x1b98cf83U, 0xb2eba259U, 0x3a27801dU, 0x21bf4f9eU, 0x7c421f3eU, 0x0f86ca89U, 0x92dbaa49U, 0x15914284U, }; static const u32 T3[256] = { 0xbbd269baU, 0xe54da854U, 0xe2bc5e2fU, 0x25cde874U, 0xf751a653U, 0xd06bbbd3U, 0xd66fb9d2U, 0xb3299a4dU, 0xfd5da050U, 0xcf8a45acU, 0x090e078dU, 0xa5c663bfU, 0x3ddde070U, 0xf155a452U, 0x7b52299aU, 0xb52d984cU, 0x468fc9eaU, 0xc473b7d5U, 0x55663397U, 0xdc63bfd1U, 0xaacc6633U, 0xfb59a251U, 0xc771b65bU, 0xf3a251a6U, 0xfe5fa1deU, 0xad3d9048U, 0xd79a4da8U, 0x715e2f99U, 0xe04babdbU, 0xacc86432U, 0x95e673b7U, 0x32d7e5fcU, 0x70abdbe3U, 0x6342219eU, 0x417e3f91U, 0x7d562b9bU, 0x76afd9e2U, 0xbdd66bbbU, 0x9b198241U, 0x79a5dc6eU, 0xf9ae57a5U, 0x800b8bcbU, 0x67b1d66bU, 0x596e3795U, 0xe1be5fa1U, 0x10ebfbf3U, 0x81fe7fb1U, 0x0c080402U, 0x921785ccU, 0xa23795c4U, 0x4e743a1dU, 0x78502814U, 0xb02b9bc3U, 0x5791c663U, 0xe64fa9daU, 0xd369ba5dU, 0xdf61be5fU, 0xf257a5dcU, 0x13e9fa7dU, 0x941387cdU, 0x1fe1fe7fU, 0xc175b45aU, 0x75add86cU, 0xd56db85cU, 0x08fbf3f7U, 0xd4984c26U, 0x38dbe3ffU, 0x5493c7edU, 0x4a87cde8U, 0x694e279dU, 0x7fa1de6fU, 0x0302018eU, 0x56643219U, 0xe7ba5da0U, 0x1ae7fdf0U, 0x111e0f89U, 0x223c1e0fU, 0x121c0e07U, 0xc58643afU, 0x20cbebfbU, 0x30201008U, 0x7e542a15U, 0x2e341a0dU, 0x18100804U, 0x06040201U, 0x458dc864U, 0xf85ba3dfU, 0x29c5ec76U, 0x0bf9f279U, 0xf453a7ddU, 0x8ef47a3dU, 0x74582c16U, 0x82fc7e3fU, 0xb2dc6e37U, 0x73a9da6dU, 0x90e07038U, 0xb1de6fb9U, 0x37d1e673U, 0x4c83cfe9U, 0xbed46a35U, 0xe349aa55U, 0x3bd9e271U, 0x07f1f67bU, 0x0f0a058cU, 0x31d5e472U, 0x171a0d88U, 0x0efff1f6U, 0xfca8542aU, 0x84f87c3eU, 0xd965bc5eU, 0xd29c4e27U, 0x89058c46U, 0x2830180cU, 0x4389ca65U, 0x6dbdd068U, 0x5b99c261U, 0x0a0c0603U, 0xbc239fc1U, 0xef41ae57U, 0xce7fb1d6U, 0xec43afd9U, 0xcd7db058U, 0xea47add8U, 0x4985cc66U, 0xc87bb3d7U, 0x9ce8743aU, 0x8a078dc8U, 0x88f0783cU, 0x26cfe9faU, 0x53623196U, 0xf5a653a7U, 0x775a2d98U, 0x5297c5ecU, 0xb7da6db8U, 0xa83b93c7U, 0xc38241aeU, 0x6bb9d269U, 0xa731964bU, 0xdd964babU, 0xd19e4fa9U, 0x4f81ce67U, 0x3c28140aU, 0x8f018e47U, 0x16eff9f2U, 0x99ee77b5U, 0xcc884422U, 0x64b3d7e5U, 0x5e9fc1eeU, 0xa3c261beU, 0xfaac562bU, 0x213e1f81U, 0x6c482412U, 0x2d361b83U, 0x5a6c361bU, 0x24381c0eU, 0xca8c4623U, 0x04f3f7f5U, 0x83098a45U, 0xc6844221U, 0x9e1f81ceU, 0xab399249U, 0xe8b0582cU, 0x2cc3eff9U, 0x6ebfd1e6U, 0x93e271b6U, 0xf0a05028U, 0x725c2e17U, 0x2b321982U, 0x5c68341aU, 0x1d160b8bU, 0x3edfe1feU, 0x1b12098aU, 0x36241209U, 0x8c038fc9U, 0x35261387U, 0xb9259c4eU, 0x7ca3dfe1U, 0xe4b85c2eU, 0x62b7d5e4U, 0x7aa7dde0U, 0x408bcbebU, 0x477a3d90U, 0xffaa55a4U, 0x44783c1eU, 0x392e1785U, 0x5d9dc060U, 0x00000000U, 0xde944a25U, 0x02f7f5f4U, 0x1ce3fff1U, 0x5f6a3594U, 0x3a2c160bU, 0x68bbd3e7U, 0x23c9ea75U, 0x589bc3efU, 0xb8d06834U, 0xa6c46231U, 0xc277b5d4U, 0xda67bdd0U, 0x33221186U, 0x19e5fc7eU, 0xc98e47adU, 0x34d3e7fdU, 0xf6a45229U, 0xa0c06030U, 0x9aec763bU, 0x6546239fU, 0x2ac7edf8U, 0xae3f91c6U, 0x6a4c2613U, 0x14180c06U, 0x1e140a05U, 0xa43397c5U, 0x66442211U, 0x2fc1ee77U, 0x15edf87cU, 0x01f5f47aU, 0x0dfdf078U, 0xb4d86c36U, 0x4870381cU, 0x96e47239U, 0xcb79b259U, 0x50603018U, 0xe945ac56U, 0x8df67bb3U, 0x87fa7db0U, 0xd8904824U, 0xc0804020U, 0x8bf279b2U, 0x4b723992U, 0xedb65ba3U, 0xba279dc0U, 0x850d8844U, 0x5195c462U, 0x60402010U, 0x9fea75b4U, 0x3f2a1584U, 0x97118643U, 0x4d763b93U, 0xb62f99c2U, 0xa135944aU, 0xa9ce67bdU, 0x0506038fU, 0xeeb45a2dU, 0xafca65bcU, 0x6f4a259cU, 0x61b5d46aU, 0x9d1d8040U, 0x981b83cfU, 0xebb259a2U, 0x273a1d80U, 0xbf219e4fU, 0x427c3e1fU, 0x860f89caU, 0xdb9249aaU, 0x91158442U, }; static const u32 
T4[256] = { 0xbabababaU, 0x54545454U, 0x2f2f2f2fU, 0x74747474U, 0x53535353U, 0xd3d3d3d3U, 0xd2d2d2d2U, 0x4d4d4d4dU, 0x50505050U, 0xacacacacU, 0x8d8d8d8dU, 0xbfbfbfbfU, 0x70707070U, 0x52525252U, 0x9a9a9a9aU, 0x4c4c4c4cU, 0xeaeaeaeaU, 0xd5d5d5d5U, 0x97979797U, 0xd1d1d1d1U, 0x33333333U, 0x51515151U, 0x5b5b5b5bU, 0xa6a6a6a6U, 0xdedededeU, 0x48484848U, 0xa8a8a8a8U, 0x99999999U, 0xdbdbdbdbU, 0x32323232U, 0xb7b7b7b7U, 0xfcfcfcfcU, 0xe3e3e3e3U, 0x9e9e9e9eU, 0x91919191U, 0x9b9b9b9bU, 0xe2e2e2e2U, 0xbbbbbbbbU, 0x41414141U, 0x6e6e6e6eU, 0xa5a5a5a5U, 0xcbcbcbcbU, 0x6b6b6b6bU, 0x95959595U, 0xa1a1a1a1U, 0xf3f3f3f3U, 0xb1b1b1b1U, 0x02020202U, 0xccccccccU, 0xc4c4c4c4U, 0x1d1d1d1dU, 0x14141414U, 0xc3c3c3c3U, 0x63636363U, 0xdadadadaU, 0x5d5d5d5dU, 0x5f5f5f5fU, 0xdcdcdcdcU, 0x7d7d7d7dU, 0xcdcdcdcdU, 0x7f7f7f7fU, 0x5a5a5a5aU, 0x6c6c6c6cU, 0x5c5c5c5cU, 0xf7f7f7f7U, 0x26262626U, 0xffffffffU, 0xededededU, 0xe8e8e8e8U, 0x9d9d9d9dU, 0x6f6f6f6fU, 0x8e8e8e8eU, 0x19191919U, 0xa0a0a0a0U, 0xf0f0f0f0U, 0x89898989U, 0x0f0f0f0fU, 0x07070707U, 0xafafafafU, 0xfbfbfbfbU, 0x08080808U, 0x15151515U, 0x0d0d0d0dU, 0x04040404U, 0x01010101U, 0x64646464U, 0xdfdfdfdfU, 0x76767676U, 0x79797979U, 0xddddddddU, 0x3d3d3d3dU, 0x16161616U, 0x3f3f3f3fU, 0x37373737U, 0x6d6d6d6dU, 0x38383838U, 0xb9b9b9b9U, 0x73737373U, 0xe9e9e9e9U, 0x35353535U, 0x55555555U, 0x71717171U, 0x7b7b7b7bU, 0x8c8c8c8cU, 0x72727272U, 0x88888888U, 0xf6f6f6f6U, 0x2a2a2a2aU, 0x3e3e3e3eU, 0x5e5e5e5eU, 0x27272727U, 0x46464646U, 0x0c0c0c0cU, 0x65656565U, 0x68686868U, 0x61616161U, 0x03030303U, 0xc1c1c1c1U, 0x57575757U, 0xd6d6d6d6U, 0xd9d9d9d9U, 0x58585858U, 0xd8d8d8d8U, 0x66666666U, 0xd7d7d7d7U, 0x3a3a3a3aU, 0xc8c8c8c8U, 0x3c3c3c3cU, 0xfafafafaU, 0x96969696U, 0xa7a7a7a7U, 0x98989898U, 0xececececU, 0xb8b8b8b8U, 0xc7c7c7c7U, 0xaeaeaeaeU, 0x69696969U, 0x4b4b4b4bU, 0xababababU, 0xa9a9a9a9U, 0x67676767U, 0x0a0a0a0aU, 0x47474747U, 0xf2f2f2f2U, 0xb5b5b5b5U, 0x22222222U, 0xe5e5e5e5U, 0xeeeeeeeeU, 0xbebebebeU, 0x2b2b2b2bU, 0x81818181U, 0x12121212U, 0x83838383U, 0x1b1b1b1bU, 0x0e0e0e0eU, 0x23232323U, 0xf5f5f5f5U, 0x45454545U, 0x21212121U, 0xcecececeU, 0x49494949U, 0x2c2c2c2cU, 0xf9f9f9f9U, 0xe6e6e6e6U, 0xb6b6b6b6U, 0x28282828U, 0x17171717U, 0x82828282U, 0x1a1a1a1aU, 0x8b8b8b8bU, 0xfefefefeU, 0x8a8a8a8aU, 0x09090909U, 0xc9c9c9c9U, 0x87878787U, 0x4e4e4e4eU, 0xe1e1e1e1U, 0x2e2e2e2eU, 0xe4e4e4e4U, 0xe0e0e0e0U, 0xebebebebU, 0x90909090U, 0xa4a4a4a4U, 0x1e1e1e1eU, 0x85858585U, 0x60606060U, 0x00000000U, 0x25252525U, 0xf4f4f4f4U, 0xf1f1f1f1U, 0x94949494U, 0x0b0b0b0bU, 0xe7e7e7e7U, 0x75757575U, 0xefefefefU, 0x34343434U, 0x31313131U, 0xd4d4d4d4U, 0xd0d0d0d0U, 0x86868686U, 0x7e7e7e7eU, 0xadadadadU, 0xfdfdfdfdU, 0x29292929U, 0x30303030U, 0x3b3b3b3bU, 0x9f9f9f9fU, 0xf8f8f8f8U, 0xc6c6c6c6U, 0x13131313U, 0x06060606U, 0x05050505U, 0xc5c5c5c5U, 0x11111111U, 0x77777777U, 0x7c7c7c7cU, 0x7a7a7a7aU, 0x78787878U, 0x36363636U, 0x1c1c1c1cU, 0x39393939U, 0x59595959U, 0x18181818U, 0x56565656U, 0xb3b3b3b3U, 0xb0b0b0b0U, 0x24242424U, 0x20202020U, 0xb2b2b2b2U, 0x92929292U, 0xa3a3a3a3U, 0xc0c0c0c0U, 0x44444444U, 0x62626262U, 0x10101010U, 0xb4b4b4b4U, 0x84848484U, 0x43434343U, 0x93939393U, 0xc2c2c2c2U, 0x4a4a4a4aU, 0xbdbdbdbdU, 0x8f8f8f8fU, 0x2d2d2d2dU, 0xbcbcbcbcU, 0x9c9c9c9cU, 0x6a6a6a6aU, 0x40404040U, 0xcfcfcfcfU, 0xa2a2a2a2U, 0x80808080U, 0x4f4f4f4fU, 0x1f1f1f1fU, 0xcacacacaU, 0xaaaaaaaaU, 0x42424242U, }; static const u32 T5[256] = { 0x00000000U, 0x01020608U, 0x02040c10U, 0x03060a18U, 0x04081820U, 0x050a1e28U, 0x060c1430U, 0x070e1238U, 0x08103040U, 0x09123648U, 0x0a143c50U, 0x0b163a58U, 0x0c182860U, 0x0d1a2e68U, 
0x0e1c2470U, 0x0f1e2278U, 0x10206080U, 0x11226688U, 0x12246c90U, 0x13266a98U, 0x142878a0U, 0x152a7ea8U, 0x162c74b0U, 0x172e72b8U, 0x183050c0U, 0x193256c8U, 0x1a345cd0U, 0x1b365ad8U, 0x1c3848e0U, 0x1d3a4ee8U, 0x1e3c44f0U, 0x1f3e42f8U, 0x2040c01dU, 0x2142c615U, 0x2244cc0dU, 0x2346ca05U, 0x2448d83dU, 0x254ade35U, 0x264cd42dU, 0x274ed225U, 0x2850f05dU, 0x2952f655U, 0x2a54fc4dU, 0x2b56fa45U, 0x2c58e87dU, 0x2d5aee75U, 0x2e5ce46dU, 0x2f5ee265U, 0x3060a09dU, 0x3162a695U, 0x3264ac8dU, 0x3366aa85U, 0x3468b8bdU, 0x356abeb5U, 0x366cb4adU, 0x376eb2a5U, 0x387090ddU, 0x397296d5U, 0x3a749ccdU, 0x3b769ac5U, 0x3c7888fdU, 0x3d7a8ef5U, 0x3e7c84edU, 0x3f7e82e5U, 0x40809d3aU, 0x41829b32U, 0x4284912aU, 0x43869722U, 0x4488851aU, 0x458a8312U, 0x468c890aU, 0x478e8f02U, 0x4890ad7aU, 0x4992ab72U, 0x4a94a16aU, 0x4b96a762U, 0x4c98b55aU, 0x4d9ab352U, 0x4e9cb94aU, 0x4f9ebf42U, 0x50a0fdbaU, 0x51a2fbb2U, 0x52a4f1aaU, 0x53a6f7a2U, 0x54a8e59aU, 0x55aae392U, 0x56ace98aU, 0x57aeef82U, 0x58b0cdfaU, 0x59b2cbf2U, 0x5ab4c1eaU, 0x5bb6c7e2U, 0x5cb8d5daU, 0x5dbad3d2U, 0x5ebcd9caU, 0x5fbedfc2U, 0x60c05d27U, 0x61c25b2fU, 0x62c45137U, 0x63c6573fU, 0x64c84507U, 0x65ca430fU, 0x66cc4917U, 0x67ce4f1fU, 0x68d06d67U, 0x69d26b6fU, 0x6ad46177U, 0x6bd6677fU, 0x6cd87547U, 0x6dda734fU, 0x6edc7957U, 0x6fde7f5fU, 0x70e03da7U, 0x71e23bafU, 0x72e431b7U, 0x73e637bfU, 0x74e82587U, 0x75ea238fU, 0x76ec2997U, 0x77ee2f9fU, 0x78f00de7U, 0x79f20befU, 0x7af401f7U, 0x7bf607ffU, 0x7cf815c7U, 0x7dfa13cfU, 0x7efc19d7U, 0x7ffe1fdfU, 0x801d2774U, 0x811f217cU, 0x82192b64U, 0x831b2d6cU, 0x84153f54U, 0x8517395cU, 0x86113344U, 0x8713354cU, 0x880d1734U, 0x890f113cU, 0x8a091b24U, 0x8b0b1d2cU, 0x8c050f14U, 0x8d07091cU, 0x8e010304U, 0x8f03050cU, 0x903d47f4U, 0x913f41fcU, 0x92394be4U, 0x933b4decU, 0x94355fd4U, 0x953759dcU, 0x963153c4U, 0x973355ccU, 0x982d77b4U, 0x992f71bcU, 0x9a297ba4U, 0x9b2b7dacU, 0x9c256f94U, 0x9d27699cU, 0x9e216384U, 0x9f23658cU, 0xa05de769U, 0xa15fe161U, 0xa259eb79U, 0xa35bed71U, 0xa455ff49U, 0xa557f941U, 0xa651f359U, 0xa753f551U, 0xa84dd729U, 0xa94fd121U, 0xaa49db39U, 0xab4bdd31U, 0xac45cf09U, 0xad47c901U, 0xae41c319U, 0xaf43c511U, 0xb07d87e9U, 0xb17f81e1U, 0xb2798bf9U, 0xb37b8df1U, 0xb4759fc9U, 0xb57799c1U, 0xb67193d9U, 0xb77395d1U, 0xb86db7a9U, 0xb96fb1a1U, 0xba69bbb9U, 0xbb6bbdb1U, 0xbc65af89U, 0xbd67a981U, 0xbe61a399U, 0xbf63a591U, 0xc09dba4eU, 0xc19fbc46U, 0xc299b65eU, 0xc39bb056U, 0xc495a26eU, 0xc597a466U, 0xc691ae7eU, 0xc793a876U, 0xc88d8a0eU, 0xc98f8c06U, 0xca89861eU, 0xcb8b8016U, 0xcc85922eU, 0xcd879426U, 0xce819e3eU, 0xcf839836U, 0xd0bddaceU, 0xd1bfdcc6U, 0xd2b9d6deU, 0xd3bbd0d6U, 0xd4b5c2eeU, 0xd5b7c4e6U, 0xd6b1cefeU, 0xd7b3c8f6U, 0xd8adea8eU, 0xd9afec86U, 0xdaa9e69eU, 0xdbabe096U, 0xdca5f2aeU, 0xdda7f4a6U, 0xdea1febeU, 0xdfa3f8b6U, 0xe0dd7a53U, 0xe1df7c5bU, 0xe2d97643U, 0xe3db704bU, 0xe4d56273U, 0xe5d7647bU, 0xe6d16e63U, 0xe7d3686bU, 0xe8cd4a13U, 0xe9cf4c1bU, 0xeac94603U, 0xebcb400bU, 0xecc55233U, 0xedc7543bU, 0xeec15e23U, 0xefc3582bU, 0xf0fd1ad3U, 0xf1ff1cdbU, 0xf2f916c3U, 0xf3fb10cbU, 0xf4f502f3U, 0xf5f704fbU, 0xf6f10ee3U, 0xf7f308ebU, 0xf8ed2a93U, 0xf9ef2c9bU, 0xfae92683U, 0xfbeb208bU, 0xfce532b3U, 0xfde734bbU, 0xfee13ea3U, 0xffe338abU, }; static const u32 rc[] = { 0xba542f74U, 0x53d3d24dU, 0x50ac8dbfU, 0x70529a4cU, 0xead597d1U, 0x33515ba6U, 0xde48a899U, 0xdb32b7fcU, 0xe39e919bU, 0xe2bb416eU, 0xa5cb6b95U, 0xa1f3b102U, 0xccc41d14U, 0xc363da5dU, 0x5fdc7dcdU, 0x7f5a6c5cU, 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U, }; static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct anubis_ctx *ctx = 
crypto_tfm_ctx(tfm); int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; switch (key_len) { case 16: case 20: case 24: case 28: case 32: case 36: case 40: break; default: return -EINVAL; } ctx->key_len = key_len * 8; N = ctx->key_len >> 5; ctx->R = R = 8 + N; /* * map cipher key to initial key state (mu): */ for (i = 0; i < N; i++) kappa[i] = get_unaligned_be32(&in_key[4 * i]); /* * generate R + 1 round keys: */ for (r = 0; r <= R; r++) { u32 K0, K1, K2, K3; /* * generate r-th round key K^r: */ K0 = T4[(kappa[N - 1] >> 24) ]; K1 = T4[(kappa[N - 1] >> 16) & 0xff]; K2 = T4[(kappa[N - 1] >> 8) & 0xff]; K3 = T4[(kappa[N - 1] ) & 0xff]; for (i = N - 2; i >= 0; i--) { K0 = T4[(kappa[i] >> 24) ] ^ (T5[(K0 >> 24) ] & 0xff000000U) ^ (T5[(K0 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K0 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K0 ) & 0xff] & 0x000000ffU); K1 = T4[(kappa[i] >> 16) & 0xff] ^ (T5[(K1 >> 24) ] & 0xff000000U) ^ (T5[(K1 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K1 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K1 ) & 0xff] & 0x000000ffU); K2 = T4[(kappa[i] >> 8) & 0xff] ^ (T5[(K2 >> 24) ] & 0xff000000U) ^ (T5[(K2 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K2 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K2 ) & 0xff] & 0x000000ffU); K3 = T4[(kappa[i] ) & 0xff] ^ (T5[(K3 >> 24) ] & 0xff000000U) ^ (T5[(K3 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K3 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K3 ) & 0xff] & 0x000000ffU); } ctx->E[r][0] = K0; ctx->E[r][1] = K1; ctx->E[r][2] = K2; ctx->E[r][3] = K3; /* * compute kappa^{r+1} from kappa^r: */ if (r == R) break; for (i = 0; i < N; i++) { int j = i; inter[i] = T0[(kappa[j--] >> 24) ]; if (j < 0) j = N - 1; inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T3[(kappa[j ] ) & 0xff]; } kappa[0] = inter[0] ^ rc[r]; for (i = 1; i < N; i++) kappa[i] = inter[i]; } /* * generate inverse key schedule: K'^0 = K^R, K'^R = * K^0, K'^r = theta(K^{R-r}): */ for (i = 0; i < 4; i++) { ctx->D[0][i] = ctx->E[R][i]; ctx->D[R][i] = ctx->E[0][i]; } for (r = 1; r < R; r++) { for (i = 0; i < 4; i++) { u32 v = ctx->E[R - r][i]; ctx->D[r][i] = T0[T4[(v >> 24) ] & 0xff] ^ T1[T4[(v >> 16) & 0xff] & 0xff] ^ T2[T4[(v >> 8) & 0xff] & 0xff] ^ T3[T4[(v ) & 0xff] & 0xff]; } } return 0; } static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], u8 *dst, const u8 *src, const int R) { int i, r; u32 state[4]; u32 inter[4]; /* * map plaintext block to cipher state (mu) * and add initial round key (sigma[K^0]): */ for (i = 0; i < 4; i++) state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i]; /* * R - 1 full rounds: */ for (r = 1; r < R; r++) { inter[0] = T0[(state[0] >> 24) ] ^ T1[(state[1] >> 24) ] ^ T2[(state[2] >> 24) ] ^ T3[(state[3] >> 24) ] ^ roundKey[r][0]; inter[1] = T0[(state[0] >> 16) & 0xff] ^ T1[(state[1] >> 16) & 0xff] ^ T2[(state[2] >> 16) & 0xff] ^ T3[(state[3] >> 16) & 0xff] ^ roundKey[r][1]; inter[2] = T0[(state[0] >> 8) & 0xff] ^ T1[(state[1] >> 8) & 0xff] ^ T2[(state[2] >> 8) & 0xff] ^ T3[(state[3] >> 8) & 0xff] ^ roundKey[r][2]; inter[3] = T0[(state[0] ) & 0xff] ^ T1[(state[1] ) & 0xff] ^ T2[(state[2] ) & 0xff] ^ T3[(state[3] ) & 0xff] ^ roundKey[r][3]; state[0] = inter[0]; state[1] = inter[1]; state[2] = inter[2]; state[3] = inter[3]; } /* * last round: */ inter[0] = (T0[(state[0] >> 24) ] & 0xff000000U) ^ (T1[(state[1] >> 24) ] & 0x00ff0000U) ^ (T2[(state[2] >> 24) ] & 0x0000ff00U) ^ (T3[(state[3] >> 24) ] & 0x000000ffU) ^ roundKey[R][0]; inter[1] = (T0[(state[0] >> 16) & 0xff] & 0xff000000U) ^ 
(T1[(state[1] >> 16) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 16) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 16) & 0xff] & 0x000000ffU) ^ roundKey[R][1]; inter[2] = (T0[(state[0] >> 8) & 0xff] & 0xff000000U) ^ (T1[(state[1] >> 8) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 8) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 8) & 0xff] & 0x000000ffU) ^ roundKey[R][2]; inter[3] = (T0[(state[0] ) & 0xff] & 0xff000000U) ^ (T1[(state[1] ) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] ) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] ) & 0xff] & 0x000000ffU) ^ roundKey[R][3]; /* * map cipher state to ciphertext block (mu^{-1}): */ for (i = 0; i < 4; i++) put_unaligned_be32(inter[i], &dst[4 * i]); } static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->E, dst, src, ctx->R); } static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->D, dst, src, ctx->R); } static struct crypto_alg anubis_alg = { .cra_name = "anubis", .cra_driver_name = "anubis-generic", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ANUBIS_BLOCK_SIZE, .cra_ctxsize = sizeof (struct anubis_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = ANUBIS_MIN_KEY_SIZE, .cia_max_keysize = ANUBIS_MAX_KEY_SIZE, .cia_setkey = anubis_setkey, .cia_encrypt = anubis_encrypt, .cia_decrypt = anubis_decrypt } } }; static int __init anubis_mod_init(void) { int ret = 0; ret = crypto_register_alg(&anubis_alg); return ret; } static void __exit anubis_mod_fini(void) { crypto_unregister_alg(&anubis_alg); } subsys_initcall(anubis_mod_init); module_exit(anubis_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Anubis Cryptographic Algorithm"); MODULE_ALIAS_CRYPTO("anubis"); |
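As a worked restatement of the key schedule in anubis_setkey() above (no new behaviour, just the arithmetic the code performs): the key is mapped to N = key_bits / 32 columns and the cipher runs R = 8 + N rounds, so each key size accepted by the switch statement yields the following parameters.

/*
 * key bytes   key bits    N    rounds R = 8 + N
 *     16        128       4        12
 *     20        160       5        13
 *     24        192       6        14
 *     28        224       7        15
 *     32        256       8        16
 *     36        288       9        17
 *     40        320      10        18   (= ANUBIS_MAX_ROUNDS)
 *
 * anubis_setkey() stores the R + 1 encryption round keys in ctx->E[] and the
 * corresponding inverse (decryption) schedule in ctx->D[]; anubis_crypt()
 * then applies R - 1 full rounds plus a final round.
 */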
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor policy manipulation functions
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor policy is based around profiles, which contain the rules a
 * task is confined by.  Every task in the system has a profile attached
 * to it, determined either by matching "unconfined" tasks against the
 * visible set of profiles or by following a profile's attachment rules.
 *
 * Each profile exists in a profile namespace, which is a container of
 * visible profiles.  Each namespace contains a special "unconfined" profile,
 * which doesn't enforce any confinement on a task beyond DAC.
 *
 * Namespace and profile names can be written together in either
 * of two syntaxes.
 * :namespace:profile - used by kernel interfaces for easy detection
 * namespace://profile - used by policy
 *
 * Profile names cannot start with : or @ or ^ and may not contain \0
 *
 * Reserved profile names
 *    unconfined - special automatically generated unconfined profile
 *    inherit - special name to indicate profile inheritance
 *    null-XXXX-YYYY - special automatically generated learning profiles
 *
 * Namespace names may not start with / or @ and may not contain \0 or :
 * Reserved namespace names
 *    user-XXXX - user defined profiles
 *
 * a // in a profile or namespace name indicates a hierarchical name, with the
 * name before the // being the parent and the name after it the child.
 *
 * Profile and namespace hierarchies serve two different but similar purposes.
 * The namespace contains the set of visible profiles that are considered
 * for attachment.  The hierarchy of namespaces allows for virtualizing
 * the namespace so that, for example, a chroot can have its own set of
 * profiles which may define some local user namespaces.
 * The profile hierarchy serves two distinct purposes:
 * - it allows for sub profiles or hats, which allow an application to run
 *   subprograms under their own profiles with different restrictions than
 *   the application itself, rather than having them use the system profile.
 *   eg. if a mail program starts an editor, the policy might make the
 *       restrictions on the editor tighter than those on the mail program,
 *       and certainly different from the general editor restrictions
 * - it allows for a binary hierarchy of profiles, so that execution history
 *   is preserved.  This feature isn't exploited by the AppArmor reference
 *   policy but is allowed.  NOTE: this is currently suboptimal because
 *   profile aliasing is not yet implemented, so a profile for each level
 *   must be defined.
 *   eg. /bin/bash///bin/ls as a name would indicate /bin/ls was started
 *       from /bin/bash
 *
 * A profile or namespace name that can contain one or more // separators
 * is referred to as an hname (hierarchical).
 * eg. /bin/bash//bin/ls
 *
 * An fqname is a name that may contain both namespace and profile hnames.
 * eg. :ns:/bin/bash//bin/ls
 *
 * NOTES:
 * - locking of profile lists is currently fairly coarse.  All profile
 *   lists within a namespace use the namespace lock.
* FIXME: move profile lists to using rcu_lists */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/cred.h> #include <linux/rculist.h> #include <linux/user_namespace.h> #include "include/apparmor.h" #include "include/capability.h" #include "include/cred.h" #include "include/file.h" #include "include/ipc.h" #include "include/match.h" #include "include/path.h" #include "include/policy.h" #include "include/policy_ns.h" #include "include/policy_unpack.h" #include "include/resource.h" int unprivileged_userns_apparmor_policy = 1; int aa_unprivileged_unconfined_restricted; const char *const aa_profile_mode_names[] = { "enforce", "complain", "kill", "unconfined", "user", }; static void aa_free_pdb(struct aa_policydb *pdb) { if (pdb) { aa_put_dfa(pdb->dfa); kvfree(pdb->perms); aa_free_str_table(&pdb->trans); kfree(pdb); } } /** * aa_pdb_free_kref - free aa_policydb by kref (called by aa_put_pdb) * @kref: kref callback for freeing of a dfa (NOT NULL) */ void aa_pdb_free_kref(struct kref *kref) { struct aa_policydb *pdb = container_of(kref, struct aa_policydb, count); aa_free_pdb(pdb); } struct aa_policydb *aa_alloc_pdb(gfp_t gfp) { struct aa_policydb *pdb = kzalloc(sizeof(struct aa_policydb), gfp); if (!pdb) return NULL; kref_init(&pdb->count); return pdb; } /** * __add_profile - add a profiles to list and label tree * @list: list to add it to (NOT NULL) * @profile: the profile to add (NOT NULL) * * refcount @profile, should be put by __list_remove_profile * * Requires: namespace lock be held, or list not be shared */ static void __add_profile(struct list_head *list, struct aa_profile *profile) { struct aa_label *l; AA_BUG(!list); AA_BUG(!profile); AA_BUG(!profile->ns); AA_BUG(!mutex_is_locked(&profile->ns->lock)); list_add_rcu(&profile->base.list, list); /* get list reference */ aa_get_profile(profile); l = aa_label_insert(&profile->ns->labels, &profile->label); AA_BUG(l != &profile->label); aa_put_label(l); } /** * __list_remove_profile - remove a profile from the list it is on * @profile: the profile to remove (NOT NULL) * * remove a profile from the list, warning generally removal should * be done with __replace_profile as most profile removals are * replacements to the unconfined profile. 
* * put @profile list refcount * * Requires: namespace lock be held, or list not have been live */ static void __list_remove_profile(struct aa_profile *profile) { AA_BUG(!profile); AA_BUG(!profile->ns); AA_BUG(!mutex_is_locked(&profile->ns->lock)); list_del_rcu(&profile->base.list); aa_put_profile(profile); } /** * __remove_profile - remove old profile, and children * @profile: profile to be replaced (NOT NULL) * * Requires: namespace list lock be held, or list not be shared */ static void __remove_profile(struct aa_profile *profile) { AA_BUG(!profile); AA_BUG(!profile->ns); AA_BUG(!mutex_is_locked(&profile->ns->lock)); /* release any children lists first */ __aa_profile_list_release(&profile->base.profiles); /* released by free_profile */ aa_label_remove(&profile->label); __aafs_profile_rmdir(profile); __list_remove_profile(profile); } /** * __aa_profile_list_release - remove all profiles on the list and put refs * @head: list of profiles (NOT NULL) * * Requires: namespace lock be held */ void __aa_profile_list_release(struct list_head *head) { struct aa_profile *profile, *tmp; list_for_each_entry_safe(profile, tmp, head, base.list) __remove_profile(profile); } /** * aa_free_data - free a data blob * @ptr: data to free * @arg: unused */ static void aa_free_data(void *ptr, void *arg) { struct aa_data *data = ptr; kvfree_sensitive(data->data, data->size); kfree_sensitive(data->key); kfree_sensitive(data); } static void free_attachment(struct aa_attachment *attach) { int i; for (i = 0; i < attach->xattr_count; i++) kfree_sensitive(attach->xattrs[i]); kfree_sensitive(attach->xattrs); aa_put_pdb(attach->xmatch); } static void free_ruleset(struct aa_ruleset *rules) { int i; aa_put_pdb(rules->file); aa_put_pdb(rules->policy); aa_free_cap_rules(&rules->caps); aa_free_rlimit_rules(&rules->rlimits); for (i = 0; i < rules->secmark_count; i++) kfree_sensitive(rules->secmark[i].label); kfree_sensitive(rules->secmark); kfree_sensitive(rules); } struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp) { struct aa_ruleset *rules; rules = kzalloc(sizeof(*rules), gfp); if (rules) INIT_LIST_HEAD(&rules->list); return rules; } /** * aa_free_profile - free a profile * @profile: the profile to free (MAYBE NULL) * * Free a profile, its hats and null_profile. All references to the profile, * its hats and null_profile must have been put. * * If the profile was referenced from a task context, free_profile() will * be called from an rcu callback routine, so we must not sleep here. 
*/ void aa_free_profile(struct aa_profile *profile) { struct aa_ruleset *rule, *tmp; struct rhashtable *rht; AA_DEBUG("%s(%p)\n", __func__, profile); if (!profile) return; /* free children profiles */ aa_policy_destroy(&profile->base); aa_put_profile(rcu_access_pointer(profile->parent)); aa_put_ns(profile->ns); kfree_sensitive(profile->rename); kfree_sensitive(profile->disconnected); free_attachment(&profile->attach); /* * at this point there are no tasks that can have a reference * to rules */ list_for_each_entry_safe(rule, tmp, &profile->rules, list) { list_del_init(&rule->list); free_ruleset(rule); } kfree_sensitive(profile->dirname); if (profile->data) { rht = profile->data; profile->data = NULL; rhashtable_free_and_destroy(rht, aa_free_data, NULL); kfree_sensitive(rht); } kfree_sensitive(profile->hash); aa_put_loaddata(profile->rawdata); aa_label_destroy(&profile->label); kfree_sensitive(profile); } /** * aa_alloc_profile - allocate, initialize and return a new profile * @hname: name of the profile (NOT NULL) * @proxy: proxy to use OR null if to allocate a new one * @gfp: allocation type * * Returns: refcount profile or NULL on failure */ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy, gfp_t gfp) { struct aa_profile *profile; struct aa_ruleset *rules; /* freed by free_profile - usually through aa_put_profile */ profile = kzalloc(struct_size(profile, label.vec, 2), gfp); if (!profile) return NULL; if (!aa_policy_init(&profile->base, NULL, hname, gfp)) goto fail; if (!aa_label_init(&profile->label, 1, gfp)) goto fail; INIT_LIST_HEAD(&profile->rules); /* allocate the first ruleset, but leave it empty */ rules = aa_alloc_ruleset(gfp); if (!rules) goto fail; list_add(&rules->list, &profile->rules); /* update being set needed by fs interface */ if (!proxy) { proxy = aa_alloc_proxy(&profile->label, gfp); if (!proxy) goto fail; } else aa_get_proxy(proxy); profile->label.proxy = proxy; profile->label.hname = profile->base.hname; profile->label.flags |= FLAG_PROFILE; profile->label.vec[0] = profile; /* refcount released by caller */ return profile; fail: aa_free_profile(profile); return NULL; } /* TODO: profile accounting - setup in remove */ /** * __strn_find_child - find a profile on @head list using substring of @name * @head: list to search (NOT NULL) * @name: name of profile (NOT NULL) * @len: length of @name substring to match * * Requires: rcu_read_lock be held * * Returns: unrefcounted profile ptr, or NULL if not found */ static struct aa_profile *__strn_find_child(struct list_head *head, const char *name, int len) { return (struct aa_profile *)__policy_strn_find(head, name, len); } /** * __find_child - find a profile on @head list with a name matching @name * @head: list to search (NOT NULL) * @name: name of profile (NOT NULL) * * Requires: rcu_read_lock be held * * Returns: unrefcounted profile ptr, or NULL if not found */ static struct aa_profile *__find_child(struct list_head *head, const char *name) { return __strn_find_child(head, name, strlen(name)); } /** * aa_find_child - find a profile by @name in @parent * @parent: profile to search (NOT NULL) * @name: profile name to search for (NOT NULL) * * Returns: a refcounted profile or NULL if not found */ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name) { struct aa_profile *profile; rcu_read_lock(); do { profile = __find_child(&parent->base.profiles, name); } while (profile && !aa_get_profile_not0(profile)); rcu_read_unlock(); /* refcount released by caller */ return profile; 
} /** * __lookup_parent - lookup the parent of a profile of name @hname * @ns: namespace to lookup profile in (NOT NULL) * @hname: hierarchical profile name to find parent of (NOT NULL) * * Lookups up the parent of a fully qualified profile name, the profile * that matches hname does not need to exist, in general this * is used to load a new profile. * * Requires: rcu_read_lock be held * * Returns: unrefcounted policy or NULL if not found */ static struct aa_policy *__lookup_parent(struct aa_ns *ns, const char *hname) { struct aa_policy *policy; struct aa_profile *profile = NULL; char *split; policy = &ns->base; for (split = strstr(hname, "//"); split;) { profile = __strn_find_child(&policy->profiles, hname, split - hname); if (!profile) return NULL; policy = &profile->base; hname = split + 2; split = strstr(hname, "//"); } if (!profile) return &ns->base; return &profile->base; } /** * __create_missing_ancestors - create place holders for missing ancestores * @ns: namespace to lookup profile in (NOT NULL) * @hname: hierarchical profile name to find parent of (NOT NULL) * @gfp: type of allocation. * * Requires: ns mutex lock held * * Return: unrefcounted parent policy on success or %NULL if error creating * place holder profiles. */ static struct aa_policy *__create_missing_ancestors(struct aa_ns *ns, const char *hname, gfp_t gfp) { struct aa_policy *policy; struct aa_profile *parent, *profile = NULL; char *split; AA_BUG(!ns); AA_BUG(!hname); policy = &ns->base; for (split = strstr(hname, "//"); split;) { parent = profile; profile = __strn_find_child(&policy->profiles, hname, split - hname); if (!profile) { const char *name = kstrndup(hname, split - hname, gfp); if (!name) return NULL; profile = aa_alloc_null(parent, name, gfp); kfree(name); if (!profile) return NULL; if (!parent) profile->ns = aa_get_ns(ns); } policy = &profile->base; hname = split + 2; split = strstr(hname, "//"); } if (!profile) return &ns->base; return &profile->base; } /** * __lookupn_profile - lookup the profile matching @hname * @base: base list to start looking up profile name from (NOT NULL) * @hname: hierarchical profile name (NOT NULL) * @n: length of @hname * * Requires: rcu_read_lock be held * * Returns: unrefcounted profile pointer or NULL if not found * * Do a relative name lookup, recursing through profile tree. */ static struct aa_profile *__lookupn_profile(struct aa_policy *base, const char *hname, size_t n) { struct aa_profile *profile = NULL; const char *split; for (split = strnstr(hname, "//", n); split; split = strnstr(hname, "//", n)) { profile = __strn_find_child(&base->profiles, hname, split - hname); if (!profile) return NULL; base = &profile->base; n -= split + 2 - hname; hname = split + 2; } if (n) return __strn_find_child(&base->profiles, hname, n); return NULL; } static struct aa_profile *__lookup_profile(struct aa_policy *base, const char *hname) { return __lookupn_profile(base, hname, strlen(hname)); } /** * aa_lookupn_profile - find a profile by its full or partial name * @ns: the namespace to start from (NOT NULL) * @hname: name to do lookup on. 
Does not contain namespace prefix (NOT NULL) * @n: size of @hname * * Returns: refcounted profile or NULL if not found */ struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname, size_t n) { struct aa_profile *profile; rcu_read_lock(); do { profile = __lookupn_profile(&ns->base, hname, n); } while (profile && !aa_get_profile_not0(profile)); rcu_read_unlock(); /* the unconfined profile is not in the regular profile list */ if (!profile && strncmp(hname, "unconfined", n) == 0) profile = aa_get_newest_profile(ns->unconfined); /* refcount released by caller */ return profile; } struct aa_profile *aa_fqlookupn_profile(struct aa_label *base, const char *fqname, size_t n) { struct aa_profile *profile; struct aa_ns *ns; const char *name, *ns_name; size_t ns_len; name = aa_splitn_fqname(fqname, n, &ns_name, &ns_len); if (ns_name) { ns = aa_lookupn_ns(labels_ns(base), ns_name, ns_len); if (!ns) return NULL; } else ns = aa_get_ns(labels_ns(base)); if (name) profile = aa_lookupn_profile(ns, name, n - (name - fqname)); else if (ns) /* default profile for ns, currently unconfined */ profile = aa_get_newest_profile(ns->unconfined); else profile = NULL; aa_put_ns(ns); return profile; } struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name, gfp_t gfp) { struct aa_profile *profile; struct aa_ruleset *rules; profile = aa_alloc_profile(name, NULL, gfp); if (!profile) return NULL; /* TODO: ideally we should inherit abi from parent */ profile->label.flags |= FLAG_NULL; profile->attach.xmatch = aa_get_pdb(nullpdb); rules = list_first_entry(&profile->rules, typeof(*rules), list); rules->file = aa_get_pdb(nullpdb); rules->policy = aa_get_pdb(nullpdb); if (parent) { profile->path_flags = parent->path_flags; /* released on free_profile */ rcu_assign_pointer(profile->parent, aa_get_profile(parent)); profile->ns = aa_get_ns(parent->ns); } return profile; } /** * aa_new_learning_profile - create or find a null-X learning profile * @parent: profile that caused this profile to be created (NOT NULL) * @hat: true if the null- learning profile is a hat * @base: name to base the null profile off of * @gfp: type of allocation * * Find/Create a null- complain mode profile used in learning mode. The * name of the profile is unique and follows the format of parent//null-XXX. * where XXX is based on the @name or if that fails or is not supplied * a unique number * * null profiles are added to the profile list but the list does not * hold a count on them so that they are automatically released when * not in use. 
* * Returns: new refcounted profile else NULL on failure */ struct aa_profile *aa_new_learning_profile(struct aa_profile *parent, bool hat, const char *base, gfp_t gfp) { struct aa_profile *p, *profile; const char *bname; char *name = NULL; AA_BUG(!parent); if (base) { name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base), gfp); if (name) { sprintf(name, "%s//null-%s", parent->base.hname, base); goto name; } /* fall through to try shorter uniq */ } name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp); if (!name) return NULL; sprintf(name, "%s//null-%x", parent->base.hname, atomic_inc_return(&parent->ns->uniq_null)); name: /* lookup to see if this is a dup creation */ bname = basename(name); profile = aa_find_child(parent, bname); if (profile) goto out; profile = aa_alloc_null(parent, name, gfp); if (!profile) goto fail; profile->mode = APPARMOR_COMPLAIN; if (hat) profile->label.flags |= FLAG_HAT; mutex_lock_nested(&profile->ns->lock, profile->ns->level); p = __find_child(&parent->base.profiles, bname); if (p) { aa_free_profile(profile); profile = aa_get_profile(p); } else { __add_profile(&parent->base.profiles, profile); } mutex_unlock(&profile->ns->lock); /* refcount released by caller */ out: kfree(name); return profile; fail: kfree(name); aa_free_profile(profile); return NULL; } /** * replacement_allowed - test to see if replacement is allowed * @profile: profile to test if it can be replaced (MAYBE NULL) * @noreplace: true if replacement shouldn't be allowed but addition is okay * @info: Returns - info about why replacement failed (NOT NULL) * * Returns: %0 if replacement allowed else error code */ static int replacement_allowed(struct aa_profile *profile, int noreplace, const char **info) { if (profile) { if (profile->label.flags & FLAG_IMMUTIBLE) { *info = "cannot replace immutable profile"; return -EPERM; } else if (noreplace) { *info = "profile already exists"; return -EEXIST; } } return 0; } /* audit callback for net specific fields */ static void audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; struct apparmor_audit_data *ad = aad(sa); if (ad->iface.ns) { audit_log_format(ab, " ns="); audit_log_untrustedstring(ab, ad->iface.ns); } } /** * audit_policy - Do auditing of policy changes * @subj_label: label to check if it can manage policy * @op: policy operation being performed * @ns_name: name of namespace being manipulated * @name: name of profile being manipulated (NOT NULL) * @info: any extra information to be audited (MAYBE NULL) * @error: error code * * Returns: the error to be returned after audit is done */ static int audit_policy(struct aa_label *subj_label, const char *op, const char *ns_name, const char *name, const char *info, int error) { DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op); ad.iface.ns = ns_name; ad.name = name; ad.info = info; ad.error = error; ad.subj_label = subj_label; aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, audit_cb); return error; } /* don't call out to other LSMs in the stack for apparmor policy admin * permissions */ static int policy_ns_capable(const struct cred *subj_cred, struct aa_label *label, struct user_namespace *userns, int cap) { int err; /* check for MAC_ADMIN cap in cred */ err = cap_capable(subj_cred, userns, cap, CAP_OPT_NONE); if (!err) err = aa_capable(subj_cred, label, cap, CAP_OPT_NONE); return err; } /** * aa_policy_view_capable - check if viewing policy in at @ns is allowed * @subj_cred: cred of subject * @label: label that is trying to view policy in ns * @ns: 
namespace being viewed by @label (may be NULL if @label's ns) * * Returns: true if viewing policy is allowed * * If @ns is NULL then the namespace being viewed is assumed to be the * tasks current namespace. */ bool aa_policy_view_capable(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns) { struct user_namespace *user_ns = subj_cred->user_ns; struct aa_ns *view_ns = labels_view(label); bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) || in_egroup_p(make_kgid(user_ns, 0)); bool response = false; if (!ns) ns = view_ns; if (root_in_user_ns && aa_ns_visible(view_ns, ns, true) && (user_ns == &init_user_ns || (unprivileged_userns_apparmor_policy != 0 && user_ns->level == view_ns->level))) response = true; return response; } bool aa_policy_admin_capable(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns) { struct user_namespace *user_ns = subj_cred->user_ns; bool capable = policy_ns_capable(subj_cred, label, user_ns, CAP_MAC_ADMIN) == 0; AA_DEBUG("cap_mac_admin? %d\n", capable); AA_DEBUG("policy locked? %d\n", aa_g_lock_policy); return aa_policy_view_capable(subj_cred, label, ns) && capable && !aa_g_lock_policy; } bool aa_current_policy_view_capable(struct aa_ns *ns) { struct aa_label *label; bool res; label = __begin_current_label_crit_section(); res = aa_policy_view_capable(current_cred(), label, ns); __end_current_label_crit_section(label); return res; } bool aa_current_policy_admin_capable(struct aa_ns *ns) { struct aa_label *label; bool res; label = __begin_current_label_crit_section(); res = aa_policy_admin_capable(current_cred(), label, ns); __end_current_label_crit_section(label); return res; } /** * aa_may_manage_policy - can the current task manage policy * @subj_cred: subjects cred * @label: label to check if it can manage policy * @ns: namespace being managed by @label (may be NULL if @label's ns) * @mask: contains the policy manipulation operation being done * * Returns: 0 if the task is allowed to manipulate policy else error */ int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns, u32 mask) { const char *op; if (mask & AA_MAY_REMOVE_POLICY) op = OP_PROF_RM; else if (mask & AA_MAY_REPLACE_POLICY) op = OP_PROF_REPL; else op = OP_PROF_LOAD; /* check if loading policy is locked out */ if (aa_g_lock_policy) return audit_policy(label, op, NULL, NULL, "policy_locked", -EACCES); if (!aa_policy_admin_capable(subj_cred, label, ns)) return audit_policy(label, op, NULL, NULL, "not policy admin", -EACCES); /* TODO: add fine grained mediation of policy loads */ return 0; } static struct aa_profile *__list_lookup_parent(struct list_head *lh, struct aa_profile *profile) { const char *base = basename(profile->base.hname); long len = base - profile->base.hname; struct aa_load_ent *ent; /* parent won't have trailing // so remove from len */ if (len <= 2) return NULL; len -= 2; list_for_each_entry(ent, lh, list) { if (ent->new == profile) continue; if (strncmp(ent->new->base.hname, profile->base.hname, len) == 0 && ent->new->base.hname[len] == 0) return ent->new; } return NULL; } /** * __replace_profile - replace @old with @new on a list * @old: profile to be replaced (NOT NULL) * @new: profile to replace @old with (NOT NULL) * * Will duplicate and refcount elements that @new inherits from @old * and will inherit @old children. 
* * refcount @new for list, put @old list refcount * * Requires: namespace list lock be held, or list not be shared */ static void __replace_profile(struct aa_profile *old, struct aa_profile *new) { struct aa_profile *child, *tmp; if (!list_empty(&old->base.profiles)) { LIST_HEAD(lh); list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu); list_for_each_entry_safe(child, tmp, &lh, base.list) { struct aa_profile *p; list_del_init(&child->base.list); p = __find_child(&new->base.profiles, child->base.name); if (p) { /* @p replaces @child */ __replace_profile(child, p); continue; } /* inherit @child and its children */ /* TODO: update hname of inherited children */ /* list refcount transferred to @new */ p = aa_deref_parent(child); rcu_assign_pointer(child->parent, aa_get_profile(new)); list_add_rcu(&child->base.list, &new->base.profiles); aa_put_profile(p); } } if (!rcu_access_pointer(new->parent)) { struct aa_profile *parent = aa_deref_parent(old); rcu_assign_pointer(new->parent, aa_get_profile(parent)); } aa_label_replace(&old->label, &new->label); /* migrate dents must come after label replacement b/c update */ __aafs_profile_migrate_dents(old, new); if (list_empty(&new->base.list)) { /* new is not on a list already */ list_replace_rcu(&old->base.list, &new->base.list); aa_get_profile(new); aa_put_profile(old); } else __list_remove_profile(old); } /** * __lookup_replace - lookup replacement information for a profile * @ns: namespace the lookup occurs in * @hname: name of profile to lookup * @noreplace: true if not replacing an existing profile * @p: Returns - profile to be replaced * @info: Returns - info string on why lookup failed * * Returns: profile to replace (no ref) on success else ptr error */ static int __lookup_replace(struct aa_ns *ns, const char *hname, bool noreplace, struct aa_profile **p, const char **info) { *p = aa_get_profile(__lookup_profile(&ns->base, hname)); if (*p) { int error = replacement_allowed(*p, noreplace, info); if (error) { *info = "profile can not be replaced"; return error; } } return 0; } static void share_name(struct aa_profile *old, struct aa_profile *new) { aa_put_str(new->base.hname); aa_get_str(old->base.hname); new->base.hname = old->base.hname; new->base.name = old->base.name; new->label.hname = old->label.hname; } /* Update to newest version of parent after previous replacements * Returns: unrefcount newest version of parent */ static struct aa_profile *update_to_newest_parent(struct aa_profile *new) { struct aa_profile *parent, *newest; parent = rcu_dereference_protected(new->parent, mutex_is_locked(&new->ns->lock)); newest = aa_get_newest_profile(parent); /* parent replaced in this atomic set? */ if (newest != parent) { aa_put_profile(parent); rcu_assign_pointer(new->parent, newest); } else aa_put_profile(newest); return newest; } /** * aa_replace_profiles - replace profile(s) on the profile list * @policy_ns: namespace load is occurring on * @label: label that is attempting to load/replace policy * @mask: permission mask * @udata: serialized data stream (NOT NULL) * * unpack and replace a profile on the profile list and uses of that profile * by any task creds via invalidating the old version of the profile, which * tasks will notice to update their own cred. If the profile does not exist * on the profile list it is added. * * Returns: size of data consumed else error code on failure. 
*/ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label, u32 mask, struct aa_loaddata *udata) { const char *ns_name = NULL, *info = NULL; struct aa_ns *ns = NULL; struct aa_load_ent *ent, *tmp; struct aa_loaddata *rawdata_ent; const char *op; ssize_t count, error; LIST_HEAD(lh); op = mask & AA_MAY_REPLACE_POLICY ? OP_PROF_REPL : OP_PROF_LOAD; aa_get_loaddata(udata); /* released below */ error = aa_unpack(udata, &lh, &ns_name); if (error) goto out; /* ensure that profiles are all for the same ns * TODO: update locking to remove this constaint. All profiles in * the load set must succeed as a set or the load will * fail. Sort ent list and take ns locks in hierarchy order */ count = 0; list_for_each_entry(ent, &lh, list) { if (ns_name) { if (ent->ns_name && strcmp(ent->ns_name, ns_name) != 0) { info = "policy load has mixed namespaces"; error = -EACCES; goto fail; } } else if (ent->ns_name) { if (count) { info = "policy load has mixed namespaces"; error = -EACCES; goto fail; } ns_name = ent->ns_name; } else count++; } if (ns_name) { ns = aa_prepare_ns(policy_ns ? policy_ns : labels_ns(label), ns_name); if (IS_ERR(ns)) { op = OP_PROF_LOAD; info = "failed to prepare namespace"; error = PTR_ERR(ns); ns = NULL; ent = NULL; goto fail; } } else ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label)); mutex_lock_nested(&ns->lock, ns->level); /* check for duplicate rawdata blobs: space and file dedup */ if (!list_empty(&ns->rawdata_list)) { list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) { if (aa_rawdata_eq(rawdata_ent, udata)) { struct aa_loaddata *tmp; tmp = __aa_get_loaddata(rawdata_ent); /* check we didn't fail the race */ if (tmp) { aa_put_loaddata(udata); udata = tmp; break; } } } } /* setup parent and ns info */ list_for_each_entry(ent, &lh, list) { struct aa_policy *policy; struct aa_profile *p; if (aa_g_export_binary) ent->new->rawdata = aa_get_loaddata(udata); error = __lookup_replace(ns, ent->new->base.hname, !(mask & AA_MAY_REPLACE_POLICY), &ent->old, &info); if (error) goto fail_lock; if (ent->new->rename) { error = __lookup_replace(ns, ent->new->rename, !(mask & AA_MAY_REPLACE_POLICY), &ent->rename, &info); if (error) goto fail_lock; } /* released when @new is freed */ ent->new->ns = aa_get_ns(ns); if (ent->old || ent->rename) continue; /* no ref on policy only use inside lock */ p = NULL; policy = __lookup_parent(ns, ent->new->base.hname); if (!policy) { /* first check for parent in the load set */ p = __list_lookup_parent(&lh, ent->new); if (!p) { /* * fill in missing parent with null * profile that doesn't have * permissions. This allows for * individual profile loading where * the child is loaded before the * parent, and outside of the current * atomic set. This unfortunately can * happen with some userspaces. The * null profile will be replaced once * the parent is loaded. 
*/ policy = __create_missing_ancestors(ns, ent->new->base.hname, GFP_KERNEL); if (!policy) { error = -ENOENT; info = "parent does not exist"; goto fail_lock; } } } if (!p && policy != &ns->base) /* released on profile replacement or free_profile */ p = (struct aa_profile *) policy; rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); } /* create new fs entries for introspection if needed */ if (!udata->dents[AAFS_LOADDATA_DIR] && aa_g_export_binary) { error = __aa_fs_create_rawdata(ns, udata); if (error) { info = "failed to create raw_data dir and files"; ent = NULL; goto fail_lock; } } list_for_each_entry(ent, &lh, list) { if (!ent->old) { struct dentry *parent; if (rcu_access_pointer(ent->new->parent)) { struct aa_profile *p; p = aa_deref_parent(ent->new); parent = prof_child_dir(p); } else parent = ns_subprofs_dir(ent->new->ns); error = __aafs_profile_mkdir(ent->new, parent); } if (error) { info = "failed to create"; goto fail_lock; } } /* Done with checks that may fail - do actual replacement */ __aa_bump_ns_revision(ns); if (aa_g_export_binary) __aa_loaddata_update(udata, ns->revision); list_for_each_entry_safe(ent, tmp, &lh, list) { list_del_init(&ent->list); op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL; if (ent->old && ent->old->rawdata == ent->new->rawdata && ent->new->rawdata) { /* dedup actual profile replacement */ audit_policy(label, op, ns_name, ent->new->base.hname, "same as current profile, skipping", error); /* break refcount cycle with proxy. */ aa_put_proxy(ent->new->label.proxy); ent->new->label.proxy = NULL; goto skip; } /* * TODO: finer dedup based on profile range in data. Load set * can differ but profile may remain unchanged */ audit_policy(label, op, ns_name, ent->new->base.hname, NULL, error); if (ent->old) { share_name(ent->old, ent->new); __replace_profile(ent->old, ent->new); } else { struct list_head *lh; if (rcu_access_pointer(ent->new->parent)) { struct aa_profile *parent; parent = update_to_newest_parent(ent->new); lh = &parent->base.profiles; } else lh = &ns->base.profiles; __add_profile(lh, ent->new); } skip: aa_load_ent_free(ent); } __aa_labelset_update_subtree(ns); mutex_unlock(&ns->lock); out: aa_put_ns(ns); aa_put_loaddata(udata); kfree(ns_name); if (error) return error; return udata->size; fail_lock: mutex_unlock(&ns->lock); /* audit cause of failure */ op = (ent && !ent->old) ? OP_PROF_LOAD : OP_PROF_REPL; fail: audit_policy(label, op, ns_name, ent ? ent->new->base.hname : NULL, info, error); /* audit status that rest of profiles in the atomic set failed too */ info = "valid profile in failed atomic policy load"; list_for_each_entry(tmp, &lh, list) { if (tmp == ent) { info = "unchecked profile in failed atomic policy load"; /* skip entry that caused failure */ continue; } op = (!tmp->old) ? 
				OP_PROF_LOAD : OP_PROF_REPL;
		audit_policy(label, op, ns_name, tmp->new->base.hname, info,
			     error);
	}
	list_for_each_entry_safe(ent, tmp, &lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	goto out;
}

/**
 * aa_remove_profiles - remove profile(s) from the system
 * @policy_ns: namespace the remove is being done from
 * @subj: label attempting to remove policy
 * @fqname: name of the profile or namespace to remove (NOT NULL)
 * @size: size of the name
 *
 * Remove a profile or sub namespace from the current namespace, so that
 * they cannot be found anymore, and mark them as replaced by unconfined
 *
 * NOTE: removing confinement does not restore rlimits to preconfinement values
 *
 * Returns: size of data consumed else error code on failure
 */
ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
			   char *fqname, size_t size)
{
	struct aa_ns *ns = NULL;
	struct aa_profile *profile = NULL;
	const char *name = fqname, *info = NULL;
	const char *ns_name = NULL;
	ssize_t error = 0;

	if (*fqname == 0) {
		info = "no profile specified";
		error = -ENOENT;
		goto fail;
	}

	if (fqname[0] == ':') {
		size_t ns_len;

		name = aa_splitn_fqname(fqname, size, &ns_name, &ns_len);
		/* released below */
		ns = aa_lookupn_ns(policy_ns ? policy_ns : labels_ns(subj),
				   ns_name, ns_len);
		if (!ns) {
			info = "namespace does not exist";
			error = -ENOENT;
			goto fail;
		}
	} else
		/* released below */
		ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(subj));

	if (!name) {
		/* remove namespace - can only happen if fqname[0] == ':' */
		mutex_lock_nested(&ns->parent->lock, ns->parent->level);
		__aa_bump_ns_revision(ns);
		__aa_remove_ns(ns);
		mutex_unlock(&ns->parent->lock);
	} else {
		/* remove profile */
		mutex_lock_nested(&ns->lock, ns->level);
		profile = aa_get_profile(__lookup_profile(&ns->base, name));
		if (!profile) {
			error = -ENOENT;
			info = "profile does not exist";
			goto fail_ns_lock;
		}
		name = profile->base.hname;
		__aa_bump_ns_revision(ns);
		__remove_profile(profile);
		__aa_labelset_update_subtree(ns);
		mutex_unlock(&ns->lock);
	}

	/* don't fail removal if audit fails */
	(void) audit_policy(subj, OP_PROF_RM, ns_name, name, info, error);
	aa_put_ns(ns);
	aa_put_profile(profile);
	return size;

fail_ns_lock:
	mutex_unlock(&ns->lock);
	aa_put_ns(ns);

fail:
	(void) audit_policy(subj, OP_PROF_RM, ns_name, name, info, error);
	return error;
}
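/*
 * Illustrative sketch (not part of the kernel sources): a minimal,
 * self-contained userspace program showing the fqname/hname naming scheme
 * described in the header comment of this file -- a ":ns:" prefix selects a
 * namespace and "//" separates hierarchy levels -- roughly mirroring in
 * spirit what aa_splitn_fqname() and __lookupn_profile() do above.  All
 * demo_* names are hypothetical and exist only for this example; build with
 * any ordinary C compiler.
 */
#include <stdio.h>
#include <string.h>

/* Split ":ns:hname" into a namespace name and an hname; returns the hname. */
static const char *demo_split_fqname(const char *fqname, char *ns, size_t ns_sz)
{
	const char *end;

	ns[0] = '\0';
	if (fqname[0] != ':')
		return fqname;			/* no namespace prefix */
	end = strchr(fqname + 1, ':');
	if (!end)
		return fqname;			/* malformed; treat as plain hname */
	snprintf(ns, ns_sz, "%.*s", (int)(end - fqname - 1), fqname + 1);
	return end + 1;
}

/* Walk an hname one "//" separator at a time, printing each hierarchy level. */
static void demo_walk_hname(const char *hname)
{
	const char *split;

	while ((split = strstr(hname, "//")) != NULL) {
		printf("  level: %.*s\n", (int)(split - hname), hname);
		hname = split + 2;
	}
	printf("  leaf:  %s\n", hname);
}

int main(void)
{
	char ns[64];
	const char *hname;

	/* example fqname taken from the header comment above */
	hname = demo_split_fqname(":ns:/bin/bash//bin/ls", ns, sizeof(ns));
	printf("namespace: %s\n", ns[0] ? ns : "(current)");
	demo_walk_hname(hname);
	return 0;
}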
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Patrick McHardy <kaber@trash.net>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>

static void nft_reject_inet_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);

	switch (nft_pf(pkt)) {
	case NFPROTO_IPV4:
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nf_send_unreach(pkt->skb, priv->icmp_code,
					nft_hook(pkt));
			break;
		case NFT_REJECT_TCP_RST:
			nf_send_reset(nft_net(pkt), nft_sk(pkt), pkt->skb,
				      nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nf_send_unreach(pkt->skb,
					nft_reject_icmp_code(priv->icmp_code),
					nft_hook(pkt));
			break;
		}
		break;
	case NFPROTO_IPV6:
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nf_send_unreach6(nft_net(pkt), pkt->skb,
					 priv->icmp_code, nft_hook(pkt));
			break;
		case NFT_REJECT_TCP_RST:
			nf_send_reset6(nft_net(pkt), nft_sk(pkt), pkt->skb,
				       nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nf_send_unreach6(nft_net(pkt), pkt->skb,
					 nft_reject_icmpv6_code(priv->icmp_code),
					 nft_hook(pkt));
			break;
		}
		break;
	}

	regs->verdict.code = NF_DROP;
}

static int nft_reject_inet_validate(const struct nft_ctx *ctx,
				    const struct nft_expr *expr)
{
	return nft_chain_validate_hooks(ctx->chain,
					(1 << NF_INET_LOCAL_IN) |
					(1 << NF_INET_FORWARD) |
					(1 << NF_INET_LOCAL_OUT) |
					(1 << NF_INET_PRE_ROUTING) |
					(1 << NF_INET_INGRESS));
}

static struct nft_expr_type nft_reject_inet_type;
static const struct nft_expr_ops nft_reject_inet_ops = {
	.type		= &nft_reject_inet_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval		= nft_reject_inet_eval,
	.init		= nft_reject_init,
	.dump		= nft_reject_dump,
	.validate	= nft_reject_inet_validate,
	.reduce		= NFT_REDUCE_READONLY,
};

static struct nft_expr_type nft_reject_inet_type __read_mostly = {
	.family		= NFPROTO_INET,
	.name		= "reject",
	.ops		= &nft_reject_inet_ops,
	.policy		= nft_reject_policy,
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};

static int __init nft_reject_inet_module_init(void)
{
	return nft_register_expr(&nft_reject_inet_type);
}

static void __exit nft_reject_inet_module_exit(void)
{
	nft_unregister_expr(&nft_reject_inet_type);
}

module_init(nft_reject_inet_module_init);
module_exit(nft_reject_inet_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
MODULE_DESCRIPTION("Netfilter nftables reject inet support");
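/*
 * Illustrative sketch (not part of the kernel sources): the hook-mask check
 * behind nft_reject_inet_validate() above.  nft_chain_validate_hooks()
 * compares the chain's hook number against a bitmask of hooks the expression
 * supports; postrouting, for example, is not in the reject mask above.  The
 * enum below only mirrors the ordering of the kernel's enum nf_inet_hooks
 * for illustration, and every demo_* name is hypothetical.
 */
#include <stdio.h>

enum demo_inet_hooks {
	DEMO_PRE_ROUTING,
	DEMO_LOCAL_IN,
	DEMO_FORWARD,
	DEMO_LOCAL_OUT,
	DEMO_POST_ROUTING,
	DEMO_INGRESS,
};

/* Same shape as the mask passed to nft_chain_validate_hooks() above. */
static const unsigned int demo_reject_hook_mask =
	(1 << DEMO_LOCAL_IN) |
	(1 << DEMO_FORWARD) |
	(1 << DEMO_LOCAL_OUT) |
	(1 << DEMO_PRE_ROUTING) |
	(1 << DEMO_INGRESS);

/* Return 0 if @hook is allowed by the mask, -1 (think -EOPNOTSUPP) if not. */
static int demo_validate_hook(unsigned int hook)
{
	return (demo_reject_hook_mask & (1 << hook)) ? 0 : -1;
}

int main(void)
{
	printf("local in:     %s\n",
	       demo_validate_hook(DEMO_LOCAL_IN) ? "rejected" : "allowed");
	printf("post routing: %s\n",
	       demo_validate_hook(DEMO_POST_ROUTING) ? "rejected" : "allowed");
	return 0;
}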
4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 
4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Security plug functions
 *
 * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
 * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
 * Copyright (C) 2016 Mellanox Technologies
 * Copyright (C) 2023 Microsoft Corporation <paul@paul-moore.com>
 */

#define pr_fmt(fmt) "LSM: " fmt

#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/lsm_hooks.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/xattr.h>
#include <linux/msg.h>
#include <linux/overflow.h>
#include <linux/perf_event.h>
#include <linux/fs.h>
#include <net/flow.h>
#include <net/sock.h>

#define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX

/*
 * Identifier for the LSM static calls.
 * HOOK is an LSM hook as defined in linux/lsm_hook_defs.h
 * IDX is the index of the static call.
0 <= NUM < MAX_LSM_COUNT */ #define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX /* * Call the macro M for each LSM hook MAX_LSM_COUNT times. */ #define LSM_LOOP_UNROLL(M, ...) \ do { \ UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) \ } while (0) #define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) /* * These are descriptions of the reasons that can be passed to the * security_locked_down() LSM hook. Placing this array here allows * all security modules to use the same descriptions for auditing * purposes. */ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = { [LOCKDOWN_NONE] = "none", [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading", [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port", [LOCKDOWN_EFI_TEST] = "/dev/efi_test access", [LOCKDOWN_KEXEC] = "kexec of unsigned images", [LOCKDOWN_HIBERNATION] = "hibernation", [LOCKDOWN_PCI_ACCESS] = "direct PCI access", [LOCKDOWN_IOPORT] = "raw io port access", [LOCKDOWN_MSR] = "raw MSR access", [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables", [LOCKDOWN_DEVICE_TREE] = "modifying device tree contents", [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage", [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO", [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters", [LOCKDOWN_MMIOTRACE] = "unsafe mmio", [LOCKDOWN_DEBUGFS] = "debugfs access", [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", [LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM", [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", [LOCKDOWN_XMON_RW] = "xmon read and write access", [LOCKDOWN_XFRM_SECRET] = "xfrm SA secret", [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality", }; static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain); static struct kmem_cache *lsm_file_cache; static struct kmem_cache *lsm_inode_cache; char *lsm_names; static struct lsm_blob_sizes blob_sizes __ro_after_init; /* Boot-time LSM user choice */ static __initdata const char *chosen_lsm_order; static __initdata const char *chosen_major_lsm; static __initconst const char *const builtin_lsm_order = CONFIG_LSM; /* Ordered list of LSMs to initialize. */ static __initdata struct lsm_info *ordered_lsms[MAX_LSM_COUNT + 1]; static __initdata struct lsm_info *exclusive; #ifdef CONFIG_HAVE_STATIC_CALL #define LSM_HOOK_TRAMP(NAME, NUM) \ &STATIC_CALL_TRAMP(LSM_STATIC_CALL(NAME, NUM)) #else #define LSM_HOOK_TRAMP(NAME, NUM) NULL #endif /* * Define static calls and static keys for each LSM hook. */ #define DEFINE_LSM_STATIC_CALL(NUM, NAME, RET, ...) \ DEFINE_STATIC_CALL_NULL(LSM_STATIC_CALL(NAME, NUM), \ *((RET(*)(__VA_ARGS__))NULL)); \ DEFINE_STATIC_KEY_FALSE(SECURITY_HOOK_ACTIVE_KEY(NAME, NUM)); #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ LSM_DEFINE_UNROLL(DEFINE_LSM_STATIC_CALL, NAME, RET, __VA_ARGS__) #include <linux/lsm_hook_defs.h> #undef LSM_HOOK #undef DEFINE_LSM_STATIC_CALL /* * Initialise a table of static calls for each LSM hook. * DEFINE_STATIC_CALL_NULL invocation above generates a key (STATIC_CALL_KEY) * and a trampoline (STATIC_CALL_TRAMP) which are used to call * __static_call_update when updating the static call. 
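 *
 * As a rough illustration only (the hook name is arbitrary and the real
 * expansion depends on the build-time MAX_LSM_COUNT), with MAX_LSM_COUNT == 2
 * the DEFINE_LSM_STATIC_CALL unrolling above turns a hook such as file_open
 * into approximately:
 *
 *	DEFINE_STATIC_CALL_NULL(lsm_static_call_file_open_0, ...);
 *	DEFINE_STATIC_KEY_FALSE(security_hook_active_file_open_0);
 *	DEFINE_STATIC_CALL_NULL(lsm_static_call_file_open_1, ...);
 *	DEFINE_STATIC_KEY_FALSE(security_hook_active_file_open_1);
 *
 * and the table below collects the matching key/trampoline/active triplets
 * so that lsm_static_call_init() can patch them at hook registration time.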
* * The static calls table is used by early LSMs, some architectures can fault on * unaligned accesses and the fault handling code may not be ready by then. * Thus, the static calls table should be aligned to avoid any unhandled faults * in early init. */ struct lsm_static_calls_table static_calls_table __ro_after_init __aligned(sizeof(u64)) = { #define INIT_LSM_STATIC_CALL(NUM, NAME) \ (struct lsm_static_call) { \ .key = &STATIC_CALL_KEY(LSM_STATIC_CALL(NAME, NUM)), \ .trampoline = LSM_HOOK_TRAMP(NAME, NUM), \ .active = &SECURITY_HOOK_ACTIVE_KEY(NAME, NUM), \ }, #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ .NAME = { \ LSM_DEFINE_UNROLL(INIT_LSM_STATIC_CALL, NAME) \ }, #include <linux/lsm_hook_defs.h> #undef LSM_HOOK #undef INIT_LSM_STATIC_CALL }; static __initdata bool debug; #define init_debug(...) \ do { \ if (debug) \ pr_info(__VA_ARGS__); \ } while (0) static bool __init is_enabled(struct lsm_info *lsm) { if (!lsm->enabled) return false; return *lsm->enabled; } /* Mark an LSM's enabled flag. */ static int lsm_enabled_true __initdata = 1; static int lsm_enabled_false __initdata = 0; static void __init set_enabled(struct lsm_info *lsm, bool enabled) { /* * When an LSM hasn't configured an enable variable, we can use * a hard-coded location for storing the default enabled state. */ if (!lsm->enabled) { if (enabled) lsm->enabled = &lsm_enabled_true; else lsm->enabled = &lsm_enabled_false; } else if (lsm->enabled == &lsm_enabled_true) { if (!enabled) lsm->enabled = &lsm_enabled_false; } else if (lsm->enabled == &lsm_enabled_false) { if (enabled) lsm->enabled = &lsm_enabled_true; } else { *lsm->enabled = enabled; } } /* Is an LSM already listed in the ordered LSMs list? */ static bool __init exists_ordered_lsm(struct lsm_info *lsm) { struct lsm_info **check; for (check = ordered_lsms; *check; check++) if (*check == lsm) return true; return false; } /* Append an LSM to the list of ordered LSMs to initialize. */ static int last_lsm __initdata; static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from) { /* Ignore duplicate selections. */ if (exists_ordered_lsm(lsm)) return; if (WARN(last_lsm == MAX_LSM_COUNT, "%s: out of LSM static calls!?\n", from)) return; /* Enable this LSM, if it is not already set. */ if (!lsm->enabled) lsm->enabled = &lsm_enabled_true; ordered_lsms[last_lsm++] = lsm; init_debug("%s ordered: %s (%s)\n", from, lsm->name, is_enabled(lsm) ? "enabled" : "disabled"); } /* Is an LSM allowed to be initialized? */ static bool __init lsm_allowed(struct lsm_info *lsm) { /* Skip if the LSM is disabled. */ if (!is_enabled(lsm)) return false; /* Not allowed if another exclusive LSM already initialized. */ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) { init_debug("exclusive disabled: %s\n", lsm->name); return false; } return true; } static void __init lsm_set_blob_size(int *need, int *lbs) { int offset; if (*need <= 0) return; offset = ALIGN(*lbs, sizeof(void *)); *lbs = offset + *need; *need = offset; } static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed) { if (!needed) return; lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred); lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file); lsm_set_blob_size(&needed->lbs_ib, &blob_sizes.lbs_ib); /* * The inode blob gets an rcu_head in addition to * what the modules might need. 
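	 *
	 * (Worked example, assuming a 64-bit build where struct rcu_head is
	 * 16 bytes: a first module asking for 4 bytes of inode blob sees the
	 * rcu_head reserved at offset 0, gets its data placed at offset 16,
	 * and leaves blob_sizes.lbs_inode at 20; a second 4-byte request
	 * would then be aligned up and placed at offset 24.)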
*/ if (needed->lbs_inode && blob_sizes.lbs_inode == 0) blob_sizes.lbs_inode = sizeof(struct rcu_head); lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode); lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc); lsm_set_blob_size(&needed->lbs_key, &blob_sizes.lbs_key); lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg); lsm_set_blob_size(&needed->lbs_perf_event, &blob_sizes.lbs_perf_event); lsm_set_blob_size(&needed->lbs_sock, &blob_sizes.lbs_sock); lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock); lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task); lsm_set_blob_size(&needed->lbs_tun_dev, &blob_sizes.lbs_tun_dev); lsm_set_blob_size(&needed->lbs_xattr_count, &blob_sizes.lbs_xattr_count); lsm_set_blob_size(&needed->lbs_bdev, &blob_sizes.lbs_bdev); } /* Prepare LSM for initialization. */ static void __init prepare_lsm(struct lsm_info *lsm) { int enabled = lsm_allowed(lsm); /* Record enablement (to handle any following exclusive LSMs). */ set_enabled(lsm, enabled); /* If enabled, do pre-initialization work. */ if (enabled) { if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) { exclusive = lsm; init_debug("exclusive chosen: %s\n", lsm->name); } lsm_set_blob_sizes(lsm->blobs); } } /* Initialize a given LSM, if it is enabled. */ static void __init initialize_lsm(struct lsm_info *lsm) { if (is_enabled(lsm)) { int ret; init_debug("initializing %s\n", lsm->name); ret = lsm->init(); WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret); } } /* * Current index to use while initializing the lsm id list. */ u32 lsm_active_cnt __ro_after_init; const struct lsm_id *lsm_idlist[MAX_LSM_COUNT]; /* Populate ordered LSMs list from comma-separated LSM name list. */ static void __init ordered_lsm_parse(const char *order, const char *origin) { struct lsm_info *lsm; char *sep, *name, *next; /* LSM_ORDER_FIRST is always first. */ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (lsm->order == LSM_ORDER_FIRST) append_ordered_lsm(lsm, " first"); } /* Process "security=", if given. */ if (chosen_major_lsm) { struct lsm_info *major; /* * To match the original "security=" behavior, this * explicitly does NOT fallback to another Legacy Major * if the selected one was separately disabled: disable * all non-matching Legacy Major LSMs. */ for (major = __start_lsm_info; major < __end_lsm_info; major++) { if ((major->flags & LSM_FLAG_LEGACY_MAJOR) && strcmp(major->name, chosen_major_lsm) != 0) { set_enabled(major, false); init_debug("security=%s disabled: %s (only one legacy major LSM)\n", chosen_major_lsm, major->name); } } } sep = kstrdup(order, GFP_KERNEL); next = sep; /* Walk the list, looking for matching LSMs. */ while ((name = strsep(&next, ",")) != NULL) { bool found = false; for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (strcmp(lsm->name, name) == 0) { if (lsm->order == LSM_ORDER_MUTABLE) append_ordered_lsm(lsm, origin); found = true; } } if (!found) init_debug("%s ignored: %s (not built into kernel)\n", origin, name); } /* Process "security=", if given. */ if (chosen_major_lsm) { for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (exists_ordered_lsm(lsm)) continue; if (strcmp(lsm->name, chosen_major_lsm) == 0) append_ordered_lsm(lsm, "security="); } } /* LSM_ORDER_LAST is always last. */ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (lsm->order == LSM_ORDER_LAST) append_ordered_lsm(lsm, " last"); } /* Disable all LSMs not in the ordered list. 
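	 *
	 * (Boot-line illustration only, assuming those modules are built in:
	 * "lsm=lockdown,yama,apparmor" appends the three named mutable LSMs
	 * in that order after any LSM_ORDER_FIRST entries, while a built-in
	 * mutable LSM that is not named, e.g. selinux, falls through to the
	 * loop below and is disabled.)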
*/ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (exists_ordered_lsm(lsm)) continue; set_enabled(lsm, false); init_debug("%s skipped: %s (not in requested order)\n", origin, lsm->name); } kfree(sep); } static void __init lsm_static_call_init(struct security_hook_list *hl) { struct lsm_static_call *scall = hl->scalls; int i; for (i = 0; i < MAX_LSM_COUNT; i++) { /* Update the first static call that is not used yet */ if (!scall->hl) { __static_call_update(scall->key, scall->trampoline, hl->hook.lsm_func_addr); scall->hl = hl; static_branch_enable(scall->active); return; } scall++; } panic("%s - Ran out of static slots.\n", __func__); } static void __init lsm_early_cred(struct cred *cred); static void __init lsm_early_task(struct task_struct *task); static int lsm_append(const char *new, char **result); static void __init report_lsm_order(void) { struct lsm_info **lsm, *early; int first = 0; pr_info("initializing lsm="); /* Report each enabled LSM name, comma separated. */ for (early = __start_early_lsm_info; early < __end_early_lsm_info; early++) if (is_enabled(early)) pr_cont("%s%s", first++ == 0 ? "" : ",", early->name); for (lsm = ordered_lsms; *lsm; lsm++) if (is_enabled(*lsm)) pr_cont("%s%s", first++ == 0 ? "" : ",", (*lsm)->name); pr_cont("\n"); } static void __init ordered_lsm_init(void) { struct lsm_info **lsm; if (chosen_lsm_order) { if (chosen_major_lsm) { pr_warn("security=%s is ignored because it is superseded by lsm=%s\n", chosen_major_lsm, chosen_lsm_order); chosen_major_lsm = NULL; } ordered_lsm_parse(chosen_lsm_order, "cmdline"); } else ordered_lsm_parse(builtin_lsm_order, "builtin"); for (lsm = ordered_lsms; *lsm; lsm++) prepare_lsm(*lsm); report_lsm_order(); init_debug("cred blob size = %d\n", blob_sizes.lbs_cred); init_debug("file blob size = %d\n", blob_sizes.lbs_file); init_debug("ib blob size = %d\n", blob_sizes.lbs_ib); init_debug("inode blob size = %d\n", blob_sizes.lbs_inode); init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc); #ifdef CONFIG_KEYS init_debug("key blob size = %d\n", blob_sizes.lbs_key); #endif /* CONFIG_KEYS */ init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg); init_debug("sock blob size = %d\n", blob_sizes.lbs_sock); init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock); init_debug("perf event blob size = %d\n", blob_sizes.lbs_perf_event); init_debug("task blob size = %d\n", blob_sizes.lbs_task); init_debug("tun device blob size = %d\n", blob_sizes.lbs_tun_dev); init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count); init_debug("bdev blob size = %d\n", blob_sizes.lbs_bdev); /* * Create any kmem_caches needed for blobs */ if (blob_sizes.lbs_file) lsm_file_cache = kmem_cache_create("lsm_file_cache", blob_sizes.lbs_file, 0, SLAB_PANIC, NULL); if (blob_sizes.lbs_inode) lsm_inode_cache = kmem_cache_create("lsm_inode_cache", blob_sizes.lbs_inode, 0, SLAB_PANIC, NULL); lsm_early_cred((struct cred *) current->cred); lsm_early_task(current); for (lsm = ordered_lsms; *lsm; lsm++) initialize_lsm(*lsm); } int __init early_security_init(void) { struct lsm_info *lsm; for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) { if (!lsm->enabled) lsm->enabled = &lsm_enabled_true; prepare_lsm(lsm); initialize_lsm(lsm); } return 0; } /** * security_init - initializes the security framework * * This should be called early in the kernel initialization sequence. */ int __init security_init(void) { struct lsm_info *lsm; init_debug("legacy security=%s\n", chosen_major_lsm ? 
: " *unspecified*"); init_debug(" CONFIG_LSM=%s\n", builtin_lsm_order); init_debug("boot arg lsm=%s\n", chosen_lsm_order ? : " *unspecified*"); /* * Append the names of the early LSM modules now that kmalloc() is * available */ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) { init_debug(" early started: %s (%s)\n", lsm->name, is_enabled(lsm) ? "enabled" : "disabled"); if (lsm->enabled) lsm_append(lsm->name, &lsm_names); } /* Load LSMs in specified order. */ ordered_lsm_init(); return 0; } /* Save user chosen LSM */ static int __init choose_major_lsm(char *str) { chosen_major_lsm = str; return 1; } __setup("security=", choose_major_lsm); /* Explicitly choose LSM initialization order. */ static int __init choose_lsm_order(char *str) { chosen_lsm_order = str; return 1; } __setup("lsm=", choose_lsm_order); /* Enable LSM order debugging. */ static int __init enable_debug(char *str) { debug = true; return 1; } __setup("lsm.debug", enable_debug); static bool match_last_lsm(const char *list, const char *lsm) { const char *last; if (WARN_ON(!list || !lsm)) return false; last = strrchr(list, ','); if (last) /* Pass the comma, strcmp() will check for '\0' */ last++; else last = list; return !strcmp(last, lsm); } static int lsm_append(const char *new, char **result) { char *cp; if (*result == NULL) { *result = kstrdup(new, GFP_KERNEL); if (*result == NULL) return -ENOMEM; } else { /* Check if it is the last registered name */ if (match_last_lsm(*result, new)) return 0; cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new); if (cp == NULL) return -ENOMEM; kfree(*result); *result = cp; } return 0; } /** * security_add_hooks - Add a modules hooks to the hook lists. * @hooks: the hooks to add * @count: the number of hooks to add * @lsmid: the identification information for the security module * * Each LSM has to register its hooks with the infrastructure. */ void __init security_add_hooks(struct security_hook_list *hooks, int count, const struct lsm_id *lsmid) { int i; /* * A security module may call security_add_hooks() more * than once during initialization, and LSM initialization * is serialized. Landlock is one such case. * Look at the previous entry, if there is one, for duplication. */ if (lsm_active_cnt == 0 || lsm_idlist[lsm_active_cnt - 1] != lsmid) { if (lsm_active_cnt >= MAX_LSM_COUNT) panic("%s Too many LSMs registered.\n", __func__); lsm_idlist[lsm_active_cnt++] = lsmid; } for (i = 0; i < count; i++) { hooks[i].lsmid = lsmid; lsm_static_call_init(&hooks[i]); } /* * Don't try to append during early_security_init(), we'll come back * and fix this up afterwards. 
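	 *
	 * (Sketch of a typical caller, using hypothetical "example" names;
	 * real modules build the array with LSM_HOOK_INIT() and pass their
	 * own lsm_id:)
	 *
	 *	static struct security_hook_list example_hooks[] __ro_after_init = {
	 *		LSM_HOOK_INIT(file_open, example_file_open),
	 *	};
	 *
	 *	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
	 *			   &example_lsmid);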
*/ if (slab_is_available()) { if (lsm_append(lsmid->name, &lsm_names) < 0) panic("%s - Cannot get early memory.\n", __func__); } } int call_blocking_lsm_notifier(enum lsm_event event, void *data) { return blocking_notifier_call_chain(&blocking_lsm_notifier_chain, event, data); } EXPORT_SYMBOL(call_blocking_lsm_notifier); int register_blocking_lsm_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&blocking_lsm_notifier_chain, nb); } EXPORT_SYMBOL(register_blocking_lsm_notifier); int unregister_blocking_lsm_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain, nb); } EXPORT_SYMBOL(unregister_blocking_lsm_notifier); /** * lsm_blob_alloc - allocate a composite blob * @dest: the destination for the blob * @size: the size of the blob * @gfp: allocation type * * Allocate a blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp) { if (size == 0) { *dest = NULL; return 0; } *dest = kzalloc(size, gfp); if (*dest == NULL) return -ENOMEM; return 0; } /** * lsm_cred_alloc - allocate a composite cred blob * @cred: the cred that needs a blob * @gfp: allocation type * * Allocate the cred blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_cred_alloc(struct cred *cred, gfp_t gfp) { return lsm_blob_alloc(&cred->security, blob_sizes.lbs_cred, gfp); } /** * lsm_early_cred - during initialization allocate a composite cred blob * @cred: the cred that needs a blob * * Allocate the cred blob for all the modules */ static void __init lsm_early_cred(struct cred *cred) { int rc = lsm_cred_alloc(cred, GFP_KERNEL); if (rc) panic("%s: Early cred alloc failed.\n", __func__); } /** * lsm_file_alloc - allocate a composite file blob * @file: the file that needs a blob * * Allocate the file blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_file_alloc(struct file *file) { if (!lsm_file_cache) { file->f_security = NULL; return 0; } file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL); if (file->f_security == NULL) return -ENOMEM; return 0; } /** * lsm_inode_alloc - allocate a composite inode blob * @inode: the inode that needs a blob * @gfp: allocation flags * * Allocate the inode blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_inode_alloc(struct inode *inode, gfp_t gfp) { if (!lsm_inode_cache) { inode->i_security = NULL; return 0; } inode->i_security = kmem_cache_zalloc(lsm_inode_cache, gfp); if (inode->i_security == NULL) return -ENOMEM; return 0; } /** * lsm_task_alloc - allocate a composite task blob * @task: the task that needs a blob * * Allocate the task blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_task_alloc(struct task_struct *task) { return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL); } /** * lsm_ipc_alloc - allocate a composite ipc blob * @kip: the ipc that needs a blob * * Allocate the ipc blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_ipc_alloc(struct kern_ipc_perm *kip) { return lsm_blob_alloc(&kip->security, blob_sizes.lbs_ipc, GFP_KERNEL); } #ifdef CONFIG_KEYS /** * lsm_key_alloc - allocate a composite key blob * @key: the key that needs a blob * * Allocate the key blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. 
*/ static int lsm_key_alloc(struct key *key) { return lsm_blob_alloc(&key->security, blob_sizes.lbs_key, GFP_KERNEL); } #endif /* CONFIG_KEYS */ /** * lsm_msg_msg_alloc - allocate a composite msg_msg blob * @mp: the msg_msg that needs a blob * * Allocate the ipc blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_msg_msg_alloc(struct msg_msg *mp) { return lsm_blob_alloc(&mp->security, blob_sizes.lbs_msg_msg, GFP_KERNEL); } /** * lsm_bdev_alloc - allocate a composite block_device blob * @bdev: the block_device that needs a blob * * Allocate the block_device blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_bdev_alloc(struct block_device *bdev) { if (blob_sizes.lbs_bdev == 0) { bdev->bd_security = NULL; return 0; } bdev->bd_security = kzalloc(blob_sizes.lbs_bdev, GFP_KERNEL); if (!bdev->bd_security) return -ENOMEM; return 0; } /** * lsm_early_task - during initialization allocate a composite task blob * @task: the task that needs a blob * * Allocate the task blob for all the modules */ static void __init lsm_early_task(struct task_struct *task) { int rc = lsm_task_alloc(task); if (rc) panic("%s: Early task alloc failed.\n", __func__); } /** * lsm_superblock_alloc - allocate a composite superblock blob * @sb: the superblock that needs a blob * * Allocate the superblock blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_superblock_alloc(struct super_block *sb) { return lsm_blob_alloc(&sb->s_security, blob_sizes.lbs_superblock, GFP_KERNEL); } /** * lsm_fill_user_ctx - Fill a user space lsm_ctx structure * @uctx: a userspace LSM context to be filled * @uctx_len: available uctx size (input), used uctx size (output) * @val: the new LSM context value * @val_len: the size of the new LSM context value * @id: LSM id * @flags: LSM defined flags * * Fill all of the fields in a userspace lsm_ctx structure. If @uctx is NULL * simply calculate the required size to output via @utc_len and return * success. * * Returns 0 on success, -E2BIG if userspace buffer is not large enough, * -EFAULT on a copyout error, -ENOMEM if memory can't be allocated. */ int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, void *val, size_t val_len, u64 id, u64 flags) { struct lsm_ctx *nctx = NULL; size_t nctx_len; int rc = 0; nctx_len = ALIGN(struct_size(nctx, ctx, val_len), sizeof(void *)); if (nctx_len > *uctx_len) { rc = -E2BIG; goto out; } /* no buffer - return success/0 and set @uctx_len to the req size */ if (!uctx) goto out; nctx = kzalloc(nctx_len, GFP_KERNEL); if (nctx == NULL) { rc = -ENOMEM; goto out; } nctx->id = id; nctx->flags = flags; nctx->len = nctx_len; nctx->ctx_len = val_len; memcpy(nctx->ctx, val, val_len); if (copy_to_user(uctx, nctx, nctx_len)) rc = -EFAULT; out: kfree(nctx); *uctx_len = nctx_len; return rc; } /* * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and * can be accessed with: * * LSM_RET_DEFAULT(<hook_name>) * * The macros below define static constants for the default value of each * LSM hook. */ #define LSM_RET_DEFAULT(NAME) (NAME##_default) #define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME) #define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \ static const int __maybe_unused LSM_RET_DEFAULT(NAME) = (DEFAULT); #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME) #include <linux/lsm_hook_defs.h> #undef LSM_HOOK /* * Hook list operation macros. 
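 *
 * (Rough sketch of the pattern the security_*() wrappers further below
 * follow; the hook name and argument type are invented for illustration:)
 *
 *	int security_example(struct foo *f)
 *	{
 *		return call_int_hook(example, f);
 *	}
 *
 * where call_int_hook() stops at the first module returning something other
 * than the hook's default value, and call_void_hook() calls every active
 * module in turn.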
* * call_void_hook: * This is a hook that does not return a value. * * call_int_hook: * This is a hook that returns a value. */ #define __CALL_STATIC_VOID(NUM, HOOK, ...) \ do { \ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \ static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \ } \ } while (0); #define call_void_hook(HOOK, ...) \ do { \ LSM_LOOP_UNROLL(__CALL_STATIC_VOID, HOOK, __VA_ARGS__); \ } while (0) #define __CALL_STATIC_INT(NUM, R, HOOK, LABEL, ...) \ do { \ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \ R = static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \ if (R != LSM_RET_DEFAULT(HOOK)) \ goto LABEL; \ } \ } while (0); #define call_int_hook(HOOK, ...) \ ({ \ __label__ OUT; \ int RC = LSM_RET_DEFAULT(HOOK); \ \ LSM_LOOP_UNROLL(__CALL_STATIC_INT, RC, HOOK, OUT, __VA_ARGS__); \ OUT: \ RC; \ }) #define lsm_for_each_hook(scall, NAME) \ for (scall = static_calls_table.NAME; \ scall - static_calls_table.NAME < MAX_LSM_COUNT; scall++) \ if (static_key_enabled(&scall->active->key)) /* Security operations */ /** * security_binder_set_context_mgr() - Check if becoming binder ctx mgr is ok * @mgr: task credentials of current binder process * * Check whether @mgr is allowed to be the binder context manager. * * Return: Return 0 if permission is granted. */ int security_binder_set_context_mgr(const struct cred *mgr) { return call_int_hook(binder_set_context_mgr, mgr); } /** * security_binder_transaction() - Check if a binder transaction is allowed * @from: sending process * @to: receiving process * * Check whether @from is allowed to invoke a binder transaction call to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transaction(const struct cred *from, const struct cred *to) { return call_int_hook(binder_transaction, from, to); } /** * security_binder_transfer_binder() - Check if a binder transfer is allowed * @from: sending process * @to: receiving process * * Check whether @from is allowed to transfer a binder reference to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transfer_binder(const struct cred *from, const struct cred *to) { return call_int_hook(binder_transfer_binder, from, to); } /** * security_binder_transfer_file() - Check if a binder file xfer is allowed * @from: sending process * @to: receiving process * @file: file being transferred * * Check whether @from is allowed to transfer @file to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transfer_file(const struct cred *from, const struct cred *to, const struct file *file) { return call_int_hook(binder_transfer_file, from, to, file); } /** * security_ptrace_access_check() - Check if tracing is allowed * @child: target process * @mode: PTRACE_MODE flags * * Check permission before allowing the current process to trace the @child * process. Security modules may also want to perform a process tracing check * during an execve in the set_security or apply_creds hooks of tracing check * during an execve in the bprm_set_creds hook of binprm_security_ops if the * process is being traced and its security attributes would be changed by the * execve. * * Return: Returns 0 if permission is granted. 
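 *
 * (For illustration only: the ptrace attach/read paths end up doing roughly
 *
 *	rc = security_ptrace_access_check(child, mode);
 *
 * and refuse the access when rc is nonzero, with @mode carrying
 * PTRACE_MODE_ATTACH or PTRACE_MODE_READ plus credential-selection flags.)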
*/ int security_ptrace_access_check(struct task_struct *child, unsigned int mode) { return call_int_hook(ptrace_access_check, child, mode); } /** * security_ptrace_traceme() - Check if tracing is allowed * @parent: tracing process * * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself to the * @parent process for tracing. * * Return: Returns 0 if permission is granted. */ int security_ptrace_traceme(struct task_struct *parent) { return call_int_hook(ptrace_traceme, parent); } /** * security_capget() - Get the capability sets for a process * @target: target process * @effective: effective capability set * @inheritable: inheritable capability set * @permitted: permitted capability set * * Get the @effective, @inheritable, and @permitted capability sets for the * @target process. The hook may also perform permission checking to determine * if the current process is allowed to see the capability sets of the @target * process. * * Return: Returns 0 if the capability sets were successfully obtained. */ int security_capget(const struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { return call_int_hook(capget, target, effective, inheritable, permitted); } /** * security_capset() - Set the capability sets for a process * @new: new credentials for the target process * @old: current credentials of the target process * @effective: effective capability set * @inheritable: inheritable capability set * @permitted: permitted capability set * * Set the @effective, @inheritable, and @permitted capability sets for the * current process. * * Return: Returns 0 and update @new if permission is granted. */ int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) { return call_int_hook(capset, new, old, effective, inheritable, permitted); } /** * security_capable() - Check if a process has the necessary capability * @cred: credentials to examine * @ns: user namespace * @cap: capability requested * @opts: capability check options * * Check whether the @tsk process has the @cap capability in the indicated * credentials. @cap contains the capability <include/linux/capability.h>. * @opts contains options for the capable check <include/linux/security.h>. * * Return: Returns 0 if the capability is granted. */ int security_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) { return call_int_hook(capable, cred, ns, cap, opts); } /** * security_quotactl() - Check if a quotactl() syscall is allowed for this fs * @cmds: commands * @type: type * @id: id * @sb: filesystem * * Check whether the quotactl syscall is allowed for this @sb. * * Return: Returns 0 if permission is granted. */ int security_quotactl(int cmds, int type, int id, const struct super_block *sb) { return call_int_hook(quotactl, cmds, type, id, sb); } /** * security_quota_on() - Check if QUOTAON is allowed for a dentry * @dentry: dentry * * Check whether QUOTAON is allowed for @dentry. * * Return: Returns 0 if permission is granted. */ int security_quota_on(struct dentry *dentry) { return call_int_hook(quota_on, dentry); } /** * security_syslog() - Check if accessing the kernel message ring is allowed * @type: SYSLOG_ACTION_* type * * Check permission before accessing the kernel message ring or changing * logging to the console. 
See the syslog(2) manual page for an explanation of * the @type values. * * Return: Return 0 if permission is granted. */ int security_syslog(int type) { return call_int_hook(syslog, type); } /** * security_settime64() - Check if changing the system time is allowed * @ts: new time * @tz: timezone * * Check permission to change the system time, struct timespec64 is defined in * <include/linux/time64.h> and timezone is defined in <include/linux/time.h>. * * Return: Returns 0 if permission is granted. */ int security_settime64(const struct timespec64 *ts, const struct timezone *tz) { return call_int_hook(settime, ts, tz); } /** * security_vm_enough_memory_mm() - Check if allocating a new mem map is allowed * @mm: mm struct * @pages: number of pages * * Check permissions for allocating a new virtual mapping. If all LSMs return * a positive value, __vm_enough_memory() will be called with cap_sys_admin * set. If at least one LSM returns 0 or negative, __vm_enough_memory() will be * called with cap_sys_admin cleared. * * Return: Returns 0 if permission is granted by the LSM infrastructure to the * caller. */ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { struct lsm_static_call *scall; int cap_sys_admin = 1; int rc; /* * The module will respond with 0 if it thinks the __vm_enough_memory() * call should be made with the cap_sys_admin set. If all of the modules * agree that it should be set it will. If any module thinks it should * not be set it won't. */ lsm_for_each_hook(scall, vm_enough_memory) { rc = scall->hl->hook.vm_enough_memory(mm, pages); if (rc < 0) { cap_sys_admin = 0; break; } } return __vm_enough_memory(mm, pages, cap_sys_admin); } /** * security_bprm_creds_for_exec() - Prepare the credentials for exec() * @bprm: binary program information * * If the setup in prepare_exec_creds did not setup @bprm->cred->security * properly for executing @bprm->file, update the LSM's portion of * @bprm->cred->security to be what commit_creds needs to install for the new * program. This hook may also optionally check permissions (e.g. for * transitions between security domains). The hook must set @bprm->secureexec * to 1 if AT_SECURE should be set to request libc enable secure mode. @bprm * contains the linux_binprm structure. * * If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is * set. The result must be the same as without this flag even if the execution * will never really happen and @bprm will always be dropped. * * This hook must not change current->cred, only @bprm->cred. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_creds_for_exec(struct linux_binprm *bprm) { return call_int_hook(bprm_creds_for_exec, bprm); } /** * security_bprm_creds_from_file() - Update linux_binprm creds based on file * @bprm: binary program information * @file: associated file * * If @file is setpcap, suid, sgid or otherwise marked to change privilege upon * exec, update @bprm->cred to reflect that change. This is called after * finding the binary that will be executed without an interpreter. This * ensures that the credentials will not be derived from a script that the * binary will need to reopen, which when reopend may end up being a completely * different file. This hook may also optionally check permissions (e.g. for * transitions between security domains). The hook must set @bprm->secureexec * to 1 if AT_SECURE should be set to request libc enable secure mode. 
The * hook must add to @bprm->per_clear any personality flags that should be * cleared from current->personality. @bprm contains the linux_binprm * structure. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file) { return call_int_hook(bprm_creds_from_file, bprm, file); } /** * security_bprm_check() - Mediate binary handler search * @bprm: binary program information * * This hook mediates the point when a search for a binary handler will begin. * It allows a check against the @bprm->cred->security value which was set in * the preceding creds_for_exec call. The argv list and envp list are reliably * available in @bprm. This hook may be called multiple times during a single * execve. @bprm contains the linux_binprm structure. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_check(struct linux_binprm *bprm) { return call_int_hook(bprm_check_security, bprm); } /** * security_bprm_committing_creds() - Install creds for a process during exec() * @bprm: binary program information * * Prepare to install the new security attributes of a process being * transformed by an execve operation, based on the old credentials pointed to * by @current->cred and the information set in @bprm->cred by the * bprm_creds_for_exec hook. @bprm points to the linux_binprm structure. This * hook is a good place to perform state changes on the process such as closing * open file descriptors to which access will no longer be granted when the * attributes are changed. This is called immediately before commit_creds(). */ void security_bprm_committing_creds(const struct linux_binprm *bprm) { call_void_hook(bprm_committing_creds, bprm); } /** * security_bprm_committed_creds() - Tidy up after cred install during exec() * @bprm: binary program information * * Tidy up after the installation of the new security attributes of a process * being transformed by an execve operation. The new credentials have, by this * point, been set to @current->cred. @bprm points to the linux_binprm * structure. This hook is a good place to perform state changes on the * process such as clearing out non-inheritable signal state. This is called * immediately after commit_creds(). */ void security_bprm_committed_creds(const struct linux_binprm *bprm) { call_void_hook(bprm_committed_creds, bprm); } /** * security_fs_context_submount() - Initialise fc->security * @fc: new filesystem context * @reference: dentry reference for submount/remount * * Fill out the ->security field for a new fs_context. * * Return: Returns 0 on success or negative error code on failure. */ int security_fs_context_submount(struct fs_context *fc, struct super_block *reference) { return call_int_hook(fs_context_submount, fc, reference); } /** * security_fs_context_dup() - Duplicate a fs_context LSM blob * @fc: destination filesystem context * @src_fc: source filesystem context * * Allocate and attach a security structure to sc->security. This pointer is * initialised to NULL by the caller. @fc indicates the new filesystem context. * @src_fc indicates the original filesystem context. * * Return: Returns 0 on success or a negative error code on failure. 
*/ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { return call_int_hook(fs_context_dup, fc, src_fc); } /** * security_fs_context_parse_param() - Configure a filesystem context * @fc: filesystem context * @param: filesystem parameter * * Userspace provided a parameter to configure a superblock. The LSM can * consume the parameter or return it to the caller for use elsewhere. * * Return: If the parameter is used by the LSM it should return 0, if it is * returned to the caller -ENOPARAM is returned, otherwise a negative * error code is returned. */ int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct lsm_static_call *scall; int trc; int rc = -ENOPARAM; lsm_for_each_hook(scall, fs_context_parse_param) { trc = scall->hl->hook.fs_context_parse_param(fc, param); if (trc == 0) rc = 0; else if (trc != -ENOPARAM) return trc; } return rc; } /** * security_sb_alloc() - Allocate a super_block LSM blob * @sb: filesystem superblock * * Allocate and attach a security structure to the sb->s_security field. The * s_security field is initialized to NULL when the structure is allocated. * @sb contains the super_block structure to be modified. * * Return: Returns 0 if operation was successful. */ int security_sb_alloc(struct super_block *sb) { int rc = lsm_superblock_alloc(sb); if (unlikely(rc)) return rc; rc = call_int_hook(sb_alloc_security, sb); if (unlikely(rc)) security_sb_free(sb); return rc; } /** * security_sb_delete() - Release super_block LSM associated objects * @sb: filesystem superblock * * Release objects tied to a superblock (e.g. inodes). @sb contains the * super_block structure being released. */ void security_sb_delete(struct super_block *sb) { call_void_hook(sb_delete, sb); } /** * security_sb_free() - Free a super_block LSM blob * @sb: filesystem superblock * * Deallocate and clear the sb->s_security field. @sb contains the super_block * structure to be modified. */ void security_sb_free(struct super_block *sb) { call_void_hook(sb_free_security, sb); kfree(sb->s_security); sb->s_security = NULL; } /** * security_free_mnt_opts() - Free memory associated with mount options * @mnt_opts: LSM processed mount options * * Free memory associated with @mnt_ops. */ void security_free_mnt_opts(void **mnt_opts) { if (!*mnt_opts) return; call_void_hook(sb_free_mnt_opts, *mnt_opts); *mnt_opts = NULL; } EXPORT_SYMBOL(security_free_mnt_opts); /** * security_sb_eat_lsm_opts() - Consume LSM mount options * @options: mount options * @mnt_opts: LSM processed mount options * * Eat (scan @options) and save them in @mnt_opts. * * Return: Returns 0 on success, negative values on failure. */ int security_sb_eat_lsm_opts(char *options, void **mnt_opts) { return call_int_hook(sb_eat_lsm_opts, options, mnt_opts); } EXPORT_SYMBOL(security_sb_eat_lsm_opts); /** * security_sb_mnt_opts_compat() - Check if new mount options are allowed * @sb: filesystem superblock * @mnt_opts: new mount options * * Determine if the new mount options in @mnt_opts are allowed given the * existing mounted filesystem at @sb. @sb superblock being compared. * * Return: Returns 0 if options are compatible. 
*/ int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts) { return call_int_hook(sb_mnt_opts_compat, sb, mnt_opts); } EXPORT_SYMBOL(security_sb_mnt_opts_compat); /** * security_sb_remount() - Verify no incompatible mount changes during remount * @sb: filesystem superblock * @mnt_opts: (re)mount options * * Extracts security system specific mount options and verifies no changes are * being made to those options. * * Return: Returns 0 if permission is granted. */ int security_sb_remount(struct super_block *sb, void *mnt_opts) { return call_int_hook(sb_remount, sb, mnt_opts); } EXPORT_SYMBOL(security_sb_remount); /** * security_sb_kern_mount() - Check if a kernel mount is allowed * @sb: filesystem superblock * * Mount this @sb if allowed by permissions. * * Return: Returns 0 if permission is granted. */ int security_sb_kern_mount(const struct super_block *sb) { return call_int_hook(sb_kern_mount, sb); } /** * security_sb_show_options() - Output the mount options for a superblock * @m: output file * @sb: filesystem superblock * * Show (print on @m) mount options for this @sb. * * Return: Returns 0 on success, negative values on failure. */ int security_sb_show_options(struct seq_file *m, struct super_block *sb) { return call_int_hook(sb_show_options, m, sb); } /** * security_sb_statfs() - Check if accessing fs stats is allowed * @dentry: superblock handle * * Check permission before obtaining filesystem statistics for the @mnt * mountpoint. @dentry is a handle on the superblock for the filesystem. * * Return: Returns 0 if permission is granted. */ int security_sb_statfs(struct dentry *dentry) { return call_int_hook(sb_statfs, dentry); } /** * security_sb_mount() - Check permission for mounting a filesystem * @dev_name: filesystem backing device * @path: mount point * @type: filesystem type * @flags: mount flags * @data: filesystem specific data * * Check permission before an object specified by @dev_name is mounted on the * mount point named by @nd. For an ordinary mount, @dev_name identifies a * device if the file system type requires a device. For a remount * (@flags & MS_REMOUNT), @dev_name is irrelevant. For a loopback/bind mount * (@flags & MS_BIND), @dev_name identifies the pathname of the object being * mounted. * * Return: Returns 0 if permission is granted. */ int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { return call_int_hook(sb_mount, dev_name, path, type, flags, data); } /** * security_sb_umount() - Check permission for unmounting a filesystem * @mnt: mounted filesystem * @flags: unmount flags * * Check permission before the @mnt file system is unmounted. * * Return: Returns 0 if permission is granted. */ int security_sb_umount(struct vfsmount *mnt, int flags) { return call_int_hook(sb_umount, mnt, flags); } /** * security_sb_pivotroot() - Check permissions for pivoting the rootfs * @old_path: new location for current rootfs * @new_path: location of the new rootfs * * Check permission before pivoting the root filesystem. * * Return: Returns 0 if permission is granted. */ int security_sb_pivotroot(const struct path *old_path, const struct path *new_path) { return call_int_hook(sb_pivotroot, old_path, new_path); } /** * security_sb_set_mnt_opts() - Set the mount options for a filesystem * @sb: filesystem superblock * @mnt_opts: binary mount options * @kern_flags: kernel flags (in) * @set_kern_flags: kernel flags (out) * * Set the security relevant mount options used for a superblock. 
* * Return: Returns 0 on success, error on failure. */ int security_sb_set_mnt_opts(struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { struct lsm_static_call *scall; int rc = mnt_opts ? -EOPNOTSUPP : LSM_RET_DEFAULT(sb_set_mnt_opts); lsm_for_each_hook(scall, sb_set_mnt_opts) { rc = scall->hl->hook.sb_set_mnt_opts(sb, mnt_opts, kern_flags, set_kern_flags); if (rc != LSM_RET_DEFAULT(sb_set_mnt_opts)) break; } return rc; } EXPORT_SYMBOL(security_sb_set_mnt_opts); /** * security_sb_clone_mnt_opts() - Duplicate superblock mount options * @oldsb: source superblock * @newsb: destination superblock * @kern_flags: kernel flags (in) * @set_kern_flags: kernel flags (out) * * Copy all security options from a given superblock to another. * * Return: Returns 0 on success, error on failure. */ int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags) { return call_int_hook(sb_clone_mnt_opts, oldsb, newsb, kern_flags, set_kern_flags); } EXPORT_SYMBOL(security_sb_clone_mnt_opts); /** * security_move_mount() - Check permissions for moving a mount * @from_path: source mount point * @to_path: destination mount point * * Check permission before a mount is moved. * * Return: Returns 0 if permission is granted. */ int security_move_mount(const struct path *from_path, const struct path *to_path) { return call_int_hook(move_mount, from_path, to_path); } /** * security_path_notify() - Check if setting a watch is allowed * @path: file path * @mask: event mask * @obj_type: file path type * * Check permissions before setting a watch on events as defined by @mask, on * an object at @path, whose type is defined by @obj_type. * * Return: Returns 0 if permission is granted. */ int security_path_notify(const struct path *path, u64 mask, unsigned int obj_type) { return call_int_hook(path_notify, path, mask, obj_type); } /** * security_inode_alloc() - Allocate an inode LSM blob * @inode: the inode * @gfp: allocation flags * * Allocate and attach a security structure to @inode->i_security. The * i_security field is initialized to NULL when the inode structure is * allocated. * * Return: Return 0 if operation was successful. */ int security_inode_alloc(struct inode *inode, gfp_t gfp) { int rc = lsm_inode_alloc(inode, gfp); if (unlikely(rc)) return rc; rc = call_int_hook(inode_alloc_security, inode); if (unlikely(rc)) security_inode_free(inode); return rc; } static void inode_free_by_rcu(struct rcu_head *head) { /* The rcu head is at the start of the inode blob */ call_void_hook(inode_free_security_rcu, head); kmem_cache_free(lsm_inode_cache, head); } /** * security_inode_free() - Free an inode's LSM blob * @inode: the inode * * Release any LSM resources associated with @inode, although due to the * inode's RCU protections it is possible that the resources will not be * fully released until after the current RCU grace period has elapsed. * * It is important for LSMs to note that despite being present in a call to * security_inode_free(), @inode may still be referenced in a VFS path walk * and calls to security_inode_permission() may be made during, or after, * a call to security_inode_free(). For this reason the inode->i_security * field is released via a call_rcu() callback and any LSMs which need to * retain inode state for use in security_inode_permission() should only * release that state in the inode_free_security_rcu() LSM hook callback. 
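 *
 * As an illustration only, a hypothetical "example" LSM (not a real module)
 * that caches per-inode state consulted by its inode_permission() hook might
 * split its teardown across the two hooks; this sketch assumes the rcu hook
 * receives a pointer to the start of the inode blob, as passed by
 * inode_free_by_rcu() above:
 *
 *	struct example_inode_sec {		// hypothetical blob layout
 *		u32 sid;			// read by inode_permission()
 *		void *deferred;			// state safe to free later
 *	};
 *
 *	static void example_inode_free_security(struct inode *inode)
 *	{
 *		// RCU path walks may still call the example LSM's
 *		// inode_permission() hook, so nothing it reads may be
 *		// released here.
 *	}
 *
 *	static void example_inode_free_security_rcu(void *inode_security)
 *	{
 *		struct example_inode_sec *sec = inode_security;
 *
 *		kfree(sec->deferred);	// grace period over, safe to release
 *	}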
*/ void security_inode_free(struct inode *inode) { call_void_hook(inode_free_security, inode); if (!inode->i_security) return; call_rcu((struct rcu_head *)inode->i_security, inode_free_by_rcu); } /** * security_dentry_init_security() - Perform dentry initialization * @dentry: the dentry to initialize * @mode: mode used to determine resource type * @name: name of the last path component * @xattr_name: name of the security/LSM xattr * @lsmctx: pointer to the resulting LSM context * * Compute a context for a dentry as the inode is not yet available since NFSv4 * has no label backed by an EA anyway. It is important to note that * @xattr_name does not need to be free'd by the caller, it is a static string. * * Return: Returns 0 on success, negative values on failure. */ int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, struct lsm_context *lsmctx) { return call_int_hook(dentry_init_security, dentry, mode, name, xattr_name, lsmctx); } EXPORT_SYMBOL(security_dentry_init_security); /** * security_dentry_create_files_as() - Perform dentry initialization * @dentry: the dentry to initialize * @mode: mode used to determine resource type * @name: name of the last path component * @old: creds to use for LSM context calculations * @new: creds to modify * * Compute a context for a dentry as the inode is not yet available and set * that context in passed in creds so that new files are created using that * context. Context is calculated using the passed in creds and not the creds * of the caller. * * Return: Returns 0 on success, error on failure. */ int security_dentry_create_files_as(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new) { return call_int_hook(dentry_create_files_as, dentry, mode, name, old, new); } EXPORT_SYMBOL(security_dentry_create_files_as); /** * security_inode_init_security() - Initialize an inode's LSM context * @inode: the inode * @dir: parent directory * @qstr: last component of the pathname * @initxattrs: callback function to write xattrs * @fs_data: filesystem specific data * * Obtain the security attribute name suffix and value to set on a newly * created inode and set up the incore security field for the new inode. This * hook is called by the fs code as part of the inode creation transaction and * provides for atomic labeling of the inode, unlike the post_create/mkdir/... * hooks called by the VFS. * * The hook function is expected to populate the xattrs array, by calling * lsm_get_xattr_slot() to retrieve the slots reserved by the security module * with the lbs_xattr_count field of the lsm_blob_sizes structure. For each * slot, the hook function should set ->name to the attribute name suffix * (e.g. selinux), to allocate ->value (will be freed by the caller) and set it * to the attribute value, to set ->value_len to the length of the value. If * the security module does not use security attributes or does not wish to put * a security attribute on this particular inode, then it should return * -EOPNOTSUPP to skip this processing. * * Return: Returns 0 if the LSM successfully initialized all of the inode * security attributes that are required, negative values otherwise. 
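 *
 * As a non-normative sketch of the convention described above, a hypothetical
 * "example" LSM (the name and label value are invented) could fill its
 * reserved slot like this, with the hook signature taken from the call in
 * security_inode_init_security() below:
 *
 *	static int example_inode_init_security(struct inode *inode,
 *					       struct inode *dir,
 *					       const struct qstr *qstr,
 *					       struct xattr *xattrs,
 *					       int *xattr_count)
 *	{
 *		struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);
 *
 *		if (!xattr)	// caller did not supply an xattr array
 *			return 0;
 *		xattr->name = "example";	// suffix after "security."
 *		xattr->value = kstrdup("label", GFP_NOFS);  // freed by caller
 *		if (!xattr->value)
 *			return -ENOMEM;
 *		xattr->value_len = strlen(xattr->value) + 1;
 *		return 0;
 *	}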
*/ int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const initxattrs initxattrs, void *fs_data) { struct lsm_static_call *scall; struct xattr *new_xattrs = NULL; int ret = -EOPNOTSUPP, xattr_count = 0; if (unlikely(IS_PRIVATE(inode))) return 0; if (!blob_sizes.lbs_xattr_count) return 0; if (initxattrs) { /* Allocate +1 as terminator. */ new_xattrs = kcalloc(blob_sizes.lbs_xattr_count + 1, sizeof(*new_xattrs), GFP_NOFS); if (!new_xattrs) return -ENOMEM; } lsm_for_each_hook(scall, inode_init_security) { ret = scall->hl->hook.inode_init_security(inode, dir, qstr, new_xattrs, &xattr_count); if (ret && ret != -EOPNOTSUPP) goto out; /* * As documented in lsm_hooks.h, -EOPNOTSUPP in this context * means that the LSM is not willing to provide an xattr, not * that it wants to signal an error. Thus, continue to invoke * the remaining LSMs. */ } /* If initxattrs() is NULL, xattr_count is zero, skip the call. */ if (!xattr_count) goto out; ret = initxattrs(inode, new_xattrs, fs_data); out: for (; xattr_count > 0; xattr_count--) kfree(new_xattrs[xattr_count - 1].value); kfree(new_xattrs); return (ret == -EOPNOTSUPP) ? 0 : ret; } EXPORT_SYMBOL(security_inode_init_security); /** * security_inode_init_security_anon() - Initialize an anonymous inode * @inode: the inode * @name: the anonymous inode class * @context_inode: an optional related inode * * Set up the incore security field for the new anonymous inode and return * whether the inode creation is permitted by the security module or not. * * Return: Returns 0 on success, -EACCES if the security module denies the * creation of this inode, or another -errno upon other errors. */ int security_inode_init_security_anon(struct inode *inode, const struct qstr *name, const struct inode *context_inode) { return call_int_hook(inode_init_security_anon, inode, name, context_inode); } #ifdef CONFIG_SECURITY_PATH /** * security_path_mknod() - Check if creating a special file is allowed * @dir: parent directory * @dentry: new file * @mode: new file mode * @dev: device number * * Check permissions when creating a file. Note that this hook is called even * if mknod operation is being done for a regular file. * * Return: Returns 0 if permission is granted. */ int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_mknod, dir, dentry, mode, dev); } EXPORT_SYMBOL(security_path_mknod); /** * security_path_post_mknod() - Update inode security after reg file creation * @idmap: idmap of the mount * @dentry: new file * * Update inode security field after a regular file has been created. */ void security_path_post_mknod(struct mnt_idmap *idmap, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(path_post_mknod, idmap, dentry); } /** * security_path_mkdir() - Check if creating a new directory is allowed * @dir: parent directory * @dentry: new directory * @mode: new directory mode * * Check permissions to create a new directory in the existing directory. * * Return: Returns 0 if permission is granted. 
*/ int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_mkdir, dir, dentry, mode); } EXPORT_SYMBOL(security_path_mkdir); /** * security_path_rmdir() - Check if removing a directory is allowed * @dir: parent directory * @dentry: directory to remove * * Check the permission to remove a directory. * * Return: Returns 0 if permission is granted. */ int security_path_rmdir(const struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_rmdir, dir, dentry); } /** * security_path_unlink() - Check if removing a hard link is allowed * @dir: parent directory * @dentry: file * * Check the permission to remove a hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_unlink(const struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_unlink, dir, dentry); } EXPORT_SYMBOL(security_path_unlink); /** * security_path_symlink() - Check if creating a symbolic link is allowed * @dir: parent directory * @dentry: symbolic link * @old_name: file pathname * * Check the permission to create a symbolic link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_symlink, dir, dentry, old_name); } /** * security_path_link - Check if creating a hard link is allowed * @old_dentry: existing file * @new_dir: new parent directory * @new_dentry: new link * * Check permission before creating a new hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)))) return 0; return call_int_hook(path_link, old_dentry, new_dir, new_dentry); } /** * security_path_rename() - Check if renaming a file is allowed * @old_dir: parent directory of the old file * @old_dentry: the old file * @new_dir: parent directory of the new file * @new_dentry: the new file * @flags: flags * * Check for permission to rename a file or directory. * * Return: Returns 0 if permission is granted. */ int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) || (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry))))) return 0; return call_int_hook(path_rename, old_dir, old_dentry, new_dir, new_dentry, flags); } EXPORT_SYMBOL(security_path_rename); /** * security_path_truncate() - Check if truncating a file is allowed * @path: file * * Check permission before truncating the file indicated by path. Note that * truncation permissions may also be checked based on already opened files, * using the security_file_truncate() hook. * * Return: Returns 0 if permission is granted. */ int security_path_truncate(const struct path *path) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_truncate, path); } /** * security_path_chmod() - Check if changing the file's mode is allowed * @path: file * @mode: new mode * * Check for permission to change a mode of the file @path. 
The new mode is * specified in @mode which is a bitmask of constants from * <include/uapi/linux/stat.h>. * * Return: Returns 0 if permission is granted. */ int security_path_chmod(const struct path *path, umode_t mode) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_chmod, path, mode); } /** * security_path_chown() - Check if changing the file's owner/group is allowed * @path: file * @uid: file owner * @gid: file group * * Check for permission to change owner/group of a file or directory. * * Return: Returns 0 if permission is granted. */ int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_chown, path, uid, gid); } /** * security_path_chroot() - Check if changing the root directory is allowed * @path: directory * * Check for permission to change root directory. * * Return: Returns 0 if permission is granted. */ int security_path_chroot(const struct path *path) { return call_int_hook(path_chroot, path); } #endif /* CONFIG_SECURITY_PATH */ /** * security_inode_create() - Check if creating a file is allowed * @dir: the parent directory * @dentry: the file being created * @mode: requested file mode * * Check permission to create a regular file. * * Return: Returns 0 if permission is granted. */ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_create, dir, dentry, mode); } EXPORT_SYMBOL_GPL(security_inode_create); /** * security_inode_post_create_tmpfile() - Update inode security of new tmpfile * @idmap: idmap of the mount * @inode: inode of the new tmpfile * * Update inode security data after a tmpfile has been created. */ void security_inode_post_create_tmpfile(struct mnt_idmap *idmap, struct inode *inode) { if (unlikely(IS_PRIVATE(inode))) return; call_void_hook(inode_post_create_tmpfile, idmap, inode); } /** * security_inode_link() - Check if creating a hard link is allowed * @old_dentry: existing file * @dir: new parent directory * @new_dentry: new link * * Check permission before creating a new hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)))) return 0; return call_int_hook(inode_link, old_dentry, dir, new_dentry); } /** * security_inode_unlink() - Check if removing a hard link is allowed * @dir: parent directory * @dentry: file * * Check the permission to remove a hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_inode_unlink(struct inode *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_unlink, dir, dentry); } /** * security_inode_symlink() - Check if creating a symbolic link is allowed * @dir: parent directory * @dentry: symbolic link * @old_name: existing filename * * Check the permission to create a symbolic link to a file. * * Return: Returns 0 if permission is granted. 
 */
int security_inode_symlink(struct inode *dir, struct dentry *dentry,
			   const char *old_name)
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
	return call_int_hook(inode_symlink, dir, dentry, old_name);
}

/**
 * security_inode_mkdir() - Check if creating a new directory is allowed
 * @dir: parent directory
 * @dentry: new directory
 * @mode: new directory mode
 *
 * Check permissions to create a new directory in the existing directory
 * associated with inode structure @dir.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
	return call_int_hook(inode_mkdir, dir, dentry, mode);
}
EXPORT_SYMBOL_GPL(security_inode_mkdir);

/**
 * security_inode_rmdir() - Check if removing a directory is allowed
 * @dir: parent directory
 * @dentry: directory to be removed
 *
 * Check the permission to remove a directory.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;
	return call_int_hook(inode_rmdir, dir, dentry);
}

/**
 * security_inode_mknod() - Check if creating a special file is allowed
 * @dir: parent directory
 * @dentry: new file
 * @mode: new file mode
 * @dev: device number
 *
 * Check permissions when creating a special file (or a socket or a fifo file
 * created via the mknod system call). Note that if mknod operation is being
 * done for a regular file, then the create hook will be called and not this
 * hook.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_mknod(struct inode *dir, struct dentry *dentry,
			 umode_t mode, dev_t dev)
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
	return call_int_hook(inode_mknod, dir, dentry, mode, dev);
}

/**
 * security_inode_rename() - Check if renaming a file is allowed
 * @old_dir: parent directory of the old file
 * @old_dentry: the old file
 * @new_dir: parent directory of the new file
 * @new_dentry: the new file
 * @flags: flags
 *
 * Check for permission to rename a file or directory.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry,
			  unsigned int flags)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
		     (d_is_positive(new_dentry) &&
		      IS_PRIVATE(d_backing_inode(new_dentry)))))
		return 0;

	if (flags & RENAME_EXCHANGE) {
		int err = call_int_hook(inode_rename, new_dir, new_dentry,
					old_dir, old_dentry);
		if (err)
			return err;
	}

	return call_int_hook(inode_rename, old_dir, old_dentry,
			     new_dir, new_dentry);
}

/**
 * security_inode_readlink() - Check if reading a symbolic link is allowed
 * @dentry: link
 *
 * Check the permission to read the symbolic link.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_readlink(struct dentry *dentry)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;
	return call_int_hook(inode_readlink, dentry);
}

/**
 * security_inode_follow_link() - Check if following a symbolic link is allowed
 * @dentry: link dentry
 * @inode: link inode
 * @rcu: true if in RCU-walk mode
 *
 * Check permission to follow a symbolic link when looking up a pathname. If
 * @rcu is true, @inode is not stable.
 *
 * Return: Returns 0 if permission is granted.
*/ int security_inode_follow_link(struct dentry *dentry, struct inode *inode, bool rcu) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_follow_link, dentry, inode, rcu); } /** * security_inode_permission() - Check if accessing an inode is allowed * @inode: inode * @mask: access mask * * Check permission before accessing an inode. This hook is called by the * existing Linux permission function, so a security module can use it to * provide additional checking for existing Linux permission checks. Notice * that this hook is called when a file is opened (as well as many other * operations), whereas the file_security_ops permission hook is called when * the actual read/write operations are performed. * * Return: Returns 0 if permission is granted. */ int security_inode_permission(struct inode *inode, int mask) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_permission, inode, mask); } /** * security_inode_setattr() - Check if setting file attributes is allowed * @idmap: idmap of the mount * @dentry: file * @attr: new attributes * * Check permission before setting file attributes. Note that the kernel call * to notify_change is performed from several locations, whenever file * attributes change (such as when a file is truncated, chown/chmod operations, * transferring disk quotas, etc). * * Return: Returns 0 if permission is granted. */ int security_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_setattr, idmap, dentry, attr); } EXPORT_SYMBOL_GPL(security_inode_setattr); /** * security_inode_post_setattr() - Update the inode after a setattr operation * @idmap: idmap of the mount * @dentry: file * @ia_valid: file attributes set * * Update inode security field after successful setting file attributes. */ void security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_setattr, idmap, dentry, ia_valid); } /** * security_inode_getattr() - Check if getting file attributes is allowed * @path: file * * Check permission before obtaining file attributes. * * Return: Returns 0 if permission is granted. */ int security_inode_getattr(const struct path *path) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(inode_getattr, path); } /** * security_inode_setxattr() - Check if setting file xattrs is allowed * @idmap: idmap of the mount * @dentry: file * @name: xattr name * @value: xattr value * @size: size of xattr value * @flags: flags * * This hook performs the desired permission checks before setting the extended * attributes (xattrs) on @dentry. It is important to note that we have some * additional logic before the main LSM implementation calls to detect if we * need to perform an additional capability check at the LSM layer. * * Normally we enforce a capability check prior to executing the various LSM * hook implementations, but if a LSM wants to avoid this capability check, * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for * xattrs that it wants to avoid the capability check, leaving the LSM fully * responsible for enforcing the access control for the specific xattr. If all * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook, * or return a 0 (the default return value), the capability check is still * performed. 
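 *
 * For illustration only, a hypothetical "example" LSM (the name and its
 * "security.example" xattr are invented; the hook signature mirrors the call
 * below) might skip the capability check solely for the xattr it owns:
 *
 *	static int example_inode_xattr_skipcap(const char *name)
 *	{
 *		// 1: skip cap_inode_setxattr()/cap_inode_removexattr(),
 *		// the example LSM enforces access to its xattr itself;
 *		// 0 (the default) for every other xattr.
 *		return !strcmp(name, "security.example");
 *	}
 *
 * registered alongside its other hooks, e.g. with
 * LSM_HOOK_INIT(inode_xattr_skipcap, example_inode_xattr_skipcap).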
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry,
			    const char *name, const void *value, size_t size,
			    int flags)
{
	int rc;

	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;

	/* enforce the capability checks at the lsm layer, if needed */
	if (!call_int_hook(inode_xattr_skipcap, name)) {
		rc = cap_inode_setxattr(dentry, name, value, size, flags);
		if (rc)
			return rc;
	}

	return call_int_hook(inode_setxattr, idmap, dentry, name, value, size,
			     flags);
}

/**
 * security_inode_set_acl() - Check if setting posix acls is allowed
 * @idmap: idmap of the mount
 * @dentry: file
 * @acl_name: acl name
 * @kacl: acl struct
 *
 * Check permission before setting posix acls, the posix acls in @kacl are
 * identified by @acl_name.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
			   const char *acl_name, struct posix_acl *kacl)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;
	return call_int_hook(inode_set_acl, idmap, dentry, acl_name, kacl);
}

/**
 * security_inode_post_set_acl() - Update inode security from posix acls set
 * @dentry: file
 * @acl_name: acl name
 * @kacl: acl struct
 *
 * Update inode security data after successfully setting posix acls on @dentry.
 * The posix acls in @kacl are identified by @acl_name.
 */
void security_inode_post_set_acl(struct dentry *dentry, const char *acl_name,
				 struct posix_acl *kacl)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return;
	call_void_hook(inode_post_set_acl, dentry, acl_name, kacl);
}

/**
 * security_inode_get_acl() - Check if reading posix acls is allowed
 * @idmap: idmap of the mount
 * @dentry: file
 * @acl_name: acl name
 *
 * Check permission before getting posix acls, the posix acls are identified by
 * @acl_name.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
			   const char *acl_name)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;
	return call_int_hook(inode_get_acl, idmap, dentry, acl_name);
}

/**
 * security_inode_remove_acl() - Check if removing a posix acl is allowed
 * @idmap: idmap of the mount
 * @dentry: file
 * @acl_name: acl name
 *
 * Check permission before removing posix acls, the posix acls are identified
 * by @acl_name.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry,
			      const char *acl_name)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;
	return call_int_hook(inode_remove_acl, idmap, dentry, acl_name);
}

/**
 * security_inode_post_remove_acl() - Update inode security after rm posix acls
 * @idmap: idmap of the mount
 * @dentry: file
 * @acl_name: acl name
 *
 * Update inode security data after successfully removing posix acls on
 * @dentry in @idmap. The posix acls are identified by @acl_name.
 */
void security_inode_post_remove_acl(struct mnt_idmap *idmap,
				    struct dentry *dentry,
				    const char *acl_name)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return;
	call_void_hook(inode_post_remove_acl, idmap, dentry, acl_name);
}

/**
 * security_inode_post_setxattr() - Update the inode after a setxattr operation
 * @dentry: file
 * @name: xattr name
 * @value: xattr value
 * @size: xattr value size
 * @flags: flags
 *
 * Update inode security field after successful setxattr operation.
 */
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
				  const void *value, size_t size, int flags)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return;
	call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
}

/**
 * security_inode_getxattr() - Check if xattr access is allowed
 * @dentry: file
 * @name: xattr name
 *
 * Check permission before obtaining the extended attributes identified by
 * @name for @dentry.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_getxattr(struct dentry *dentry, const char *name)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;
	return call_int_hook(inode_getxattr, dentry, name);
}

/**
 * security_inode_listxattr() - Check if listing xattrs is allowed
 * @dentry: file
 *
 * Check permission before obtaining the list of extended attribute names for
 * @dentry.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_listxattr(struct dentry *dentry)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;
	return call_int_hook(inode_listxattr, dentry);
}

/**
 * security_inode_removexattr() - Check if removing an xattr is allowed
 * @idmap: idmap of the mount
 * @dentry: file
 * @name: xattr name
 *
 * This hook performs the desired permission checks before removing the
 * extended attribute (xattr) identified by @name from @dentry. It is
 * important to note that we have some additional logic before the main LSM
 * implementation calls to detect if we need to perform an additional
 * capability check at the LSM layer.
 *
 * Normally we enforce a capability check prior to executing the various LSM
 * hook implementations, but if a LSM wants to avoid this capability check,
 * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for
 * xattrs that it wants to avoid the capability check, leaving the LSM fully
 * responsible for enforcing the access control for the specific xattr. If all
 * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook,
 * or return a 0 (the default return value), the capability check is still
 * performed.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inode_removexattr(struct mnt_idmap *idmap,
			       struct dentry *dentry, const char *name)
{
	int rc;

	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return 0;

	/* enforce the capability checks at the lsm layer, if needed */
	if (!call_int_hook(inode_xattr_skipcap, name)) {
		rc = cap_inode_removexattr(idmap, dentry, name);
		if (rc)
			return rc;
	}

	return call_int_hook(inode_removexattr, idmap, dentry, name);
}

/**
 * security_inode_post_removexattr() - Update the inode after a removexattr op
 * @dentry: file
 * @name: xattr name
 *
 * Update the inode after a successful removexattr operation.
 */
void security_inode_post_removexattr(struct dentry *dentry, const char *name)
{
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
		return;
	call_void_hook(inode_post_removexattr, dentry, name);
}

/**
 * security_inode_need_killpriv() - Check if security_inode_killpriv() required
 * @dentry: associated dentry
 *
 * Called when an inode has been changed to determine if
 * security_inode_killpriv() should be called.
 *
 * Return: Return <0 on error to abort the inode change operation, return 0 if
 * security_inode_killpriv() does not need to be called, return >0 if
 * security_inode_killpriv() does need to be called.
*/ int security_inode_need_killpriv(struct dentry *dentry) { return call_int_hook(inode_need_killpriv, dentry); } /** * security_inode_killpriv() - The setuid bit is removed, update LSM state * @idmap: idmap of the mount * @dentry: associated dentry * * The @dentry's setuid bit is being removed. Remove similar security labels. * Called with the dentry->d_inode->i_mutex held. * * Return: Return 0 on success. If error is returned, then the operation * causing setuid bit removal is failed. */ int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry) { return call_int_hook(inode_killpriv, idmap, dentry); } /** * security_inode_getsecurity() - Get the xattr security label of an inode * @idmap: idmap of the mount * @inode: inode * @name: xattr name * @buffer: security label buffer * @alloc: allocation flag * * Retrieve a copy of the extended attribute representation of the security * label associated with @name for @inode via @buffer. Note that @name is the * remainder of the attribute name after the security prefix has been removed. * @alloc is used to specify if the call should return a value via the buffer * or just the value length. * * Return: Returns size of buffer on success. */ int security_inode_getsecurity(struct mnt_idmap *idmap, struct inode *inode, const char *name, void **buffer, bool alloc) { if (unlikely(IS_PRIVATE(inode))) return LSM_RET_DEFAULT(inode_getsecurity); return call_int_hook(inode_getsecurity, idmap, inode, name, buffer, alloc); } /** * security_inode_setsecurity() - Set the xattr security label of an inode * @inode: inode * @name: xattr name * @value: security label * @size: length of security label * @flags: flags * * Set the security label associated with @name for @inode from the extended * attribute value @value. @size indicates the size of the @value in bytes. * @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. Note that @name is the * remainder of the attribute name after the security. prefix has been removed. * * Return: Returns 0 on success. */ int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { if (unlikely(IS_PRIVATE(inode))) return LSM_RET_DEFAULT(inode_setsecurity); return call_int_hook(inode_setsecurity, inode, name, value, size, flags); } /** * security_inode_listsecurity() - List the xattr security label names * @inode: inode * @buffer: buffer * @buffer_size: size of buffer * * Copy the extended attribute names for the security labels associated with * @inode into @buffer. The maximum size of @buffer is specified by * @buffer_size. @buffer may be NULL to request the size of the buffer * required. * * Return: Returns number of bytes used/required on success. */ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_listsecurity, inode, buffer, buffer_size); } EXPORT_SYMBOL(security_inode_listsecurity); /** * security_inode_getlsmprop() - Get an inode's LSM data * @inode: inode * @prop: lsm specific information to return * * Get the lsm specific information associated with the node. */ void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop) { call_void_hook(inode_getlsmprop, inode, prop); } /** * security_inode_copy_up() - Create new creds for an overlayfs copy-up op * @src: union dentry of copy-up file * @new: newly created creds * * A file is about to be copied up from lower layer to upper layer of overlay * filesystem. 
Security module can prepare a set of new creds and modify as * need be and return new creds. Caller will switch to new creds temporarily to * create new file and release newly allocated creds. * * Return: Returns 0 on success or a negative error code on error. */ int security_inode_copy_up(struct dentry *src, struct cred **new) { return call_int_hook(inode_copy_up, src, new); } EXPORT_SYMBOL(security_inode_copy_up); /** * security_inode_copy_up_xattr() - Filter xattrs in an overlayfs copy-up op * @src: union dentry of copy-up file * @name: xattr name * * Filter the xattrs being copied up when a unioned file is copied up from a * lower layer to the union/overlay layer. The caller is responsible for * reading and writing the xattrs, this hook is merely a filter. * * Return: Returns 0 to accept the xattr, -ECANCELED to discard the xattr, * -EOPNOTSUPP if the security module does not know about attribute, * or a negative error code to abort the copy up. */ int security_inode_copy_up_xattr(struct dentry *src, const char *name) { int rc; rc = call_int_hook(inode_copy_up_xattr, src, name); if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr)) return rc; return LSM_RET_DEFAULT(inode_copy_up_xattr); } EXPORT_SYMBOL(security_inode_copy_up_xattr); /** * security_inode_setintegrity() - Set the inode's integrity data * @inode: inode * @type: type of integrity, e.g. hash digest, signature, etc * @value: the integrity value * @size: size of the integrity value * * Register a verified integrity measurement of a inode with LSMs. * LSMs should free the previously saved data if @value is NULL. * * Return: Returns 0 on success, negative values on failure. */ int security_inode_setintegrity(const struct inode *inode, enum lsm_integrity_type type, const void *value, size_t size) { return call_int_hook(inode_setintegrity, inode, type, value, size); } EXPORT_SYMBOL(security_inode_setintegrity); /** * security_kernfs_init_security() - Init LSM context for a kernfs node * @kn_dir: parent kernfs node * @kn: the kernfs node to initialize * * Initialize the security context of a newly created kernfs node based on its * own and its parent's attributes. * * Return: Returns 0 if permission is granted. */ int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn) { return call_int_hook(kernfs_init_security, kn_dir, kn); } /** * security_file_permission() - Check file permissions * @file: file * @mask: requested permissions * * Check file permissions before accessing an open file. This hook is called * by various operations that read or write files. A security module can use * this hook to perform additional checking on these operations, e.g. to * revalidate permissions on use to support privilege bracketing or policy * changes. Notice that this hook is used when the actual read/write * operations are performed, whereas the inode_security_ops hook is called when * a file is opened (as well as many other operations). Although this hook can * be used to revalidate permissions for various system call operations that * read or write files, it does not address the revalidation of permissions for * memory-mapped files. Security modules must handle this separately if they * need such revalidation. * * Return: Returns 0 if permission is granted. 
*/ int security_file_permission(struct file *file, int mask) { return call_int_hook(file_permission, file, mask); } /** * security_file_alloc() - Allocate and init a file's LSM blob * @file: the file * * Allocate and attach a security structure to the file->f_security field. The * security field is initialized to NULL when the structure is first created. * * Return: Return 0 if the hook is successful and permission is granted. */ int security_file_alloc(struct file *file) { int rc = lsm_file_alloc(file); if (rc) return rc; rc = call_int_hook(file_alloc_security, file); if (unlikely(rc)) security_file_free(file); return rc; } /** * security_file_release() - Perform actions before releasing the file ref * @file: the file * * Perform actions before releasing the last reference to a file. */ void security_file_release(struct file *file) { call_void_hook(file_release, file); } /** * security_file_free() - Free a file's LSM blob * @file: the file * * Deallocate and free any security structures stored in file->f_security. */ void security_file_free(struct file *file) { void *blob; call_void_hook(file_free_security, file); blob = file->f_security; if (blob) { file->f_security = NULL; kmem_cache_free(lsm_file_cache, blob); } } /** * security_file_ioctl() - Check if an ioctl is allowed * @file: associated file * @cmd: ioctl cmd * @arg: ioctl arguments * * Check permission for an ioctl operation on @file. Note that @arg sometimes * represents a user space pointer; in other cases, it may be a simple integer * value. When @arg represents a user space pointer, it should never be used * by the security module. * * Return: Returns 0 if permission is granted. */ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_ioctl, file, cmd, arg); } EXPORT_SYMBOL_GPL(security_file_ioctl); /** * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode * @file: associated file * @cmd: ioctl cmd * @arg: ioctl arguments * * Compat version of security_file_ioctl() that correctly handles 32-bit * processes running on 64-bit kernels. * * Return: Returns 0 if permission is granted. */ int security_file_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_ioctl_compat, file, cmd, arg); } EXPORT_SYMBOL_GPL(security_file_ioctl_compat); static inline unsigned long mmap_prot(struct file *file, unsigned long prot) { /* * Does we have PROT_READ and does the application expect * it to imply PROT_EXEC? If not, nothing to talk about... */ if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ) return prot; if (!(current->personality & READ_IMPLIES_EXEC)) return prot; /* * if that's an anonymous mapping, let it. */ if (!file) return prot | PROT_EXEC; /* * ditto if it's not on noexec mount, except that on !MMU we need * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case */ if (!path_noexec(&file->f_path)) { #ifndef CONFIG_MMU if (file->f_op->mmap_capabilities) { unsigned caps = file->f_op->mmap_capabilities(file); if (!(caps & NOMMU_MAP_EXEC)) return prot; } #endif return prot | PROT_EXEC; } /* anything on noexec mount won't get PROT_EXEC */ return prot; } /** * security_mmap_file() - Check if mmap'ing a file is allowed * @file: file * @prot: protection applied by the kernel * @flags: flags * * Check permissions for a mmap operation. The @file may be NULL, e.g. if * mapping anonymous memory. * * Return: Returns 0 if permission is granted. 
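 *
 * For illustration only, a hypothetical "example" LSM (its
 * example_file_is_untrusted() helper is invented) would normally base its
 * decision on @prot rather than @reqprot, since mmap_prot() above may have
 * added PROT_EXEC for a READ_IMPLIES_EXEC personality:
 *
 *	static int example_mmap_file(struct file *file, unsigned long reqprot,
 *				     unsigned long prot, unsigned long flags)
 *	{
 *		// prot, not reqprot, reflects what the kernel will apply.
 *		if (file && (prot & PROT_EXEC) &&
 *		    example_file_is_untrusted(file))	// hypothetical helper
 *			return -EACCES;
 *		return 0;
 *	}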
*/ int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { return call_int_hook(mmap_file, file, prot, mmap_prot(file, prot), flags); } /** * security_mmap_addr() - Check if mmap'ing an address is allowed * @addr: address * * Check permissions for a mmap operation at @addr. * * Return: Returns 0 if permission is granted. */ int security_mmap_addr(unsigned long addr) { return call_int_hook(mmap_addr, addr); } /** * security_file_mprotect() - Check if changing memory protections is allowed * @vma: memory region * @reqprot: application requested protection * @prot: protection applied by the kernel * * Check permissions before changing memory access permissions. * * Return: Returns 0 if permission is granted. */ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return call_int_hook(file_mprotect, vma, reqprot, prot); } /** * security_file_lock() - Check if a file lock is allowed * @file: file * @cmd: lock operation (e.g. F_RDLCK, F_WRLCK) * * Check permission before performing file locking operations. Note the hook * mediates both flock and fcntl style locks. * * Return: Returns 0 if permission is granted. */ int security_file_lock(struct file *file, unsigned int cmd) { return call_int_hook(file_lock, file, cmd); } /** * security_file_fcntl() - Check if fcntl() op is allowed * @file: file * @cmd: fcntl command * @arg: command argument * * Check permission before allowing the file operation specified by @cmd from * being performed on the file @file. Note that @arg sometimes represents a * user space pointer; in other cases, it may be a simple integer value. When * @arg represents a user space pointer, it should never be used by the * security module. * * Return: Returns 0 if permission is granted. */ int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_fcntl, file, cmd, arg); } /** * security_file_set_fowner() - Set the file owner info in the LSM blob * @file: the file * * Save owner security information (typically from current->security) in * file->f_security for later use by the send_sigiotask hook. * * This hook is called with file->f_owner.lock held. * * Return: Returns 0 on success. */ void security_file_set_fowner(struct file *file) { call_void_hook(file_set_fowner, file); } /** * security_file_send_sigiotask() - Check if sending SIGIO/SIGURG is allowed * @tsk: target task * @fown: signal sender * @sig: signal to be sent, SIGIO is sent if 0 * * Check permission for the file owner @fown to send SIGIO or SIGURG to the * process @tsk. Note that this hook is sometimes called from interrupt. Note * that the fown_struct, @fown, is never outside the context of a struct file, * so the file structure (and associated security information) can always be * obtained: container_of(fown, struct file, f_owner). * * Return: Returns 0 if permission is granted. */ int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig) { return call_int_hook(file_send_sigiotask, tsk, fown, sig); } /** * security_file_receive() - Check if receiving a file via IPC is allowed * @file: file being received * * This hook allows security modules to control the ability of a process to * receive an open file descriptor via socket IPC. * * Return: Returns 0 if permission is granted. 
*/ int security_file_receive(struct file *file) { return call_int_hook(file_receive, file); } /** * security_file_open() - Save open() time state for late use by the LSM * @file: * * Save open-time permission checking state for later use upon file_permission, * and recheck access if anything has changed since inode_permission. * * We can check if a file is opened for execution (e.g. execve(2) call), either * directly or indirectly (e.g. ELF's ld.so) by checking file->f_flags & * __FMODE_EXEC . * * Return: Returns 0 if permission is granted. */ int security_file_open(struct file *file) { return call_int_hook(file_open, file); } /** * security_file_post_open() - Evaluate a file after it has been opened * @file: the file * @mask: access mask * * Evaluate an opened file and the access mask requested with open(). The hook * is useful for LSMs that require the file content to be available in order to * make decisions. * * Return: Returns 0 if permission is granted. */ int security_file_post_open(struct file *file, int mask) { return call_int_hook(file_post_open, file, mask); } EXPORT_SYMBOL_GPL(security_file_post_open); /** * security_file_truncate() - Check if truncating a file is allowed * @file: file * * Check permission before truncating a file, i.e. using ftruncate. Note that * truncation permission may also be checked based on the path, using the * @path_truncate hook. * * Return: Returns 0 if permission is granted. */ int security_file_truncate(struct file *file) { return call_int_hook(file_truncate, file); } /** * security_task_alloc() - Allocate a task's LSM blob * @task: the task * @clone_flags: flags indicating what is being shared * * Handle allocation of task-related resources. * * Return: Returns a zero on success, negative values on failure. */ int security_task_alloc(struct task_struct *task, unsigned long clone_flags) { int rc = lsm_task_alloc(task); if (rc) return rc; rc = call_int_hook(task_alloc, task, clone_flags); if (unlikely(rc)) security_task_free(task); return rc; } /** * security_task_free() - Free a task's LSM blob and related resources * @task: task * * Handle release of task-related resources. Note that this can be called from * interrupt context. */ void security_task_free(struct task_struct *task) { call_void_hook(task_free, task); kfree(task->security); task->security = NULL; } /** * security_cred_alloc_blank() - Allocate the min memory to allow cred_transfer * @cred: credentials * @gfp: gfp flags * * Only allocate sufficient memory and attach to @cred such that * cred_transfer() will not get ENOMEM. * * Return: Returns 0 on success, negative values on failure. */ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) { int rc = lsm_cred_alloc(cred, gfp); if (rc) return rc; rc = call_int_hook(cred_alloc_blank, cred, gfp); if (unlikely(rc)) security_cred_free(cred); return rc; } /** * security_cred_free() - Free the cred's LSM blob and associated resources * @cred: credentials * * Deallocate and clear the cred->security field in a set of credentials. */ void security_cred_free(struct cred *cred) { /* * There is a failure case in prepare_creds() that * may result in a call here with ->security being NULL. */ if (unlikely(cred->security == NULL)) return; call_void_hook(cred_free, cred); kfree(cred->security); cred->security = NULL; } /** * security_prepare_creds() - Prepare a new set of credentials * @new: new credentials * @old: original credentials * @gfp: gfp flags * * Prepare a new set of credentials by copying the data from the old set. 
* * Return: Returns 0 on success, negative values on failure. */ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp) { int rc = lsm_cred_alloc(new, gfp); if (rc) return rc; rc = call_int_hook(cred_prepare, new, old, gfp); if (unlikely(rc)) security_cred_free(new); return rc; } /** * security_transfer_creds() - Transfer creds * @new: target credentials * @old: original credentials * * Transfer data from original creds to new creds. */ void security_transfer_creds(struct cred *new, const struct cred *old) { call_void_hook(cred_transfer, new, old); } /** * security_cred_getsecid() - Get the secid from a set of credentials * @c: credentials * @secid: secid value * * Retrieve the security identifier of the cred structure @c. In case of * failure, @secid will be set to zero. */ void security_cred_getsecid(const struct cred *c, u32 *secid) { *secid = 0; call_void_hook(cred_getsecid, c, secid); } EXPORT_SYMBOL(security_cred_getsecid); /** * security_cred_getlsmprop() - Get the LSM data from a set of credentials * @c: credentials * @prop: destination for the LSM data * * Retrieve the security data of the cred structure @c. In case of * failure, @prop will be cleared. */ void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(cred_getlsmprop, c, prop); } EXPORT_SYMBOL(security_cred_getlsmprop); /** * security_kernel_act_as() - Set the kernel credentials to act as secid * @new: credentials * @secid: secid * * Set the credentials for a kernel service to act as (subjective context). * The current task must be the one that nominated @secid. * * Return: Returns 0 if successful. */ int security_kernel_act_as(struct cred *new, u32 secid) { return call_int_hook(kernel_act_as, new, secid); } /** * security_kernel_create_files_as() - Set file creation context using an inode * @new: target credentials * @inode: reference inode * * Set the file creation context in a set of credentials to be the same as the * objective context of the specified inode. The current task must be the one * that nominated @inode. * * Return: Returns 0 if successful. */ int security_kernel_create_files_as(struct cred *new, struct inode *inode) { return call_int_hook(kernel_create_files_as, new, inode); } /** * security_kernel_module_request() - Check if loading a module is allowed * @kmod_name: module name * * Ability to trigger the kernel to automatically upcall to userspace for * userspace to load a kernel module with the given name. * * Return: Returns 0 if successful. */ int security_kernel_module_request(char *kmod_name) { return call_int_hook(kernel_module_request, kmod_name); } /** * security_kernel_read_file() - Read a file specified by userspace * @file: file * @id: file identifier * @contents: trust if security_kernel_post_read_file() will be called * * Read a file specified by userspace. * * Return: Returns 0 if permission is granted. */ int security_kernel_read_file(struct file *file, enum kernel_read_file_id id, bool contents) { return call_int_hook(kernel_read_file, file, id, contents); } EXPORT_SYMBOL_GPL(security_kernel_read_file); /** * security_kernel_post_read_file() - Read a file specified by userspace * @file: file * @buf: file contents * @size: size of file contents * @id: file identifier * * Read a file specified by userspace. This must be paired with a prior call * to security_kernel_read_file() call that indicated this hook would also be * called, see security_kernel_read_file() for more information. 
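 *
 * As a hedged sketch of that pairing (the "example" LSM and its
 * example_appraise() helper are invented; the hook signature follows the
 * wrapper below):
 *
 *	static int example_kernel_post_read_file(struct file *file, char *buf,
 *						 loff_t size,
 *						 enum kernel_read_file_id id)
 *	{
 *		// Reached only when the earlier security_kernel_read_file()
 *		// call was made with contents == true; buf/size hold the
 *		// complete file data, e.g. for measurement or appraisal.
 *		if (id == READING_MODULE)
 *			return example_appraise(buf, size);  // hypothetical
 *		return 0;
 *	}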
* * Return: Returns 0 if permission is granted. */ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) { return call_int_hook(kernel_post_read_file, file, buf, size, id); } EXPORT_SYMBOL_GPL(security_kernel_post_read_file); /** * security_kernel_load_data() - Load data provided by userspace * @id: data identifier * @contents: true if security_kernel_post_load_data() will be called * * Load data provided by userspace. * * Return: Returns 0 if permission is granted. */ int security_kernel_load_data(enum kernel_load_data_id id, bool contents) { return call_int_hook(kernel_load_data, id, contents); } EXPORT_SYMBOL_GPL(security_kernel_load_data); /** * security_kernel_post_load_data() - Load userspace data from a non-file source * @buf: data * @size: size of data * @id: data identifier * @description: text description of data, specific to the id value * * Load data provided by a non-file source (usually userspace buffer). This * must be paired with a prior security_kernel_load_data() call that indicated * this hook would also be called, see security_kernel_load_data() for more * information. * * Return: Returns 0 if permission is granted. */ int security_kernel_post_load_data(char *buf, loff_t size, enum kernel_load_data_id id, char *description) { return call_int_hook(kernel_post_load_data, buf, size, id, description); } EXPORT_SYMBOL_GPL(security_kernel_post_load_data); /** * security_task_fix_setuid() - Update LSM with new user id attributes * @new: updated credentials * @old: credentials being replaced * @flags: LSM_SETID_* flag values * * Update the module's state after setting one or more of the user identity * attributes of the current process. The @flags parameter indicates which of * the set*uid system calls invoked this hook. If @new is the set of * credentials that will be installed. Modifications should be made to this * rather than to @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { return call_int_hook(task_fix_setuid, new, old, flags); } /** * security_task_fix_setgid() - Update LSM with new group id attributes * @new: updated credentials * @old: credentials being replaced * @flags: LSM_SETID_* flag value * * Update the module's state after setting one or more of the group identity * attributes of the current process. The @flags parameter indicates which of * the set*gid system calls invoked this hook. @new is the set of credentials * that will be installed. Modifications should be made to this rather than to * @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setgid(struct cred *new, const struct cred *old, int flags) { return call_int_hook(task_fix_setgid, new, old, flags); } /** * security_task_fix_setgroups() - Update LSM with new supplementary groups * @new: updated credentials * @old: credentials being replaced * * Update the module's state after setting the supplementary group identity * attributes of the current process. @new is the set of credentials that will * be installed. Modifications should be made to this rather than to * @current->cred. * * Return: Returns 0 on success. 
*/ int security_task_fix_setgroups(struct cred *new, const struct cred *old) { return call_int_hook(task_fix_setgroups, new, old); } /** * security_task_setpgid() - Check if setting the pgid is allowed * @p: task being modified * @pgid: new pgid * * Check permission before setting the process group identifier of the process * @p to @pgid. * * Return: Returns 0 if permission is granted. */ int security_task_setpgid(struct task_struct *p, pid_t pgid) { return call_int_hook(task_setpgid, p, pgid); } /** * security_task_getpgid() - Check if getting the pgid is allowed * @p: task * * Check permission before getting the process group identifier of the process * @p. * * Return: Returns 0 if permission is granted. */ int security_task_getpgid(struct task_struct *p) { return call_int_hook(task_getpgid, p); } /** * security_task_getsid() - Check if getting the session id is allowed * @p: task * * Check permission before getting the session identifier of the process @p. * * Return: Returns 0 if permission is granted. */ int security_task_getsid(struct task_struct *p) { return call_int_hook(task_getsid, p); } /** * security_current_getlsmprop_subj() - Current task's subjective LSM data * @prop: lsm specific information * * Retrieve the subjective security identifier of the current task and return * it in @prop. */ void security_current_getlsmprop_subj(struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(current_getlsmprop_subj, prop); } EXPORT_SYMBOL(security_current_getlsmprop_subj); /** * security_task_getlsmprop_obj() - Get a task's objective LSM data * @p: target task * @prop: lsm specific information * * Retrieve the objective security identifier of the task_struct in @p and * return it in @prop. */ void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(task_getlsmprop_obj, p, prop); } EXPORT_SYMBOL(security_task_getlsmprop_obj); /** * security_task_setnice() - Check if setting a task's nice value is allowed * @p: target task * @nice: nice value * * Check permission before setting the nice value of @p to @nice. * * Return: Returns 0 if permission is granted. */ int security_task_setnice(struct task_struct *p, int nice) { return call_int_hook(task_setnice, p, nice); } /** * security_task_setioprio() - Check if setting a task's ioprio is allowed * @p: target task * @ioprio: ioprio value * * Check permission before setting the ioprio value of @p to @ioprio. * * Return: Returns 0 if permission is granted. */ int security_task_setioprio(struct task_struct *p, int ioprio) { return call_int_hook(task_setioprio, p, ioprio); } /** * security_task_getioprio() - Check if getting a task's ioprio is allowed * @p: task * * Check permission before getting the ioprio value of @p. * * Return: Returns 0 if permission is granted. */ int security_task_getioprio(struct task_struct *p) { return call_int_hook(task_getioprio, p); } /** * security_task_prlimit() - Check if get/setting resources limits is allowed * @cred: current task credentials * @tcred: target task credentials * @flags: LSM_PRLIMIT_* flag bits indicating a get/set/both * * Check permission before getting and/or setting the resource limits of * another task. * * Return: Returns 0 if permission is granted. 
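 *
 * For illustration only (the "example" helpers are invented; LSM_PRLIMIT_READ
 * and LSM_PRLIMIT_WRITE are the flag bits referred to above), a hook might
 * distinguish the two cases like this:
 *
 *	static int example_task_prlimit(const struct cred *cred,
 *					const struct cred *tcred,
 *					unsigned int flags)
 *	{
 *		int rc = 0;
 *
 *		if (flags & LSM_PRLIMIT_READ)
 *			rc = example_may_read_rlimit(cred, tcred);
 *		if (!rc && (flags & LSM_PRLIMIT_WRITE))
 *			rc = example_may_write_rlimit(cred, tcred);
 *		return rc;
 *	}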
*/ int security_task_prlimit(const struct cred *cred, const struct cred *tcred, unsigned int flags) { return call_int_hook(task_prlimit, cred, tcred, flags); } /** * security_task_setrlimit() - Check if setting a new rlimit value is allowed * @p: target task's group leader * @resource: resource whose limit is being set * @new_rlim: new resource limit * * Check permission before setting the resource limits of process @p for * @resource to @new_rlim. The old resource limit values can be examined by * dereferencing (p->signal->rlim + resource). * * Return: Returns 0 if permission is granted. */ int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) { return call_int_hook(task_setrlimit, p, resource, new_rlim); } /** * security_task_setscheduler() - Check if setting sched policy/param is allowed * @p: target task * * Check permission before setting scheduling policy and/or parameters of * process @p. * * Return: Returns 0 if permission is granted. */ int security_task_setscheduler(struct task_struct *p) { return call_int_hook(task_setscheduler, p); } /** * security_task_getscheduler() - Check if getting scheduling info is allowed * @p: target task * * Check permission before obtaining scheduling information for process @p. * * Return: Returns 0 if permission is granted. */ int security_task_getscheduler(struct task_struct *p) { return call_int_hook(task_getscheduler, p); } /** * security_task_movememory() - Check if moving memory is allowed * @p: task * * Check permission before moving memory owned by process @p. * * Return: Returns 0 if permission is granted. */ int security_task_movememory(struct task_struct *p) { return call_int_hook(task_movememory, p); } /** * security_task_kill() - Check if sending a signal is allowed * @p: target process * @info: signal information * @sig: signal value * @cred: credentials of the signal sender, NULL if @current * * Check permission before sending signal @sig to @p. @info can be NULL, the * constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming from * the kernel and should typically be permitted. SIGIO signals are handled * separately by the send_sigiotask hook in file_security_ops. * * Return: Returns 0 if permission is granted. */ int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { return call_int_hook(task_kill, p, info, sig, cred); } /** * security_task_prctl() - Check if a prctl op is allowed * @option: operation * @arg2: argument * @arg3: argument * @arg4: argument * @arg5: argument * * Check permission before performing a process control operation on the * current process. * * Return: Return -ENOSYS if no-one wanted to handle this op, any other value * to cause prctl() to return immediately with that value. */ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { int thisrc; int rc = LSM_RET_DEFAULT(task_prctl); struct lsm_static_call *scall; lsm_for_each_hook(scall, task_prctl) { thisrc = scall->hl->hook.task_prctl(option, arg2, arg3, arg4, arg5); if (thisrc != LSM_RET_DEFAULT(task_prctl)) { rc = thisrc; if (thisrc != 0) break; } } return rc; } /** * security_task_to_inode() - Set the security attributes of a task's inode * @p: task * @inode: inode * * Set the security attributes for an inode based on an associated task's * security attributes, e.g. for /proc/pid inodes. 
*/ void security_task_to_inode(struct task_struct *p, struct inode *inode) { call_void_hook(task_to_inode, p, inode); } /** * security_create_user_ns() - Check if creating a new userns is allowed * @cred: prepared creds * * Check permission prior to creating a new user namespace. * * Return: Returns 0 if successful, otherwise < 0 error code. */ int security_create_user_ns(const struct cred *cred) { return call_int_hook(userns_create, cred); } /** * security_ipc_permission() - Check if sysv ipc access is allowed * @ipcp: ipc permission structure * @flag: requested permissions * * Check permissions for access to IPC. * * Return: Returns 0 if permission is granted. */ int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag) { return call_int_hook(ipc_permission, ipcp, flag); } /** * security_ipc_getlsmprop() - Get the sysv ipc object LSM data * @ipcp: ipc permission structure * @prop: pointer to lsm information * * Get the lsm information associated with the ipc object. */ void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(ipc_getlsmprop, ipcp, prop); } /** * security_msg_msg_alloc() - Allocate a sysv ipc message LSM blob * @msg: message structure * * Allocate and attach a security structure to the msg->security field. The * security field is initialized to NULL when the structure is first created. * * Return: Return 0 if operation was successful and permission is granted. */ int security_msg_msg_alloc(struct msg_msg *msg) { int rc = lsm_msg_msg_alloc(msg); if (unlikely(rc)) return rc; rc = call_int_hook(msg_msg_alloc_security, msg); if (unlikely(rc)) security_msg_msg_free(msg); return rc; } /** * security_msg_msg_free() - Free a sysv ipc message LSM blob * @msg: message structure * * Deallocate the security structure for this message. */ void security_msg_msg_free(struct msg_msg *msg) { call_void_hook(msg_msg_free_security, msg); kfree(msg->security); msg->security = NULL; } /** * security_msg_queue_alloc() - Allocate a sysv ipc msg queue LSM blob * @msq: sysv ipc permission structure * * Allocate and attach a security structure to @msg. The security field is * initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_msg_queue_alloc(struct kern_ipc_perm *msq) { int rc = lsm_ipc_alloc(msq); if (unlikely(rc)) return rc; rc = call_int_hook(msg_queue_alloc_security, msq); if (unlikely(rc)) security_msg_queue_free(msq); return rc; } /** * security_msg_queue_free() - Free a sysv ipc msg queue LSM blob * @msq: sysv ipc permission structure * * Deallocate security field @perm->security for the message queue. */ void security_msg_queue_free(struct kern_ipc_perm *msq) { call_void_hook(msg_queue_free_security, msq); kfree(msq->security); msq->security = NULL; } /** * security_msg_queue_associate() - Check if a msg queue operation is allowed * @msq: sysv ipc permission structure * @msqflg: operation flags * * Check permission when a message queue is requested through the msgget system * call. This hook is only called when returning the message queue identifier * for an existing message queue, not when a new message queue is created. * * Return: Return 0 if permission is granted. 
*/ int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg) { return call_int_hook(msg_queue_associate, msq, msqflg); } /** * security_msg_queue_msgctl() - Check if a msg queue operation is allowed * @msq: sysv ipc permission structure * @cmd: operation * * Check permission when a message control operation specified by @cmd is to be * performed on the message queue with permissions. * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd) { return call_int_hook(msg_queue_msgctl, msq, cmd); } /** * security_msg_queue_msgsnd() - Check if sending a sysv ipc message is allowed * @msq: sysv ipc permission structure * @msg: message * @msqflg: operation flags * * Check permission before a message, @msg, is enqueued on the message queue * with permissions specified in @msq. * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg) { return call_int_hook(msg_queue_msgsnd, msq, msg, msqflg); } /** * security_msg_queue_msgrcv() - Check if receiving a sysv ipc msg is allowed * @msq: sysv ipc permission structure * @msg: message * @target: target task * @type: type of message requested * @mode: operation flags * * Check permission before a message, @msg, is removed from the message queue. * The @target task structure contains a pointer to the process that will be * receiving the message (not equal to the current process when inline receives * are being performed). * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) { return call_int_hook(msg_queue_msgrcv, msq, msg, target, type, mode); } /** * security_shm_alloc() - Allocate a sysv shm LSM blob * @shp: sysv ipc permission structure * * Allocate and attach a security structure to the @shp security field. The * security field is initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_shm_alloc(struct kern_ipc_perm *shp) { int rc = lsm_ipc_alloc(shp); if (unlikely(rc)) return rc; rc = call_int_hook(shm_alloc_security, shp); if (unlikely(rc)) security_shm_free(shp); return rc; } /** * security_shm_free() - Free a sysv shm LSM blob * @shp: sysv ipc permission structure * * Deallocate the security structure @perm->security for the memory segment. */ void security_shm_free(struct kern_ipc_perm *shp) { call_void_hook(shm_free_security, shp); kfree(shp->security); shp->security = NULL; } /** * security_shm_associate() - Check if a sysv shm operation is allowed * @shp: sysv ipc permission structure * @shmflg: operation flags * * Check permission when a shared memory region is requested through the shmget * system call. This hook is only called when returning the shared memory * region identifier for an existing region, not when a new shared memory * region is created. * * Return: Returns 0 if permission is granted. */ int security_shm_associate(struct kern_ipc_perm *shp, int shmflg) { return call_int_hook(shm_associate, shp, shmflg); } /** * security_shm_shmctl() - Check if a sysv shm operation is allowed * @shp: sysv ipc permission structure * @cmd: operation * * Check permission when a shared memory control operation specified by @cmd is * to be performed on the shared memory region with permissions in @shp. * * Return: Return 0 if permission is granted. 
*/ int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd) { return call_int_hook(shm_shmctl, shp, cmd); } /** * security_shm_shmat() - Check if a sysv shm attach operation is allowed * @shp: sysv ipc permission structure * @shmaddr: address of memory region to attach * @shmflg: operation flags * * Check permissions prior to allowing the shmat system call to attach the * shared memory segment with permissions @shp to the data segment of the * calling process. The attaching address is specified by @shmaddr. * * Return: Returns 0 if permission is granted. */ int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg) { return call_int_hook(shm_shmat, shp, shmaddr, shmflg); } /** * security_sem_alloc() - Allocate a sysv semaphore LSM blob * @sma: sysv ipc permission structure * * Allocate and attach a security structure to the @sma security field. The * security field is initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_sem_alloc(struct kern_ipc_perm *sma) { int rc = lsm_ipc_alloc(sma); if (unlikely(rc)) return rc; rc = call_int_hook(sem_alloc_security, sma); if (unlikely(rc)) security_sem_free(sma); return rc; } /** * security_sem_free() - Free a sysv semaphore LSM blob * @sma: sysv ipc permission structure * * Deallocate security structure @sma->security for the semaphore. */ void security_sem_free(struct kern_ipc_perm *sma) { call_void_hook(sem_free_security, sma); kfree(sma->security); sma->security = NULL; } /** * security_sem_associate() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @semflg: operation flags * * Check permission when a semaphore is requested through the semget system * call. This hook is only called when returning the semaphore identifier for * an existing semaphore, not when a new one must be created. * * Return: Returns 0 if permission is granted. */ int security_sem_associate(struct kern_ipc_perm *sma, int semflg) { return call_int_hook(sem_associate, sma, semflg); } /** * security_sem_semctl() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @cmd: operation * * Check permission when a semaphore operation specified by @cmd is to be * performed on the semaphore. * * Return: Returns 0 if permission is granted. */ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd) { return call_int_hook(sem_semctl, sma, cmd); } /** * security_sem_semop() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @sops: operations to perform * @nsops: number of operations * @alter: flag indicating changes will be made * * Check permissions before performing operations on members of the semaphore * set. If the @alter flag is nonzero, the semaphore set may be modified. * * Return: Returns 0 if permission is granted. */ int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter) { return call_int_hook(sem_semop, sma, sops, nsops, alter); } /** * security_d_instantiate() - Populate an inode's LSM state based on a dentry * @dentry: dentry * @inode: inode * * Fill in @inode security information for a @dentry if allowed. 
 */
void security_d_instantiate(struct dentry *dentry, struct inode *inode)
{
	if (unlikely(inode && IS_PRIVATE(inode)))
		return;
	call_void_hook(d_instantiate, dentry, inode);
}
EXPORT_SYMBOL(security_d_instantiate);

/*
 * Please keep this in sync with its counterpart in security/lsm_syscalls.c
 */

/**
 * security_getselfattr - Read an LSM attribute of the current process.
 * @attr: which attribute to return
 * @uctx: the user-space destination for the information, or NULL
 * @size: pointer to the size of space available to receive the data
 * @flags: special handling options. LSM_FLAG_SINGLE indicates that only
 * attributes associated with the LSM identified in the passed @uctx are
 * reported.
 *
 * A NULL value for @uctx can be used to get both the number of attributes
 * and the size of the data.
 *
 * Returns the number of attributes found on success, negative value
 * on error. @size is reset to the total size of the data.
 * If @size is insufficient to contain the data -E2BIG is returned.
 */
int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
			 u32 __user *size, u32 flags)
{
	struct lsm_static_call *scall;
	struct lsm_ctx lctx = { .id = LSM_ID_UNDEF, };
	u8 __user *base = (u8 __user *)uctx;
	u32 entrysize;
	u32 total = 0;
	u32 left;
	bool toobig = false;
	bool single = false;
	int count = 0;
	int rc;

	if (attr == LSM_ATTR_UNDEF)
		return -EINVAL;
	if (size == NULL)
		return -EINVAL;
	if (get_user(left, size))
		return -EFAULT;

	if (flags) {
		/*
		 * Only flag supported is LSM_FLAG_SINGLE
		 */
		if (flags != LSM_FLAG_SINGLE || !uctx)
			return -EINVAL;
		if (copy_from_user(&lctx, uctx, sizeof(lctx)))
			return -EFAULT;
		/*
		 * If the LSM ID isn't specified it is an error.
		 */
		if (lctx.id == LSM_ID_UNDEF)
			return -EINVAL;
		single = true;
	}

	/*
	 * In the usual case gather all the data from the LSMs.
	 * In the single case only get the data from the LSM specified.
	 */
	lsm_for_each_hook(scall, getselfattr) {
		if (single && lctx.id != scall->hl->lsmid->id)
			continue;
		entrysize = left;
		if (base)
			uctx = (struct lsm_ctx __user *)(base + total);
		rc = scall->hl->hook.getselfattr(attr, uctx, &entrysize, flags);
		if (rc == -EOPNOTSUPP)
			continue;
		if (rc == -E2BIG) {
			rc = 0;
			left = 0;
			toobig = true;
		} else if (rc < 0)
			return rc;
		else
			left -= entrysize;

		total += entrysize;
		count += rc;
		if (single)
			break;
	}
	if (put_user(total, size))
		return -EFAULT;
	if (toobig)
		return -E2BIG;
	if (count == 0)
		return LSM_RET_DEFAULT(getselfattr);
	return count;
}

/*
 * Please keep this in sync with its counterpart in security/lsm_syscalls.c
 */

/**
 * security_setselfattr - Set an LSM attribute on the current process.
 * @attr: which attribute to set
 * @uctx: the user-space source for the information
 * @size: the size of the data
 * @flags: reserved for future use, must be 0
 *
 * Set an LSM attribute for the current process. The LSM, attribute
 * and new value are included in @uctx.
 *
 * Returns 0 on success, -EINVAL if the input is inconsistent, -EFAULT
 * if the user buffer is inaccessible, -E2BIG if @size is too big, or an
 * LSM specific failure.
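 *
 * Illustrative userspace-side sketch only (not kernel code), assuming the
 * uapi definitions in include/uapi/linux/lsm.h (struct lsm_ctx, LSM_ID_SELINUX,
 * LSM_ATTR_CURRENT) and a NUL-terminated label string already held by the
 * caller; error handling omitted:
 *
 *	struct lsm_ctx *lctx;
 *	size_t label_len = strlen(label) + 1;
 *	size_t total = sizeof(*lctx) + label_len;
 *
 *	lctx = calloc(1, total);
 *	lctx->id = LSM_ID_SELINUX;
 *	lctx->len = total;
 *	lctx->ctx_len = label_len;
 *	memcpy(lctx->ctx, label, label_len);
 *	syscall(__NR_lsm_set_self_attr, LSM_ATTR_CURRENT, lctx, total, 0);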
*/ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx, u32 size, u32 flags) { struct lsm_static_call *scall; struct lsm_ctx *lctx; int rc = LSM_RET_DEFAULT(setselfattr); u64 required_len; if (flags) return -EINVAL; if (size < sizeof(*lctx)) return -EINVAL; if (size > PAGE_SIZE) return -E2BIG; lctx = memdup_user(uctx, size); if (IS_ERR(lctx)) return PTR_ERR(lctx); if (size < lctx->len || check_add_overflow(sizeof(*lctx), lctx->ctx_len, &required_len) || lctx->len < required_len) { rc = -EINVAL; goto free_out; } lsm_for_each_hook(scall, setselfattr) if ((scall->hl->lsmid->id) == lctx->id) { rc = scall->hl->hook.setselfattr(attr, lctx, size, flags); break; } free_out: kfree(lctx); return rc; } /** * security_getprocattr() - Read an attribute for a task * @p: the task * @lsmid: LSM identification * @name: attribute name * @value: attribute value * * Read attribute @name for task @p and store it into @value if allowed. * * Return: Returns the length of @value on success, a negative value otherwise. */ int security_getprocattr(struct task_struct *p, int lsmid, const char *name, char **value) { struct lsm_static_call *scall; lsm_for_each_hook(scall, getprocattr) { if (lsmid != 0 && lsmid != scall->hl->lsmid->id) continue; return scall->hl->hook.getprocattr(p, name, value); } return LSM_RET_DEFAULT(getprocattr); } /** * security_setprocattr() - Set an attribute for a task * @lsmid: LSM identification * @name: attribute name * @value: attribute value * @size: attribute value size * * Write (set) the current task's attribute @name to @value, size @size if * allowed. * * Return: Returns bytes written on success, a negative value otherwise. */ int security_setprocattr(int lsmid, const char *name, void *value, size_t size) { struct lsm_static_call *scall; lsm_for_each_hook(scall, setprocattr) { if (lsmid != 0 && lsmid != scall->hl->lsmid->id) continue; return scall->hl->hook.setprocattr(name, value, size); } return LSM_RET_DEFAULT(setprocattr); } /** * security_netlink_send() - Save info and check if netlink sending is allowed * @sk: sending socket * @skb: netlink message * * Save security information for a netlink message so that permission checking * can be performed when the message is processed. The security information * can be saved using the eff_cap field of the netlink_skb_parms structure. * Also may be used to provide fine grained control over message transmission. * * Return: Returns 0 if the information was successfully saved and message is * allowed to be transmitted. */ int security_netlink_send(struct sock *sk, struct sk_buff *skb) { return call_int_hook(netlink_send, sk, skb); } /** * security_ismaclabel() - Check if the named attribute is a MAC label * @name: full extended attribute name * * Check if the extended attribute specified by @name represents a MAC label. * * Return: Returns 1 if name is a MAC attribute otherwise returns 0. */ int security_ismaclabel(const char *name) { return call_int_hook(ismaclabel, name); } EXPORT_SYMBOL(security_ismaclabel); /** * security_secid_to_secctx() - Convert a secid to a secctx * @secid: secid * @cp: the LSM context * * Convert secid to security context. If @cp is NULL the length of the * result will be returned, but no data will be returned. This * does mean that the length could change between calls to check the length and * the next call which actually allocates and returns the data. * * Return: Return length of data on success, error on failure. 
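 *
 * Typical caller pattern, as an illustrative sketch (assuming the context
 * string and its length are returned in @cp->context and @cp->len; error
 * paths abbreviated):
 *
 *	struct lsm_context ctx;
 *	int len;
 *
 *	len = security_secid_to_secctx(secid, &ctx);
 *	if (len < 0)
 *		return len;
 *	// consume ctx.context (length ctx.len) here ...
 *	security_release_secctx(&ctx);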
*/ int security_secid_to_secctx(u32 secid, struct lsm_context *cp) { return call_int_hook(secid_to_secctx, secid, cp); } EXPORT_SYMBOL(security_secid_to_secctx); /** * security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx * @prop: lsm specific information * @cp: the LSM context * * Convert a @prop entry to security context. If @cp is NULL the * length of the result will be returned. This does mean that the * length could change between calls to check the length and the * next call which actually allocates and returns the @cp. * * Return: Return length of data on success, error on failure. */ int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp) { return call_int_hook(lsmprop_to_secctx, prop, cp); } EXPORT_SYMBOL(security_lsmprop_to_secctx); /** * security_secctx_to_secid() - Convert a secctx to a secid * @secdata: secctx * @seclen: length of secctx * @secid: secid * * Convert security context to secid. * * Return: Returns 0 on success, error on failure. */ int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { *secid = 0; return call_int_hook(secctx_to_secid, secdata, seclen, secid); } EXPORT_SYMBOL(security_secctx_to_secid); /** * security_release_secctx() - Free a secctx buffer * @cp: the security context * * Release the security context. */ void security_release_secctx(struct lsm_context *cp) { call_void_hook(release_secctx, cp); memset(cp, 0, sizeof(*cp)); } EXPORT_SYMBOL(security_release_secctx); /** * security_inode_invalidate_secctx() - Invalidate an inode's security label * @inode: inode * * Notify the security module that it must revalidate the security context of * an inode. */ void security_inode_invalidate_secctx(struct inode *inode) { call_void_hook(inode_invalidate_secctx, inode); } EXPORT_SYMBOL(security_inode_invalidate_secctx); /** * security_inode_notifysecctx() - Notify the LSM of an inode's security label * @inode: inode * @ctx: secctx * @ctxlen: length of secctx * * Notify the security module of what the security context of an inode should * be. Initializes the incore security context managed by the security module * for this inode. Example usage: NFS client invokes this hook to initialize * the security context in its incore inode to the value provided by the server * for the file when the server returned the file's attributes to the client. * Must be called with inode->i_mutex locked. * * Return: Returns 0 on success, error on failure. */ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { return call_int_hook(inode_notifysecctx, inode, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_notifysecctx); /** * security_inode_setsecctx() - Change the security label of an inode * @dentry: inode * @ctx: secctx * @ctxlen: length of secctx * * Change the security context of an inode. Updates the incore security * context managed by the security module and invokes the fs code as needed * (via __vfs_setxattr_noperm) to update any backing xattrs that represent the * context. Example usage: NFS server invokes this hook to change the security * context in its incore inode and on the backing filesystem to a value * provided by the client on a SETATTR operation. Must be called with * inode->i_mutex locked. * * Return: Returns 0 on success, error on failure. 
*/ int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) { return call_int_hook(inode_setsecctx, dentry, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_setsecctx); /** * security_inode_getsecctx() - Get the security label of an inode * @inode: inode * @cp: security context * * On success, returns 0 and fills out @cp with the security context * for the given @inode. * * Return: Returns 0 on success, error on failure. */ int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp) { memset(cp, 0, sizeof(*cp)); return call_int_hook(inode_getsecctx, inode, cp); } EXPORT_SYMBOL(security_inode_getsecctx); #ifdef CONFIG_WATCH_QUEUE /** * security_post_notification() - Check if a watch notification can be posted * @w_cred: credentials of the task that set the watch * @cred: credentials of the task which triggered the watch * @n: the notification * * Check to see if a watch notification can be posted to a particular queue. * * Return: Returns 0 if permission is granted. */ int security_post_notification(const struct cred *w_cred, const struct cred *cred, struct watch_notification *n) { return call_int_hook(post_notification, w_cred, cred, n); } #endif /* CONFIG_WATCH_QUEUE */ #ifdef CONFIG_KEY_NOTIFICATIONS /** * security_watch_key() - Check if a task is allowed to watch for key events * @key: the key to watch * * Check to see if a process is allowed to watch for event notifications from * a key or keyring. * * Return: Returns 0 if permission is granted. */ int security_watch_key(struct key *key) { return call_int_hook(watch_key, key); } #endif /* CONFIG_KEY_NOTIFICATIONS */ #ifdef CONFIG_SECURITY_NETWORK /** * security_unix_stream_connect() - Check if a AF_UNIX stream is allowed * @sock: originating sock * @other: peer sock * @newsk: new sock * * Check permissions before establishing a Unix domain stream connection * between @sock and @other. * * The @unix_stream_connect and @unix_may_send hooks were necessary because * Linux provides an alternative to the conventional file name space for Unix * domain sockets. Whereas binding and connecting to sockets in the file name * space is mediated by the typical file permissions (and caught by the mknod * and permission hooks in inode_security_ops), binding and connecting to * sockets in the abstract name space is completely unmediated. Sufficient * control of Unix domain sockets in the abstract name space isn't possible * using only the socket layer hooks, since we need to know the actual target * socket, which is not looked up until we are inside the af_unix code. * * Return: Returns 0 if permission is granted. */ int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return call_int_hook(unix_stream_connect, sock, other, newsk); } EXPORT_SYMBOL(security_unix_stream_connect); /** * security_unix_may_send() - Check if AF_UNIX socket can send datagrams * @sock: originating sock * @other: peer sock * * Check permissions before connecting or sending datagrams from @sock to * @other. * * The @unix_stream_connect and @unix_may_send hooks were necessary because * Linux provides an alternative to the conventional file name space for Unix * domain sockets. Whereas binding and connecting to sockets in the file name * space is mediated by the typical file permissions (and caught by the mknod * and permission hooks in inode_security_ops), binding and connecting to * sockets in the abstract name space is completely unmediated. 
Sufficient * control of Unix domain sockets in the abstract name space isn't possible * using only the socket layer hooks, since we need to know the actual target * socket, which is not looked up until we are inside the af_unix code. * * Return: Returns 0 if permission is granted. */ int security_unix_may_send(struct socket *sock, struct socket *other) { return call_int_hook(unix_may_send, sock, other); } EXPORT_SYMBOL(security_unix_may_send); /** * security_socket_create() - Check if creating a new socket is allowed * @family: protocol family * @type: communications type * @protocol: requested protocol * @kern: set to 1 if a kernel socket is requested * * Check permissions prior to creating a new socket. * * Return: Returns 0 if permission is granted. */ int security_socket_create(int family, int type, int protocol, int kern) { return call_int_hook(socket_create, family, type, protocol, kern); } /** * security_socket_post_create() - Initialize a newly created socket * @sock: socket * @family: protocol family * @type: communications type * @protocol: requested protocol * @kern: set to 1 if a kernel socket is requested * * This hook allows a module to update or allocate a per-socket security * structure. Note that the security field was not added directly to the socket * structure, but rather, the socket security information is stored in the * associated inode. Typically, the inode alloc_security hook will allocate * and attach security information to SOCK_INODE(sock)->i_security. This hook * may be used to update the SOCK_INODE(sock)->i_security field with additional * information that wasn't available when the inode was allocated. * * Return: Returns 0 if permission is granted. */ int security_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { return call_int_hook(socket_post_create, sock, family, type, protocol, kern); } /** * security_socket_socketpair() - Check if creating a socketpair is allowed * @socka: first socket * @sockb: second socket * * Check permissions before creating a fresh pair of sockets. * * Return: Returns 0 if permission is granted and the connection was * established. */ int security_socket_socketpair(struct socket *socka, struct socket *sockb) { return call_int_hook(socket_socketpair, socka, sockb); } EXPORT_SYMBOL(security_socket_socketpair); /** * security_socket_bind() - Check if a socket bind operation is allowed * @sock: socket * @address: requested bind address * @addrlen: length of address * * Check permission before socket protocol layer bind operation is performed * and the socket @sock is bound to the address specified in the @address * parameter. * * Return: Returns 0 if permission is granted. */ int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { return call_int_hook(socket_bind, sock, address, addrlen); } /** * security_socket_connect() - Check if a socket connect operation is allowed * @sock: socket * @address: address of remote connection point * @addrlen: length of address * * Check permission before socket protocol layer connect operation attempts to * connect socket @sock to a remote address, @address. * * Return: Returns 0 if permission is granted. 
 */
int security_socket_connect(struct socket *sock,
			    struct sockaddr *address, int addrlen)
{
	return call_int_hook(socket_connect, sock, address, addrlen);
}

/**
 * security_socket_listen() - Check if a socket is allowed to listen
 * @sock: socket
 * @backlog: connection queue size
 *
 * Check permission before socket protocol layer listen operation.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_socket_listen(struct socket *sock, int backlog)
{
	return call_int_hook(socket_listen, sock, backlog);
}

/**
 * security_socket_accept() - Check if a socket is allowed to accept connections
 * @sock: listening socket
 * @newsock: newly created connection socket
 *
 * Check permission before accepting a new connection. Note that the new
 * socket, @newsock, has been created and some information copied to it, but
 * the accept operation has not actually been performed.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_socket_accept(struct socket *sock, struct socket *newsock)
{
	return call_int_hook(socket_accept, sock, newsock);
}

/**
 * security_socket_sendmsg() - Check if sending a message is allowed
 * @sock: sending socket
 * @msg: message to send
 * @size: size of message
 *
 * Check permission before transmitting a message to another socket.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
{
	return call_int_hook(socket_sendmsg, sock, msg, size);
}

/**
 * security_socket_recvmsg() - Check if receiving a message is allowed
 * @sock: receiving socket
 * @msg: message to receive
 * @size: size of message
 * @flags: operational flags
 *
 * Check permission before receiving a message from a socket.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
			    int size, int flags)
{
	return call_int_hook(socket_recvmsg, sock, msg, size, flags);
}

/**
 * security_socket_getsockname() - Check if reading the socket addr is allowed
 * @sock: socket
 *
 * Check permission before reading the local address (name) of the socket
 * object.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_socket_getsockname(struct socket *sock)
{
	return call_int_hook(socket_getsockname, sock);
}

/**
 * security_socket_getpeername() - Check if reading the peer's addr is allowed
 * @sock: socket
 *
 * Check permission before reading the remote address (name) of a socket
 * object.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_socket_getpeername(struct socket *sock)
{
	return call_int_hook(socket_getpeername, sock);
}

/**
 * security_socket_getsockopt() - Check if reading a socket option is allowed
 * @sock: socket
 * @level: option's protocol level
 * @optname: option name
 *
 * Check permissions before retrieving the options associated with socket
 * @sock.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_socket_getsockopt(struct socket *sock, int level, int optname)
{
	return call_int_hook(socket_getsockopt, sock, level, optname);
}

/**
 * security_socket_setsockopt() - Check if setting a socket option is allowed
 * @sock: socket
 * @level: option's protocol level
 * @optname: option name
 *
 * Check permissions before setting the options associated with socket @sock.
 *
 * Return: Returns 0 if permission is granted.
*/ int security_socket_setsockopt(struct socket *sock, int level, int optname) { return call_int_hook(socket_setsockopt, sock, level, optname); } /** * security_socket_shutdown() - Checks if shutting down the socket is allowed * @sock: socket * @how: flag indicating how sends and receives are handled * * Checks permission before all or part of a connection on the socket @sock is * shut down. * * Return: Returns 0 if permission is granted. */ int security_socket_shutdown(struct socket *sock, int how) { return call_int_hook(socket_shutdown, sock, how); } /** * security_sock_rcv_skb() - Check if an incoming network packet is allowed * @sk: destination sock * @skb: incoming packet * * Check permissions on incoming network packets. This hook is distinct from * Netfilter's IP input hooks since it is the first time that the incoming * sk_buff @skb has been associated with a particular socket, @sk. Must not * sleep inside this hook because some callers hold spinlocks. * * Return: Returns 0 if permission is granted. */ int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { return call_int_hook(socket_sock_rcv_skb, sk, skb); } EXPORT_SYMBOL(security_sock_rcv_skb); /** * security_socket_getpeersec_stream() - Get the remote peer label * @sock: socket * @optval: destination buffer * @optlen: size of peer label copied into the buffer * @len: maximum size of the destination buffer * * This hook allows the security module to provide peer socket security state * for unix or connected tcp sockets to userspace via getsockopt SO_GETPEERSEC. * For tcp sockets this can be meaningful if the socket is associated with an * ipsec SA. * * Return: Returns 0 if all is well, otherwise, typical getsockopt return * values. */ int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) { return call_int_hook(socket_getpeersec_stream, sock, optval, optlen, len); } /** * security_socket_getpeersec_dgram() - Get the remote peer label * @sock: socket * @skb: datagram packet * @secid: remote peer label secid * * This hook allows the security module to provide peer socket security state * for udp sockets on a per-packet basis to userspace via getsockopt * SO_GETPEERSEC. The application must first have indicated the IP_PASSSEC * option via getsockopt. It can then retrieve the security state returned by * this hook for a packet via the SCM_SECURITY ancillary message type. * * Return: Returns 0 on success, error on failure. */ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { return call_int_hook(socket_getpeersec_dgram, sock, skb, secid); } EXPORT_SYMBOL(security_socket_getpeersec_dgram); /** * lsm_sock_alloc - allocate a composite sock blob * @sock: the sock that needs a blob * @gfp: allocation mode * * Allocate the sock blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_sock_alloc(struct sock *sock, gfp_t gfp) { return lsm_blob_alloc(&sock->sk_security, blob_sizes.lbs_sock, gfp); } /** * security_sk_alloc() - Allocate and initialize a sock's LSM blob * @sk: sock * @family: protocol family * @priority: gfp flags * * Allocate and attach a security structure to the sk->sk_security field, which * is used to copy security attributes between local stream sockets. * * Return: Returns 0 on success, error on failure. 
 */
int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
	int rc = lsm_sock_alloc(sk, priority);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(sk_alloc_security, sk, family, priority);
	if (unlikely(rc))
		security_sk_free(sk);
	return rc;
}

/**
 * security_sk_free() - Free the sock's LSM blob
 * @sk: sock
 *
 * Deallocate security structure.
 */
void security_sk_free(struct sock *sk)
{
	call_void_hook(sk_free_security, sk);
	kfree(sk->sk_security);
	sk->sk_security = NULL;
}

/**
 * security_sk_clone() - Clone a sock's LSM state
 * @sk: original sock
 * @newsk: target sock
 *
 * Clone/copy security structure.
 */
void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
	call_void_hook(sk_clone_security, sk, newsk);
}
EXPORT_SYMBOL(security_sk_clone);

/**
 * security_sk_classify_flow() - Set a flow's secid based on socket
 * @sk: original socket
 * @flic: target flow
 *
 * Set the target flow's secid to socket's secid.
 */
void security_sk_classify_flow(const struct sock *sk,
			       struct flowi_common *flic)
{
	call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);

/**
 * security_req_classify_flow() - Set a flow's secid based on request_sock
 * @req: request_sock
 * @flic: target flow
 *
 * Sets @flic's secid to @req's secid.
 */
void security_req_classify_flow(const struct request_sock *req,
				struct flowi_common *flic)
{
	call_void_hook(req_classify_flow, req, flic);
}
EXPORT_SYMBOL(security_req_classify_flow);

/**
 * security_sock_graft() - Reconcile LSM state when grafting a sock on a socket
 * @sk: sock being grafted
 * @parent: target parent socket
 *
 * Sets @parent's inode secid to @sk's secid and updates @sk with any necessary
 * LSM state from @parent.
 */
void security_sock_graft(struct sock *sk, struct socket *parent)
{
	call_void_hook(sock_graft, sk, parent);
}
EXPORT_SYMBOL(security_sock_graft);

/**
 * security_inet_conn_request() - Set request_sock state using incoming connect
 * @sk: parent listening sock
 * @skb: incoming connection
 * @req: new request_sock
 *
 * Initialize the @req LSM state based on @sk and the incoming connect in @skb.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req)
{
	return call_int_hook(inet_conn_request, sk, skb, req);
}
EXPORT_SYMBOL(security_inet_conn_request);

/**
 * security_inet_csk_clone() - Set new sock LSM state based on request_sock
 * @newsk: new sock
 * @req: connection request_sock
 *
 * Set the LSM state of @newsk using the LSM state from @req.
 */
void security_inet_csk_clone(struct sock *newsk,
			     const struct request_sock *req)
{
	call_void_hook(inet_csk_clone, newsk, req);
}

/**
 * security_inet_conn_established() - Update sock's LSM state with connection
 * @sk: sock
 * @skb: connection packet
 *
 * Update @sk's LSM state to represent a new connection from @skb.
 */
void security_inet_conn_established(struct sock *sk, struct sk_buff *skb)
{
	call_void_hook(inet_conn_established, sk, skb);
}
EXPORT_SYMBOL(security_inet_conn_established);

/**
 * security_secmark_relabel_packet() - Check if setting a secmark is allowed
 * @secid: new secmark value
 *
 * Check if the process should be allowed to relabel packets to @secid.
 *
 * Return: Returns 0 if permission is granted.
*/ int security_secmark_relabel_packet(u32 secid) { return call_int_hook(secmark_relabel_packet, secid); } EXPORT_SYMBOL(security_secmark_relabel_packet); /** * security_secmark_refcount_inc() - Increment the secmark labeling rule count * * Tells the LSM to increment the number of secmark labeling rules loaded. */ void security_secmark_refcount_inc(void) { call_void_hook(secmark_refcount_inc); } EXPORT_SYMBOL(security_secmark_refcount_inc); /** * security_secmark_refcount_dec() - Decrement the secmark labeling rule count * * Tells the LSM to decrement the number of secmark labeling rules loaded. */ void security_secmark_refcount_dec(void) { call_void_hook(secmark_refcount_dec); } EXPORT_SYMBOL(security_secmark_refcount_dec); /** * security_tun_dev_alloc_security() - Allocate a LSM blob for a TUN device * @security: pointer to the LSM blob * * This hook allows a module to allocate a security structure for a TUN device, * returning the pointer in @security. * * Return: Returns a zero on success, negative values on failure. */ int security_tun_dev_alloc_security(void **security) { int rc; rc = lsm_blob_alloc(security, blob_sizes.lbs_tun_dev, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(tun_dev_alloc_security, *security); if (rc) { kfree(*security); *security = NULL; } return rc; } EXPORT_SYMBOL(security_tun_dev_alloc_security); /** * security_tun_dev_free_security() - Free a TUN device LSM blob * @security: LSM blob * * This hook allows a module to free the security structure for a TUN device. */ void security_tun_dev_free_security(void *security) { kfree(security); } EXPORT_SYMBOL(security_tun_dev_free_security); /** * security_tun_dev_create() - Check if creating a TUN device is allowed * * Check permissions prior to creating a new TUN device. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_create(void) { return call_int_hook(tun_dev_create); } EXPORT_SYMBOL(security_tun_dev_create); /** * security_tun_dev_attach_queue() - Check if attaching a TUN queue is allowed * @security: TUN device LSM blob * * Check permissions prior to attaching to a TUN device queue. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_attach_queue(void *security) { return call_int_hook(tun_dev_attach_queue, security); } EXPORT_SYMBOL(security_tun_dev_attach_queue); /** * security_tun_dev_attach() - Update TUN device LSM state on attach * @sk: associated sock * @security: TUN device LSM blob * * This hook can be used by the module to update any security state associated * with the TUN device's sock structure. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_attach(struct sock *sk, void *security) { return call_int_hook(tun_dev_attach, sk, security); } EXPORT_SYMBOL(security_tun_dev_attach); /** * security_tun_dev_open() - Update TUN device LSM state on open * @security: TUN device LSM blob * * This hook can be used by the module to update any security state associated * with the TUN device's security structure. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_open(void *security) { return call_int_hook(tun_dev_open, security); } EXPORT_SYMBOL(security_tun_dev_open); /** * security_sctp_assoc_request() - Update the LSM on a SCTP association req * @asoc: SCTP association * @skb: packet requesting the association * * Passes the @asoc and @chunk->skb of the association INIT packet to the LSM. * * Return: Returns 0 on success, error on failure. 
 */
int security_sctp_assoc_request(struct sctp_association *asoc,
				struct sk_buff *skb)
{
	return call_int_hook(sctp_assoc_request, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_request);

/**
 * security_sctp_bind_connect() - Validate a list of addrs for a SCTP option
 * @sk: socket
 * @optname: SCTP option to validate
 * @address: list of IP addresses to validate
 * @addrlen: length of the address list
 *
 * Validate permissions required for each address associated with sock @sk.
 * Depending on @optname, the addresses will be treated as either a connect or
 * bind service. The @addrlen is calculated on each IPv4 and IPv6 address using
 * sizeof(struct sockaddr_in) or sizeof(struct sockaddr_in6).
 *
 * Return: Returns 0 on success, error on failure.
 */
int security_sctp_bind_connect(struct sock *sk, int optname,
			       struct sockaddr *address, int addrlen)
{
	return call_int_hook(sctp_bind_connect, sk, optname, address, addrlen);
}
EXPORT_SYMBOL(security_sctp_bind_connect);

/**
 * security_sctp_sk_clone() - Clone a SCTP sock's LSM state
 * @asoc: SCTP association
 * @sk: original sock
 * @newsk: target sock
 *
 * Called whenever a new socket is created by accept(2) (i.e. a TCP style
 * socket) or when a socket is 'peeled off' e.g. userspace calls
 * sctp_peeloff(3).
 */
void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
			    struct sock *newsk)
{
	call_void_hook(sctp_sk_clone, asoc, sk, newsk);
}
EXPORT_SYMBOL(security_sctp_sk_clone);

/**
 * security_sctp_assoc_established() - Update LSM state when assoc established
 * @asoc: SCTP association
 * @skb: packet establishing the association
 *
 * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet to the
 * security module.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_sctp_assoc_established(struct sctp_association *asoc,
				    struct sk_buff *skb)
{
	return call_int_hook(sctp_assoc_established, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_established);

/**
 * security_mptcp_add_subflow() - Inherit the LSM label from the MPTCP socket
 * @sk: the owning MPTCP socket
 * @ssk: the new subflow
 *
 * Update the labeling for the given MPTCP subflow, to match the one of the
 * owning MPTCP socket. This hook has to be called after the socket creation
 * and initialization via the security_socket_create() and
 * security_socket_post_create() LSM hooks.
 *
 * Return: Returns 0 on success or a negative error code on failure.
 */
int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
{
	return call_int_hook(mptcp_add_subflow, sk, ssk);
}

#endif /* CONFIG_SECURITY_NETWORK */

#ifdef CONFIG_SECURITY_INFINIBAND
/**
 * security_ib_pkey_access() - Check if access to an IB pkey is allowed
 * @sec: LSM blob
 * @subnet_prefix: subnet prefix of the port
 * @pkey: IB pkey
 *
 * Check permission to access a pkey when modifying a QP.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
{
	return call_int_hook(ib_pkey_access, sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(security_ib_pkey_access);

/**
 * security_ib_endport_manage_subnet() - Check if SMP traffic is allowed
 * @sec: LSM blob
 * @dev_name: IB device name
 * @port_num: port number
 *
 * Check permissions to send and receive SMPs on an end port.
 *
 * Return: Returns 0 if permission is granted.
*/ int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) { return call_int_hook(ib_endport_manage_subnet, sec, dev_name, port_num); } EXPORT_SYMBOL(security_ib_endport_manage_subnet); /** * security_ib_alloc_security() - Allocate an Infiniband LSM blob * @sec: LSM blob * * Allocate a security structure for Infiniband objects. * * Return: Returns 0 on success, non-zero on failure. */ int security_ib_alloc_security(void **sec) { int rc; rc = lsm_blob_alloc(sec, blob_sizes.lbs_ib, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(ib_alloc_security, *sec); if (rc) { kfree(*sec); *sec = NULL; } return rc; } EXPORT_SYMBOL(security_ib_alloc_security); /** * security_ib_free_security() - Free an Infiniband LSM blob * @sec: LSM blob * * Deallocate an Infiniband security structure. */ void security_ib_free_security(void *sec) { kfree(sec); } EXPORT_SYMBOL(security_ib_free_security); #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM /** * security_xfrm_policy_alloc() - Allocate a xfrm policy LSM blob * @ctxp: xfrm security context being added to the SPD * @sec_ctx: security label provided by userspace * @gfp: gfp flags * * Allocate a security structure to the xp->security field; the security field * is initialized to NULL when the xfrm_policy is allocated. * * Return: Return 0 if operation was successful. */ int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) { return call_int_hook(xfrm_policy_alloc_security, ctxp, sec_ctx, gfp); } EXPORT_SYMBOL(security_xfrm_policy_alloc); /** * security_xfrm_policy_clone() - Clone xfrm policy LSM state * @old_ctx: xfrm security context * @new_ctxp: target xfrm security context * * Allocate a security structure in new_ctxp that contains the information from * the old_ctx structure. * * Return: Return 0 if operation was successful. */ int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp) { return call_int_hook(xfrm_policy_clone_security, old_ctx, new_ctxp); } /** * security_xfrm_policy_free() - Free a xfrm security context * @ctx: xfrm security context * * Free LSM resources associated with @ctx. */ void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { call_void_hook(xfrm_policy_free_security, ctx); } EXPORT_SYMBOL(security_xfrm_policy_free); /** * security_xfrm_policy_delete() - Check if deleting a xfrm policy is allowed * @ctx: xfrm security context * * Authorize deletion of a SPD entry. * * Return: Returns 0 if permission is granted. */ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { return call_int_hook(xfrm_policy_delete_security, ctx); } /** * security_xfrm_state_alloc() - Allocate a xfrm state LSM blob * @x: xfrm state being added to the SAD * @sec_ctx: security label provided by userspace * * Allocate a security structure to the @x->security field; the security field * is initialized to NULL when the xfrm_state is allocated. Set the context to * correspond to @sec_ctx. * * Return: Return 0 if operation was successful. 
*/ int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) { return call_int_hook(xfrm_state_alloc, x, sec_ctx); } EXPORT_SYMBOL(security_xfrm_state_alloc); /** * security_xfrm_state_alloc_acquire() - Allocate a xfrm state LSM blob * @x: xfrm state being added to the SAD * @polsec: associated policy's security context * @secid: secid from the flow * * Allocate a security structure to the x->security field; the security field * is initialized to NULL when the xfrm_state is allocated. Set the context to * correspond to secid. * * Return: Returns 0 if operation was successful. */ int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) { return call_int_hook(xfrm_state_alloc_acquire, x, polsec, secid); } /** * security_xfrm_state_delete() - Check if deleting a xfrm state is allowed * @x: xfrm state * * Authorize deletion of x->security. * * Return: Returns 0 if permission is granted. */ int security_xfrm_state_delete(struct xfrm_state *x) { return call_int_hook(xfrm_state_delete_security, x); } EXPORT_SYMBOL(security_xfrm_state_delete); /** * security_xfrm_state_free() - Free a xfrm state * @x: xfrm state * * Deallocate x->security. */ void security_xfrm_state_free(struct xfrm_state *x) { call_void_hook(xfrm_state_free_security, x); } /** * security_xfrm_policy_lookup() - Check if using a xfrm policy is allowed * @ctx: target xfrm security context * @fl_secid: flow secid used to authorize access * * Check permission when a flow selects a xfrm_policy for processing XFRMs on a * packet. The hook is called when selecting either a per-socket policy or a * generic xfrm policy. * * Return: Return 0 if permission is granted, -ESRCH otherwise, or -errno on * other errors. */ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid) { return call_int_hook(xfrm_policy_lookup, ctx, fl_secid); } /** * security_xfrm_state_pol_flow_match() - Check for a xfrm match * @x: xfrm state to match * @xp: xfrm policy to check for a match * @flic: flow to check for a match. * * Check @xp and @flic for a match with @x. * * Return: Returns 1 if there is a match. */ int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic) { struct lsm_static_call *scall; int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match); /* * Since this function is expected to return 0 or 1, the judgment * becomes difficult if multiple LSMs supply this call. Fortunately, * we can use the first LSM's judgment because currently only SELinux * supplies this call. * * For speed optimization, we explicitly break the loop rather than * using the macro */ lsm_for_each_hook(scall, xfrm_state_pol_flow_match) { rc = scall->hl->hook.xfrm_state_pol_flow_match(x, xp, flic); break; } return rc; } /** * security_xfrm_decode_session() - Determine the xfrm secid for a packet * @skb: xfrm packet * @secid: secid * * Decode the packet in @skb and return the security label in @secid. * * Return: Return 0 if all xfrms used have the same secid. 
*/ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) { return call_int_hook(xfrm_decode_session, skb, secid, 1); } void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic) { int rc = call_int_hook(xfrm_decode_session, skb, &flic->flowic_secid, 0); BUG_ON(rc); } EXPORT_SYMBOL(security_skb_classify_flow); #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_KEYS /** * security_key_alloc() - Allocate and initialize a kernel key LSM blob * @key: key * @cred: credentials * @flags: allocation flags * * Permit allocation of a key and assign security data. Note that key does not * have a serial number assigned at this point. * * Return: Return 0 if permission is granted, -ve error otherwise. */ int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { int rc = lsm_key_alloc(key); if (unlikely(rc)) return rc; rc = call_int_hook(key_alloc, key, cred, flags); if (unlikely(rc)) security_key_free(key); return rc; } /** * security_key_free() - Free a kernel key LSM blob * @key: key * * Notification of destruction; free security data. */ void security_key_free(struct key *key) { kfree(key->security); key->security = NULL; } /** * security_key_permission() - Check if a kernel key operation is allowed * @key_ref: key reference * @cred: credentials of actor requesting access * @need_perm: requested permissions * * See whether a specific operational right is granted to a process on a key. * * Return: Return 0 if permission is granted, -ve error otherwise. */ int security_key_permission(key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) { return call_int_hook(key_permission, key_ref, cred, need_perm); } /** * security_key_getsecurity() - Get the key's security label * @key: key * @buffer: security label buffer * * Get a textual representation of the security context attached to a key for * the purposes of honouring KEYCTL_GETSECURITY. This function allocates the * storage for the NUL-terminated string and the caller should free it. * * Return: Returns the length of @buffer (including terminating NUL) or -ve if * an error occurs. May also return 0 (and a NULL buffer pointer) if * there is no security label assigned to the key. */ int security_key_getsecurity(struct key *key, char **buffer) { *buffer = NULL; return call_int_hook(key_getsecurity, key, buffer); } /** * security_key_post_create_or_update() - Notification of key create or update * @keyring: keyring to which the key is linked to * @key: created or updated key * @payload: data used to instantiate or update the key * @payload_len: length of payload * @flags: key flags * @create: flag indicating whether the key was created or updated * * Notify the caller of a key creation or update. */ void security_key_post_create_or_update(struct key *keyring, struct key *key, const void *payload, size_t payload_len, unsigned long flags, bool create) { call_void_hook(key_post_create_or_update, keyring, key, payload, payload_len, flags, create); } #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT /** * security_audit_rule_init() - Allocate and init an LSM audit rule struct * @field: audit action * @op: rule operator * @rulestr: rule context * @lsmrule: receive buffer for audit rule struct * @gfp: GFP flag used for kmalloc * * Allocate and initialize an LSM audit rule structure. * * Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of * an invalid rule. 
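 *
 * Illustrative lifecycle sketch (hypothetical caller; @prop is a struct
 * lsm_prop filled in elsewhere, error handling abbreviated): the rule is
 * compiled once, matched per event, and freed when the audit rule is removed.
 *
 *	void *lsmrule = NULL;
 *	int matched;
 *
 *	if (security_audit_rule_init(field, op, rulestr, &lsmrule, GFP_KERNEL))
 *		goto bad_rule;
 *	// for each audited event:
 *	matched = security_audit_rule_match(&prop, field, op, lsmrule);
 *	// 1 means the event's label matched, 0 no match, negative on error
 *	// when the audit rule is deleted:
 *	security_audit_rule_free(lsmrule);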
 */
int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
			     gfp_t gfp)
{
	return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp);
}

/**
 * security_audit_rule_known() - Check if an audit rule contains LSM fields
 * @krule: audit rule
 *
 * Specifies whether given @krule contains any fields related to the current
 * LSM.
 *
 * Return: Returns 1 in case of relation found, 0 otherwise.
 */
int security_audit_rule_known(struct audit_krule *krule)
{
	return call_int_hook(audit_rule_known, krule);
}

/**
 * security_audit_rule_free() - Free an LSM audit rule struct
 * @lsmrule: audit rule struct
 *
 * Deallocate the LSM audit rule structure previously allocated by
 * audit_rule_init().
 */
void security_audit_rule_free(void *lsmrule)
{
	call_void_hook(audit_rule_free, lsmrule);
}

/**
 * security_audit_rule_match() - Check if a label matches an audit rule
 * @prop: security label
 * @field: LSM audit field
 * @op: matching operator
 * @lsmrule: audit rule
 *
 * Determine if given @prop matches a rule previously approved by
 * security_audit_rule_known().
 *
 * Return: Returns 1 if @prop matches the rule, 0 if it does not, -ERRNO on
 * failure.
 */
int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
			      void *lsmrule)
{
	return call_int_hook(audit_rule_match, prop, field, op, lsmrule);
}
#endif /* CONFIG_AUDIT */

#ifdef CONFIG_BPF_SYSCALL
/**
 * security_bpf() - Check if the bpf syscall operation is allowed
 * @cmd: command
 * @attr: bpf attribute
 * @size: size
 * @kernel: whether or not call originated from kernel
 *
 * Do an initial check for all bpf syscalls after the attribute is copied into
 * the kernel. The actual security module can implement its own rules to
 * check the specific cmd it needs.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
	return call_int_hook(bpf, cmd, attr, size, kernel);
}

/**
 * security_bpf_map() - Check if access to a bpf map is allowed
 * @map: bpf map
 * @fmode: mode
 *
 * Do a check when the kernel generates and returns a file descriptor for eBPF
 * maps.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
	return call_int_hook(bpf_map, map, fmode);
}

/**
 * security_bpf_prog() - Check if access to a bpf program is allowed
 * @prog: bpf program
 *
 * Do a check when the kernel generates and returns a file descriptor for eBPF
 * programs.
 *
 * Return: Returns 0 if permission is granted.
 */
int security_bpf_prog(struct bpf_prog *prog)
{
	return call_int_hook(bpf_prog, prog);
}

/**
 * security_bpf_map_create() - Check if BPF map creation is allowed
 * @map: BPF map object
 * @attr: BPF syscall attributes used to create BPF map
 * @token: BPF token used to grant user access
 * @kernel: whether or not call originated from kernel
 *
 * Do a check when the kernel creates a new BPF map. This is also the
 * point where the LSM blob is allocated for LSMs that need one.
 *
 * Return: Returns 0 on success, error on failure.
*/ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, struct bpf_token *token, bool kernel) { return call_int_hook(bpf_map_create, map, attr, token, kernel); } /** * security_bpf_prog_load() - Check if loading of BPF program is allowed * @prog: BPF program object * @attr: BPF syscall attributes used to create BPF program * @token: BPF token used to grant user access to BPF subsystem * @kernel: whether or not call originated from kernel * * Perform an access control check when the kernel loads a BPF program and * allocates associated BPF program object. This hook is also responsible for * allocating any required LSM state for the BPF program. * * Return: Returns 0 on success, error on failure. */ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token, bool kernel) { return call_int_hook(bpf_prog_load, prog, attr, token, kernel); } /** * security_bpf_token_create() - Check if creating of BPF token is allowed * @token: BPF token object * @attr: BPF syscall attributes used to create BPF token * @path: path pointing to BPF FS mount point from which BPF token is created * * Do a check when the kernel instantiates a new BPF token object from BPF FS * instance. This is also the point where LSM blob can be allocated for LSMs. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, const struct path *path) { return call_int_hook(bpf_token_create, token, attr, path); } /** * security_bpf_token_cmd() - Check if BPF token is allowed to delegate * requested BPF syscall command * @token: BPF token object * @cmd: BPF syscall command requested to be delegated by BPF token * * Do a check when the kernel decides whether provided BPF token should allow * delegation of requested BPF syscall command. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd) { return call_int_hook(bpf_token_cmd, token, cmd); } /** * security_bpf_token_capable() - Check if BPF token is allowed to delegate * requested BPF-related capability * @token: BPF token object * @cap: capabilities requested to be delegated by BPF token * * Do a check when the kernel decides whether provided BPF token should allow * delegation of requested BPF-related capabilities. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_capable(const struct bpf_token *token, int cap) { return call_int_hook(bpf_token_capable, token, cap); } /** * security_bpf_map_free() - Free a bpf map's LSM blob * @map: bpf map * * Clean up the security information stored inside bpf map. */ void security_bpf_map_free(struct bpf_map *map) { call_void_hook(bpf_map_free, map); } /** * security_bpf_prog_free() - Free a BPF program's LSM blob * @prog: BPF program struct * * Clean up the security information stored inside BPF program. */ void security_bpf_prog_free(struct bpf_prog *prog) { call_void_hook(bpf_prog_free, prog); } /** * security_bpf_token_free() - Free a BPF token's LSM blob * @token: BPF token struct * * Clean up the security information stored inside BPF token. */ void security_bpf_token_free(struct bpf_token *token) { call_void_hook(bpf_token_free, token); } #endif /* CONFIG_BPF_SYSCALL */ /** * security_locked_down() - Check if a kernel feature is allowed * @what: requested kernel feature * * Determine whether a kernel feature that potentially enables arbitrary code * execution in kernel space should be permitted. 
* * Return: Returns 0 if permission is granted. */ int security_locked_down(enum lockdown_reason what) { return call_int_hook(locked_down, what); } EXPORT_SYMBOL(security_locked_down); /** * security_bdev_alloc() - Allocate a block device LSM blob * @bdev: block device * * Allocate and attach a security structure to @bdev->bd_security. The * security field is initialized to NULL when the bdev structure is * allocated. * * Return: Return 0 if operation was successful. */ int security_bdev_alloc(struct block_device *bdev) { int rc = 0; rc = lsm_bdev_alloc(bdev); if (unlikely(rc)) return rc; rc = call_int_hook(bdev_alloc_security, bdev); if (unlikely(rc)) security_bdev_free(bdev); return rc; } EXPORT_SYMBOL(security_bdev_alloc); /** * security_bdev_free() - Free a block device's LSM blob * @bdev: block device * * Deallocate the bdev security structure and set @bdev->bd_security to NULL. */ void security_bdev_free(struct block_device *bdev) { if (!bdev->bd_security) return; call_void_hook(bdev_free_security, bdev); kfree(bdev->bd_security); bdev->bd_security = NULL; } EXPORT_SYMBOL(security_bdev_free); /** * security_bdev_setintegrity() - Set the device's integrity data * @bdev: block device * @type: type of integrity, e.g. hash digest, signature, etc * @value: the integrity value * @size: size of the integrity value * * Register a verified integrity measurement of a bdev with LSMs. * LSMs should free the previously saved data if @value is NULL. * Please note that the new hook should be invoked every time the security * information is updated to keep these data current. For example, in dm-verity, * if the mapping table is reloaded and configured to use a different dm-verity * target with a new roothash and signing information, the previously stored * data in the LSM blob will become obsolete. It is crucial to re-invoke the * hook to refresh these data and ensure they are up to date. This necessity * arises from the design of device-mapper, where a device-mapper device is * first created, and then targets are subsequently loaded into it. These * targets can be modified multiple times during the device's lifetime. * Therefore, while the LSM blob is allocated during the creation of the block * device, its actual contents are not initialized at this stage and can change * substantially over time. This includes alterations from data that the LSMs * 'trusts' to those they do not, making it essential to handle these changes * correctly. Failure to address this dynamic aspect could potentially allow * for bypassing LSM checks. * * Return: Returns 0 on success, negative values on failure. */ int security_bdev_setintegrity(struct block_device *bdev, enum lsm_integrity_type type, const void *value, size_t size) { return call_int_hook(bdev_setintegrity, bdev, type, value, size); } EXPORT_SYMBOL(security_bdev_setintegrity); #ifdef CONFIG_PERF_EVENTS /** * security_perf_event_open() - Check if a perf event open is allowed * @type: type of event * * Check whether the @type of perf_event_open syscall is allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_open(int type) { return call_int_hook(perf_event_open, type); } /** * security_perf_event_alloc() - Allocate a perf event LSM blob * @event: perf event * * Allocate and save perf_event security info. * * Return: Returns 0 on success, error on failure. 
*/ int security_perf_event_alloc(struct perf_event *event) { int rc; rc = lsm_blob_alloc(&event->security, blob_sizes.lbs_perf_event, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(perf_event_alloc, event); if (rc) { kfree(event->security); event->security = NULL; } return rc; } /** * security_perf_event_free() - Free a perf event LSM blob * @event: perf event * * Release (free) perf_event security info. */ void security_perf_event_free(struct perf_event *event) { kfree(event->security); event->security = NULL; } /** * security_perf_event_read() - Check if reading a perf event label is allowed * @event: perf event * * Read perf_event security info if allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_read(struct perf_event *event) { return call_int_hook(perf_event_read, event); } /** * security_perf_event_write() - Check if writing a perf event label is allowed * @event: perf event * * Write perf_event security info if allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_write(struct perf_event *event) { return call_int_hook(perf_event_write, event); } #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_IO_URING /** * security_uring_override_creds() - Check if overriding creds is allowed * @new: new credentials * * Check if the current task, executing an io_uring operation, is allowed to * override it's credentials with @new. * * Return: Returns 0 if permission is granted. */ int security_uring_override_creds(const struct cred *new) { return call_int_hook(uring_override_creds, new); } /** * security_uring_sqpoll() - Check if IORING_SETUP_SQPOLL is allowed * * Check whether the current task is allowed to spawn a io_uring polling thread * (IORING_SETUP_SQPOLL). * * Return: Returns 0 if permission is granted. */ int security_uring_sqpoll(void) { return call_int_hook(uring_sqpoll); } /** * security_uring_cmd() - Check if a io_uring passthrough command is allowed * @ioucmd: command * * Check whether the file_operations uring_cmd is allowed to run. * * Return: Returns 0 if permission is granted. */ int security_uring_cmd(struct io_uring_cmd *ioucmd) { return call_int_hook(uring_cmd, ioucmd); } /** * security_uring_allowed() - Check if io_uring_setup() is allowed * * Check whether the current task is allowed to call io_uring_setup(). * * Return: Returns 0 if permission is granted. */ int security_uring_allowed(void) { return call_int_hook(uring_allowed); } #endif /* CONFIG_IO_URING */ /** * security_initramfs_populated() - Notify LSMs that initramfs has been loaded * * Tells the LSMs the initramfs has been unpacked into the rootfs. */ void security_initramfs_populated(void) { call_void_hook(initramfs_populated); } |
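/*
 * Not part of security.c: a minimal sketch of the other side of the
 * call_int_hook() dispatch above -- an LSM supplying two of the hooks whose
 * wrappers appear in this file.  It assumes the LSM_HOOK_INIT() /
 * security_add_hooks() / DEFINE_LSM() registration interface from
 * <linux/lsm_hooks.h>; the "example" LSM and all example_* names are
 * hypothetical, and the bpf hook additionally assumes CONFIG_BPF_SYSCALL.
 */
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
#include <linux/bpf.h>

/* Backs security_locked_down(): permit every lockdown-gated feature. */
static int example_locked_down(enum lockdown_reason what)
{
        return 0;
}

/* Backs security_bpf(): observe the syscall command, refuse nothing. */
static int example_bpf(int cmd, union bpf_attr *attr, unsigned int size,
                       bool kernel)
{
        return 0;
}

static struct security_hook_list example_hooks[] __ro_after_init = {
        LSM_HOOK_INIT(locked_down, example_locked_down),
        LSM_HOOK_INIT(bpf, example_bpf),
};

/* Hypothetical identity; LSM_ID_UNDEF comes via <uapi/linux/lsm.h>. */
static const struct lsm_id example_lsmid = {
        .name = "example",
        .id = LSM_ID_UNDEF,
};

static int __init example_lsm_init(void)
{
        security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
                           &example_lsmid);
        return 0;
}

DEFINE_LSM(example) = {
        .name = "example",
        .init = example_lsm_init,
};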
// SPDX-License-Identifier: GPL-2.0-or-later /* * Checksum updating actions * * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org> */ #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/netlink.h> #include <net/netlink.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/icmp.h> #include <linux/icmpv6.h> #include <linux/igmp.h> #include <net/tcp.h> #include <net/udp.h> #include <net/ip6_checksum.h> #include <net/sctp/checksum.h> #include <net/act_api.h> #include
<net/pkt_cls.h> #include <linux/tc_act/tc_csum.h> #include <net/tc_act/tc_csum.h> #include <net/tc_wrapper.h> static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = { [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), }, }; static struct tc_action_ops act_csum_ops; static int tcf_csum_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct tcf_csum_params *params_new; struct nlattr *tb[TCA_CSUM_MAX + 1]; struct tcf_chain *goto_ch = NULL; struct tc_csum *parm; struct tcf_csum *p; int ret = 0, err; u32 index; if (nla == NULL) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy, NULL); if (err < 0) return err; if (tb[TCA_CSUM_PARMS] == NULL) return -EINVAL; parm = nla_data(tb[TCA_CSUM_PARMS]); index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create_from_flags(tn, index, est, a, &act_csum_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; } else if (err > 0) { if (bind) /* dont override defaults */ return ACT_P_BOUND; if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); return -EEXIST; } } else { return err; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; p = to_tcf_csum(*a); params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { err = -ENOMEM; goto put_chain; } params_new->update_flags = parm->update_flags; spin_lock_bh(&p->tcf_lock); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); params_new = rcu_replace_pointer(p->params, params_new, lockdep_is_held(&p->tcf_lock)); spin_unlock_bh(&p->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); if (params_new) kfree_rcu(params_new, rcu); return ret; put_chain: if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: tcf_idr_release(*a, bind); return err; } /** * tcf_csum_skb_nextlayer - Get next layer pointer * @skb: sk_buff to use * @ihl: previous summed headers length * @ipl: complete packet length * @jhl: next header length * * Check the expected next layer availability in the specified sk_buff. * Return the next layer pointer if pass, NULL otherwise. 
*/ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb, unsigned int ihl, unsigned int ipl, unsigned int jhl) { int ntkoff = skb_network_offset(skb); int hl = ihl + jhl; if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) || skb_try_make_writable(skb, hl + ntkoff)) return NULL; else return (void *)(skb_network_header(skb) + ihl); } static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct icmphdr *icmph; icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph)); if (icmph == NULL) return 0; icmph->checksum = 0; skb->csum = csum_partial(icmph, ipl - ihl, 0); icmph->checksum = csum_fold(skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv4_igmp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct igmphdr *igmph; igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph)); if (igmph == NULL) return 0; igmph->csum = 0; skb->csum = csum_partial(igmph, ipl - ihl, 0); igmph->csum = csum_fold(skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct icmp6hdr *icmp6h; const struct ipv6hdr *ip6h; icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h)); if (icmp6h == NULL) return 0; ip6h = ipv6_hdr(skb); icmp6h->icmp6_cksum = 0; skb->csum = csum_partial(icmp6h, ipl - ihl, 0); icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ipl - ihl, IPPROTO_ICMPV6, skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct tcphdr *tcph; const struct iphdr *iph; if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) return 1; tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; iph = ip_hdr(skb); tcph->check = 0; skb->csum = csum_partial(tcph, ipl - ihl, 0); tcph->check = tcp_v4_check(ipl - ihl, iph->saddr, iph->daddr, skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct tcphdr *tcph; const struct ipv6hdr *ip6h; if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) return 1; tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; ip6h = ipv6_hdr(skb); tcph->check = 0; skb->csum = csum_partial(tcph, ipl - ihl, 0); tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ipl - ihl, IPPROTO_TCP, skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl, int udplite) { struct udphdr *udph; const struct iphdr *iph; u16 ul; if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) return 1; /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, * UDPLITE uses udph->len for another thing, * Use iph->tot_len, or just ipl. 
*/ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph)); if (udph == NULL) return 0; iph = ip_hdr(skb); ul = ntohs(udph->len); if (udplite || udph->check) { udph->check = 0; if (udplite) { if (ul == 0) skb->csum = csum_partial(udph, ipl - ihl, 0); else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl)) skb->csum = csum_partial(udph, ul, 0); else goto ignore_obscure_skb; } else { if (ul != ipl - ihl) goto ignore_obscure_skb; skb->csum = csum_partial(udph, ul, 0); } udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, ul, iph->protocol, skb->csum); if (!udph->check) udph->check = CSUM_MANGLED_0; } skb->ip_summed = CHECKSUM_NONE; ignore_obscure_skb: return 1; } static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl, int udplite) { struct udphdr *udph; const struct ipv6hdr *ip6h; u16 ul; if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) return 1; /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, * UDPLITE uses udph->len for another thing, * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl. */ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph)); if (udph == NULL) return 0; ip6h = ipv6_hdr(skb); ul = ntohs(udph->len); udph->check = 0; if (udplite) { if (ul == 0) skb->csum = csum_partial(udph, ipl - ihl, 0); else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl)) skb->csum = csum_partial(udph, ul, 0); else goto ignore_obscure_skb; } else { if (ul != ipl - ihl) goto ignore_obscure_skb; skb->csum = csum_partial(udph, ul, 0); } udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul, udplite ? IPPROTO_UDPLITE : IPPROTO_UDP, skb->csum); if (!udph->check) udph->check = CSUM_MANGLED_0; skb->ip_summed = CHECKSUM_NONE; ignore_obscure_skb: return 1; } static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct sctphdr *sctph; if (skb_is_gso(skb) && skb_is_gso_sctp(skb)) return 1; sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph)); if (!sctph) return 0; sctph->checksum = sctp_compute_cksum(skb, skb_network_offset(skb) + ihl); skb_reset_csum_not_inet(skb); return 1; } static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags) { const struct iphdr *iph; int ntkoff; ntkoff = skb_network_offset(skb); if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff)) goto fail; iph = ip_hdr(skb); switch (iph->frag_off & htons(IP_OFFSET) ? 
0 : iph->protocol) { case IPPROTO_ICMP: if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP) if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4, ntohs(iph->tot_len))) goto fail; break; case IPPROTO_IGMP: if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP) if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4, ntohs(iph->tot_len))) goto fail; break; case IPPROTO_TCP: if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP) if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4, ntohs(iph->tot_len))) goto fail; break; case IPPROTO_UDP: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP) if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4, ntohs(iph->tot_len), 0)) goto fail; break; case IPPROTO_UDPLITE: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE) if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4, ntohs(iph->tot_len), 1)) goto fail; break; case IPPROTO_SCTP: if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) && !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len))) goto fail; break; } if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) { if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff)) goto fail; ip_send_check(ip_hdr(skb)); } return 1; fail: return 0; } static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl, unsigned int *pl) { int off, len, optlen; unsigned char *xh = (void *)ip6xh; off = sizeof(*ip6xh); len = ixhl - off; while (len > 1) { switch (xh[off]) { case IPV6_TLV_PAD1: optlen = 1; break; case IPV6_TLV_JUMBO: optlen = xh[off + 1] + 2; if (optlen != 6 || len < 6 || (off & 3) != 2) /* wrong jumbo option length/alignment */ return 0; *pl = ntohl(*(__be32 *)(xh + off + 2)); goto done; default: optlen = xh[off + 1] + 2; if (optlen > len) /* ignore obscure options */ goto done; break; } off += optlen; len -= optlen; } done: return 1; } static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags) { struct ipv6hdr *ip6h; struct ipv6_opt_hdr *ip6xh; unsigned int hl, ixhl; unsigned int pl; int ntkoff; u8 nexthdr; ntkoff = skb_network_offset(skb); hl = sizeof(*ip6h); if (!pskb_may_pull(skb, hl + ntkoff)) goto fail; ip6h = ipv6_hdr(skb); pl = ntohs(ip6h->payload_len); nexthdr = ip6h->nexthdr; do { switch (nexthdr) { case NEXTHDR_FRAGMENT: goto ignore_skb; case NEXTHDR_ROUTING: case NEXTHDR_HOP: case NEXTHDR_DEST: if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff)) goto fail; ip6xh = (void *)(skb_network_header(skb) + hl); ixhl = ipv6_optlen(ip6xh); if (!pskb_may_pull(skb, hl + ixhl + ntkoff)) goto fail; ip6xh = (void *)(skb_network_header(skb) + hl); if ((nexthdr == NEXTHDR_HOP) && !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl))) goto fail; nexthdr = ip6xh->nexthdr; hl += ixhl; break; case IPPROTO_ICMPV6: if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP) if (!tcf_csum_ipv6_icmp(skb, hl, pl + sizeof(*ip6h))) goto fail; goto done; case IPPROTO_TCP: if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP) if (!tcf_csum_ipv6_tcp(skb, hl, pl + sizeof(*ip6h))) goto fail; goto done; case IPPROTO_UDP: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP) if (!tcf_csum_ipv6_udp(skb, hl, pl + sizeof(*ip6h), 0)) goto fail; goto done; case IPPROTO_UDPLITE: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE) if (!tcf_csum_ipv6_udp(skb, hl, pl + sizeof(*ip6h), 1)) goto fail; goto done; case IPPROTO_SCTP: if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) && !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h))) goto fail; goto done; default: goto ignore_skb; } } while (pskb_may_pull(skb, hl + 1 + ntkoff)); done: ignore_skb: return 1; fail: return 0; } TC_INDIRECT_SCOPE int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_csum *p = to_tcf_csum(a); bool 
orig_vlan_tag_present = false; unsigned int vlan_hdr_count = 0; struct tcf_csum_params *params; u32 update_flags; __be16 protocol; int action; params = rcu_dereference_bh(p->params); tcf_lastuse_update(&p->tcf_tm); tcf_action_update_bstats(&p->common, skb); action = READ_ONCE(p->tcf_action); if (unlikely(action == TC_ACT_SHOT)) goto drop; update_flags = params->update_flags; protocol = skb_protocol(skb, false); again: switch (protocol) { case cpu_to_be16(ETH_P_IP): if (!tcf_csum_ipv4(skb, update_flags)) goto drop; break; case cpu_to_be16(ETH_P_IPV6): if (!tcf_csum_ipv6(skb, update_flags)) goto drop; break; case cpu_to_be16(ETH_P_8021AD): fallthrough; case cpu_to_be16(ETH_P_8021Q): if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) { protocol = skb->protocol; orig_vlan_tag_present = true; } else { struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data; protocol = vlan->h_vlan_encapsulated_proto; skb_pull(skb, VLAN_HLEN); skb_reset_network_header(skb); vlan_hdr_count++; } goto again; } out: /* Restore the skb for the pulled VLAN tags */ while (vlan_hdr_count--) { skb_push(skb, VLAN_HLEN); skb_reset_network_header(skb); } return action; drop: tcf_action_inc_drop_qstats(&p->common); action = TC_ACT_SHOT; goto out; } static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_csum *p = to_tcf_csum(a); struct tcf_csum_params *params; struct tc_csum opt = { .index = p->tcf_index, .refcnt = refcount_read(&p->tcf_refcnt) - ref, .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, }; struct tcf_t t; spin_lock_bh(&p->tcf_lock); params = rcu_dereference_protected(p->params, lockdep_is_held(&p->tcf_lock)); opt.action = p->tcf_action; opt.update_flags = params->update_flags; if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) goto nla_put_failure; tcf_tm_dump(&t, &p->tcf_tm); if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD)) goto nla_put_failure; spin_unlock_bh(&p->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&p->tcf_lock); nlmsg_trim(skb, b); return -1; } static void tcf_csum_cleanup(struct tc_action *a) { struct tcf_csum *p = to_tcf_csum(a); struct tcf_csum_params *params; params = rcu_dereference_protected(p->params, 1); if (params) kfree_rcu(params, rcu); } static size_t tcf_csum_get_fill_size(const struct tc_action *act) { return nla_total_size(sizeof(struct tc_csum)); } static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) { if (bind) { struct flow_action_entry *entry = entry_data; entry->id = FLOW_ACTION_CSUM; entry->csum_flags = tcf_csum_update_flags(act); *index_inc = 1; } else { struct flow_offload_action *fl_action = entry_data; fl_action->id = FLOW_ACTION_CSUM; } return 0; } static struct tc_action_ops act_csum_ops = { .kind = "csum", .id = TCA_ID_CSUM, .owner = THIS_MODULE, .act = tcf_csum_act, .dump = tcf_csum_dump, .init = tcf_csum_init, .cleanup = tcf_csum_cleanup, .get_fill_size = tcf_csum_get_fill_size, .offload_act_setup = tcf_csum_offload_act_setup, .size = sizeof(struct tcf_csum), }; MODULE_ALIAS_NET_ACT("csum"); static __net_init int csum_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id); return tc_action_net_init(net, tn, &act_csum_ops); } static void __net_exit csum_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_csum_ops.net_id); } static struct pernet_operations csum_net_ops = { .init = csum_init_net, .exit_batch = 
csum_exit_net, .id = &act_csum_ops.net_id, .size = sizeof(struct tc_action_net), }; MODULE_DESCRIPTION("Checksum updating actions"); MODULE_LICENSE("GPL"); static int __init csum_init_module(void) { return tcf_register_action(&act_csum_ops, &csum_net_ops); } static void __exit csum_cleanup_module(void) { tcf_unregister_action(&act_csum_ops, &csum_net_ops); } module_init(csum_init_module); module_exit(csum_cleanup_module);
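/*
 * Not part of act_csum.c: a minimal sketch of the userspace side of the
 * TCA_CSUM_PARMS attribute that tcf_csum_init() above parses.  The netlink
 * framing (nlmsghdr/rtattr construction, e.g. via libmnl) is omitted and the
 * example_* helper is hypothetical; only struct tc_csum, TC_ACT_OK and the
 * TCA_CSUM_UPDATE_FLAG_* bits come from the UAPI headers.  This is roughly
 * what a tc command selecting IPv4-header, TCP and UDP checksum updates
 * would request.
 */
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/tc_act/tc_csum.h>

static void example_fill_csum_parms(struct tc_csum *parm)
{
        memset(parm, 0, sizeof(*parm));
        parm->index = 0;                /* 0: let the kernel allocate an index */
        parm->action = TC_ACT_OK;       /* continue the pipeline after recomputing */
        /* Recompute the IPv4 header checksum plus TCP and UDP checksums. */
        parm->update_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                             TCA_CSUM_UPDATE_FLAG_TCP |
                             TCA_CSUM_UPDATE_FLAG_UDP;
}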
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGEMAP_H #define _LINUX_PAGEMAP_H /* * Copyright 1995 Linus
Torvalds */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/compiler.h> #include <linux/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> #include <linux/hardirq.h> /* for in_interrupt() */ #include <linux/hugetlb_inline.h> struct folio_batch; unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); static inline void invalidate_remote_inode(struct inode *inode) { if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) invalidate_mapping_pages(inode->i_mapping, 0, -1); } int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count); int filemap_invalidate_pages(struct address_space *mapping, loff_t pos, loff_t end, bool nowait); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); int filemap_flush(struct address_space *); int filemap_fdatawait_keep_errors(struct address_space *mapping); int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); int filemap_fdatawait_range_keep_errors(struct address_space *mapping, loff_t start_byte, loff_t end_byte); int filemap_invalidate_inode(struct inode *inode, bool flush, loff_t start, loff_t end); static inline int filemap_fdatawait(struct address_space *mapping) { return filemap_fdatawait_range(mapping, 0, LLONG_MAX); } bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); int filemap_check_errors(struct address_space *mapping); void __filemap_set_wb_err(struct address_space *mapping, int err); int filemap_fdatawrite_wbc(struct address_space *mapping, struct writeback_control *wbc); int kiocb_write_and_wait(struct kiocb *iocb, size_t count); static inline int filemap_write_and_wait(struct address_space *mapping) { return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); } /** * filemap_set_wb_err - set a writeback error on an address_space * @mapping: mapping in which to set writeback error * @err: error to be set in mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. We endeavor * to report errors on any file that was open at the time of the error. Some * internal callers also need to know when writeback errors have occurred. * * When a writeback error occurs, most filesystems will want to call * filemap_set_wb_err to record the error in the mapping so that it will be * automatically reported whenever fsync is called on the file. */ static inline void filemap_set_wb_err(struct address_space *mapping, int err) { /* Fastpath for common case of no error */ if (unlikely(err)) __filemap_set_wb_err(mapping, err); } /** * filemap_check_wb_err - has an error occurred since the mark was sampled? * @mapping: mapping to check for writeback errors * @since: previously-sampled errseq_t * * Grab the errseq_t value from the mapping, and see if it has changed "since" * the given value was sampled. 
* * If it has then report the latest error set, otherwise return 0. */ static inline int filemap_check_wb_err(struct address_space *mapping, errseq_t since) { return errseq_check(&mapping->wb_err, since); } /** * filemap_sample_wb_err - sample the current errseq_t to test for later errors * @mapping: mapping to be sampled * * Writeback errors are always reported relative to a particular sample point * in the past. This function provides those sample points. */ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) { return errseq_sample(&mapping->wb_err); } /** * file_sample_sb_err - sample the current errseq_t to test for later errors * @file: file pointer to be sampled * * Grab the most current superblock-level errseq_t value for the given * struct file. */ static inline errseq_t file_sample_sb_err(struct file *file) { return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); } /* * Flush file data before changing attributes. Caller must hold any locks * required to prevent further writes to this file until we're done setting * flags. */ static inline int inode_drain_writes(struct inode *inode) { inode_dio_wait(inode); return filemap_write_and_wait(inode->i_mapping); } static inline bool mapping_empty(struct address_space *mapping) { return xa_empty(&mapping->i_pages); } /* * mapping_shrinkable - test if page cache state allows inode reclaim * @mapping: the page cache mapping * * This checks the mapping's cache state for the pupose of inode * reclaim and LRU management. * * The caller is expected to hold the i_lock, but is not required to * hold the i_pages lock, which usually protects cache state. That's * because the i_lock and the list_lru lock that protect the inode and * its LRU state don't nest inside the irq-safe i_pages lock. * * Cache deletions are performed under the i_lock, which ensures that * when an inode goes empty, it will reliably get queued on the LRU. * * Cache additions do not acquire the i_lock and may race with this * check, in which case we'll report the inode as shrinkable when it * has cache pages. This is okay: the shrinker also checks the * refcount and the referenced bit, which will be elevated or set in * the process of adding new cache pages to an inode. */ static inline bool mapping_shrinkable(struct address_space *mapping) { void *head; /* * On highmem systems, there could be lowmem pressure from the * inodes before there is highmem pressure from the page * cache. Make inodes shrinkable regardless of cache state. */ if (IS_ENABLED(CONFIG_HIGHMEM)) return true; /* Cache completely empty? Shrink away. */ head = rcu_access_pointer(mapping->i_pages.xa_head); if (!head) return true; /* * The xarray stores single offset-0 entries directly in the * head pointer, which allows non-resident page cache entries * to escape the shadow shrinker's list of xarray nodes. The * inode shrinker needs to pick them up under memory pressure. */ if (!xa_is_node(head) && xa_is_value(head)) return true; return false; } /* * Bits in mapping->flags. 
*/ enum mapping_flags { AS_EIO = 0, /* IO error on async write */ AS_ENOSPC = 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */ AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying folio contents */ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */ /* Bits 16-25 are used for FOLIO_ORDER */ AS_FOLIO_ORDER_BITS = 5, AS_FOLIO_ORDER_MIN = 16, AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS, }; #define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1) #define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN) #define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX) #define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK) /** * mapping_set_error - record a writeback error in the address_space * @mapping: the mapping in which an error should be set * @error: the error to set in the mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. We endeavor * to report errors on any file that was open at the time of the error. Some * internal callers also need to know when writeback errors have occurred. * * When a writeback error occurs, most filesystems will want to call * mapping_set_error to record the error in the mapping so that it can be * reported when the application calls fsync(2). */ static inline void mapping_set_error(struct address_space *mapping, int error) { if (likely(!error)) return; /* Record in wb_err for checkers using errseq_t based tracking */ __filemap_set_wb_err(mapping, error); /* Record it in superblock */ if (mapping->host) errseq_set(&mapping->host->i_sb->s_wb_err, error); /* Record it in flags for now, for legacy callers */ if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else set_bit(AS_EIO, &mapping->flags); } static inline void mapping_set_unevictable(struct address_space *mapping) { set_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_clear_unevictable(struct address_space *mapping) { clear_bit(AS_UNEVICTABLE, &mapping->flags); } static inline bool mapping_unevictable(struct address_space *mapping) { return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); } static inline int mapping_exiting(struct address_space *mapping) { return test_bit(AS_EXITING, &mapping->flags); } static inline void mapping_set_no_writeback_tags(struct address_space *mapping) { set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline int mapping_use_writeback_tags(struct address_space *mapping) { return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline bool mapping_release_always(const struct address_space *mapping) { return test_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline void mapping_set_release_always(struct address_space *mapping) { set_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline void mapping_clear_release_always(struct address_space *mapping) { clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline bool mapping_stable_writes(const struct address_space *mapping) { return test_bit(AS_STABLE_WRITES, 
&mapping->flags); } static inline void mapping_set_stable_writes(struct address_space *mapping) { set_bit(AS_STABLE_WRITES, &mapping->flags); } static inline void mapping_clear_stable_writes(struct address_space *mapping) { clear_bit(AS_STABLE_WRITES, &mapping->flags); } static inline void mapping_set_inaccessible(struct address_space *mapping) { /* * It's expected inaccessible mappings are also unevictable. Compaction * migrate scanner (isolate_migratepages_block()) relies on this to * reduce page locking. */ set_bit(AS_UNEVICTABLE, &mapping->flags); set_bit(AS_INACCESSIBLE, &mapping->flags); } static inline bool mapping_inaccessible(struct address_space *mapping) { return test_bit(AS_INACCESSIBLE, &mapping->flags); } static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; } /* Restricts the given gfp_mask to what the mapping allows. */ static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, gfp_t gfp_mask) { return mapping_gfp_mask(mapping) & gfp_mask; } /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... */ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) { m->gfp_mask = mask; } /* * There are some parts of the kernel which assume that PMD entries * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then, * limit the maximum allocation order to PMD size. I'm not aware of any * assumptions about maximum order if THP are disabled, but 8 seems like * a good order (that's 1MB if you're using 4kB pages) */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER #else #define PREFERRED_MAX_PAGECACHE_ORDER 8 #endif /* * xas_split_alloc() does not support arbitrary orders. This implies no * 512MB THP on ARM64 with 64KB base page size. */ #define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1) #define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER) /* * mapping_max_folio_size_supported() - Check the max folio size supported * * The filesystem should call this function at mount time if there is a * requirement on the folio mapping size in the page cache. */ static inline size_t mapping_max_folio_size_supported(void) { if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER); return PAGE_SIZE; } /* * mapping_set_folio_order_range() - Set the orders supported by a file. * @mapping: The address space of the file. * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive). * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive). * * The filesystem should call this function in its inode constructor to * indicate which base size (min) and maximum size (max) of folio the VFS * can use to cache the contents of the file. This should only be used * if the filesystem needs special handling of folio sizes (ie there is * something the core cannot know). * Do not tune it based on, eg, i_size. * * Context: This should not be called while the inode is active as it * is non-atomic. 
*/ static inline void mapping_set_folio_order_range(struct address_space *mapping, unsigned int min, unsigned int max) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return; if (min > MAX_PAGECACHE_ORDER) min = MAX_PAGECACHE_ORDER; if (max > MAX_PAGECACHE_ORDER) max = MAX_PAGECACHE_ORDER; if (max < min) max = min; mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) | (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX); } static inline void mapping_set_folio_min_order(struct address_space *mapping, unsigned int min) { mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER); } /** * mapping_set_large_folios() - Indicate the file supports large folios. * @mapping: The address space of the file. * * The filesystem should call this function in its inode constructor to * indicate that the VFS can use large folios to cache the contents of * the file. * * Context: This should not be called while the inode is active as it * is non-atomic. */ static inline void mapping_set_large_folios(struct address_space *mapping) { mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER); } static inline unsigned int mapping_max_folio_order(const struct address_space *mapping) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 0; return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX; } static inline unsigned int mapping_min_folio_order(const struct address_space *mapping) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 0; return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN; } static inline unsigned long mapping_min_folio_nrpages(struct address_space *mapping) { return 1UL << mapping_min_folio_order(mapping); } /** * mapping_align_index() - Align index for this mapping. * @mapping: The address_space. * @index: The page index. * * The index of a folio must be naturally aligned. If you are adding a * new folio to the page cache and need to know what index to give it, * call this function. */ static inline pgoff_t mapping_align_index(struct address_space *mapping, pgoff_t index) { return round_down(index, mapping_min_folio_nrpages(mapping)); } /* * Large folio support currently depends on THP. These dependencies are * being worked on but are not yet fixed. */ static inline bool mapping_large_folio_support(struct address_space *mapping) { /* AS_FOLIO_ORDER is only reasonable for pagecache folios */ VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON, "Anonymous mapping always supports large folio"); return mapping_max_folio_order(mapping) > 0; } /* Return the maximum folio size for this pagecache mapping, in bytes. 
*/ static inline size_t mapping_max_folio_size(const struct address_space *mapping) { return PAGE_SIZE << mapping_max_folio_order(mapping); } static inline int filemap_nr_thps(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS return atomic_read(&mapping->nr_thps); #else return 0; #endif } static inline void filemap_nr_thps_inc(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS if (!mapping_large_folio_support(mapping)) atomic_inc(&mapping->nr_thps); #else WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } static inline void filemap_nr_thps_dec(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS if (!mapping_large_folio_support(mapping)) atomic_dec(&mapping->nr_thps); #else WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } struct address_space *folio_mapping(struct folio *); struct address_space *swapcache_mapping(struct folio *); /** * folio_flush_mapping - Find the file mapping this folio belongs to. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Anonymous folios return NULL, even if they're in * the swap cache. Other kinds of folio also return NULL. * * This is ONLY used by architecture cache flushing code. If you aren't * writing cache flushing code, you want either folio_mapping() or * folio_file_mapping(). */ static inline struct address_space *folio_flush_mapping(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return NULL; return folio_mapping(folio); } /** * folio_inode - Get the host inode for this folio. * @folio: The folio. * * For folios which are in the page cache, return the inode that this folio * belongs to. * * Do not call this for folios which aren't in the page cache. */ static inline struct inode *folio_inode(struct folio *folio) { return folio->mapping->host; } /** * folio_attach_private - Attach private data to a folio. * @folio: Folio to attach data to. * @data: Data to attach to folio. * * Attaching private data to a folio increments the page's reference count. * The data must be detached before the folio will be freed. */ static inline void folio_attach_private(struct folio *folio, void *data) { folio_get(folio); folio->private = data; folio_set_private(folio); } /** * folio_change_private - Change private data on a folio. * @folio: Folio to change the data on. * @data: Data to set on the folio. * * Change the private data attached to a folio and return the old * data. The page must previously have had data attached and the data * must be detached before the folio will be freed. * * Return: Data that was previously attached to the folio. */ static inline void *folio_change_private(struct folio *folio, void *data) { void *old = folio_get_private(folio); folio->private = data; return old; } /** * folio_detach_private - Detach private data from a folio. * @folio: Folio to detach data from. * * Removes the data that was previously attached to the folio and decrements * the refcount on the page. * * Return: Data that was attached to the folio. 
*/ static inline void *folio_detach_private(struct folio *folio) { void *data = folio_get_private(folio); if (!folio_test_private(folio)) return NULL; folio_clear_private(folio); folio->private = NULL; folio_put(folio); return data; } static inline void attach_page_private(struct page *page, void *data) { folio_attach_private(page_folio(page), data); } static inline void *detach_page_private(struct page *page) { return folio_detach_private(page_folio(page)); } #ifdef CONFIG_NUMA struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); #else static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) { return folio_alloc_noprof(gfp, order); } #endif #define filemap_alloc_folio(...) \ alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__)) static inline struct page *__page_cache_alloc(gfp_t gfp) { return &filemap_alloc_folio(gfp, 0)->page; } static inline gfp_t readahead_gfp_mask(struct address_space *x) { return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; } typedef int filler_t(struct file *, struct folio *); pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); /** * typedef fgf_t - Flags for getting folios from the page cache. * * Most users of the page cache will not need to use these flags; * there are convenience functions such as filemap_get_folio() and * filemap_lock_folio(). For users which need more control over exactly * what is done with the folios, these flags to __filemap_get_folio() * are available. * * * %FGP_ACCESSED - The folio will be marked accessed. * * %FGP_LOCK - The folio is returned locked. * * %FGP_CREAT - If no folio is present then a new folio is allocated, * added to the page cache and the VM's LRU list. The folio is * returned locked. * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the * folio is already in cache. If the folio was allocated, unlock it * before returning so the caller can do the same dance. * * %FGP_WRITE - The folio will be written to by the caller. * * %FGP_NOFS - __GFP_FS will get cleared in gfp. * * %FGP_NOWAIT - Don't block on the folio lock. * * %FGP_STABLE - Wait for the folio to be stable (finished writeback) * * %FGP_DONTCACHE - Uncached buffered IO * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin() * implementation. */ typedef unsigned int __bitwise fgf_t; #define FGP_ACCESSED ((__force fgf_t)0x00000001) #define FGP_LOCK ((__force fgf_t)0x00000002) #define FGP_CREAT ((__force fgf_t)0x00000004) #define FGP_WRITE ((__force fgf_t)0x00000008) #define FGP_NOFS ((__force fgf_t)0x00000010) #define FGP_NOWAIT ((__force fgf_t)0x00000020) #define FGP_FOR_MMAP ((__force fgf_t)0x00000040) #define FGP_STABLE ((__force fgf_t)0x00000080) #define FGP_DONTCACHE ((__force fgf_t)0x00000100) #define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */ #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) static inline unsigned int filemap_get_order(size_t size) { unsigned int shift = ilog2(size); if (shift <= PAGE_SHIFT) return 0; return shift - PAGE_SHIFT; } /** * fgf_set_order - Encode a length in the fgf_t flags. * @size: The suggested size of the folio to create. * * The caller of __filemap_get_folio() can use this to suggest a preferred * size for the folio that is created. If there is already a folio at * the index, it will be returned, no matter what its size. 
If a folio * is freshly created, it may be of a different size than requested * due to alignment constraints, memory pressure, or the presence of * other folios at nearby indices. */ static inline fgf_t fgf_set_order(size_t size) { unsigned int order = filemap_get_order(size); if (!order) return 0; return (__force fgf_t)(order << 26); } void *filemap_get_entry(struct address_space *mapping, pgoff_t index); struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); /** * filemap_get_folio - Find and get a folio. * @mapping: The address_space to search. * @index: The page index. * * Looks up the page cache entry at @mapping & @index. If a folio is * present, it is returned with an increased refcount. * * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for * this index. Will not return a shadow, swap or DAX entry. */ static inline struct folio *filemap_get_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, 0, 0); } /** * filemap_lock_folio - Find and lock a folio. * @mapping: The address_space to search. * @index: The page index. * * Looks up the page cache entry at @mapping & @index. If a folio is * present, it is returned locked with an increased refcount. * * Context: May sleep. * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for * this index. Will not return a shadow, swap or DAX entry. */ static inline struct folio *filemap_lock_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, FGP_LOCK, 0); } /** * filemap_grab_folio - grab a folio from the page cache * @mapping: The address space to search * @index: The page index * * Looks up the page cache entry at @mapping & @index. If no folio is found, * a new folio is created. The folio is locked, marked as accessed, and * returned. * * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found * and failed to create a folio. */ static inline struct folio *filemap_grab_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mapping_gfp_mask(mapping)); } /** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned with an increased refcount. * * Otherwise, %NULL is returned. */ static inline struct page *find_get_page(struct address_space *mapping, pgoff_t offset) { return pagecache_get_page(mapping, offset, 0, 0); } static inline struct page *find_get_page_flags(struct address_space *mapping, pgoff_t offset, fgf_t fgp_flags) { return pagecache_get_page(mapping, offset, fgp_flags, 0); } /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search * @index: the page index * * Looks up the page cache entry at @mapping & @index. If there is a * page cache page, it is returned locked and with an increased * refcount. * * Context: May sleep. * Return: A struct page or %NULL if there is no page in the cache for this * index. 
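 *
 * Illustrative use (a sketch; new code would normally prefer the
 * folio-based filemap_lock_folio()):
 *
 *	page = find_lock_page(mapping, index);
 *	if (page) {
 *		... inspect or modify the page ...
 *		unlock_page(page);
 *		put_page(page);
 *	}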
*/ static inline struct page *find_lock_page(struct address_space *mapping, pgoff_t index) { return pagecache_get_page(mapping, index, FGP_LOCK, 0); } /** * find_or_create_page - locate or add a pagecache page * @mapping: the page's address_space * @index: the page's index into the mapping * @gfp_mask: page allocation mode * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned locked and with an increased * refcount. * * If the page is not present, a new page is allocated using @gfp_mask * and added to the page cache and the VM's LRU list. The page is * returned locked and with an increased refcount. * * On memory exhaustion, %NULL is returned. * * find_or_create_page() may sleep, even if @gfp_flags specifies an * atomic allocation! */ static inline struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask) { return pagecache_get_page(mapping, index, FGP_LOCK|FGP_ACCESSED|FGP_CREAT, gfp_mask); } /** * grab_cache_page_nowait - returns locked page at given index in given cache * @mapping: target address_space * @index: the page index * * Same as grab_cache_page(), but do not wait if the page is unavailable. * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should * be safe to call while holding the lock for another page. * * Clear __GFP_FS when allocating the page to avoid recursion into the fs * and deadlock against the caller's locked page. */ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) { return pagecache_get_page(mapping, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, mapping_gfp_mask(mapping)); } extern pgoff_t __folio_swap_cache_index(struct folio *folio); /** * folio_index - File index of a folio. * @folio: The folio. * * For a folio which is either in the page cache or the swap cache, * return its index within the address_space it belongs to. If you know * the page is definitely in the page cache, you can look at the folio's * index directly. * * Return: The index (offset in units of pages) of a folio in its file. */ static inline pgoff_t folio_index(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return __folio_swap_cache_index(folio); return folio->index; } /** * folio_next_index - Get the index of the next folio. * @folio: The current folio. * * Return: The index of the folio which follows this folio in the file. */ static inline pgoff_t folio_next_index(struct folio *folio) { return folio->index + folio_nr_pages(folio); } /** * folio_file_page - The page for a particular index. * @folio: The folio which contains this index. * @index: The index we want to look up. * * Sometimes after looking up a folio in the page cache, we need to * obtain the specific page for an index (eg a page fault). * * Return: The page containing the file data for this index. */ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) { return folio_page(folio, index & (folio_nr_pages(folio) - 1)); } /** * folio_contains - Does this folio contain this index? * @folio: The folio. * @index: The page index within the file. * * Context: The caller should have the page locked in order to prevent * (eg) shmem from moving the page between the page cache and swap cache * and changing its index in the middle of the operation. * Return: true or false. 
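 *
 * Example (illustrative): a caller that has looked up a large folio and
 * needs the page backing one particular index might do:
 *
 *	if (folio_contains(folio, index))
 *		page = folio_file_page(folio, index);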
*/ static inline bool folio_contains(struct folio *folio, pgoff_t index) { return index - folio_index(folio) < folio_nr_pages(folio); } /* * Given the page we found in the page cache, return the page corresponding * to this index in the file */ static inline struct page *find_subpage(struct page *head, pgoff_t index) { /* HugeTLBfs wants the head page regardless */ if (PageHuge(head)) return head; return head + (index & (thp_nr_pages(head) - 1)); } unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch); unsigned filemap_get_folios_contig(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch); unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch); /* * Returns locked page at given index in given cache, creating it if needed. */ static inline struct page *grab_cache_page(struct address_space *mapping, pgoff_t index) { return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); } struct folio *read_cache_folio(struct address_space *, pgoff_t index, filler_t *filler, struct file *file); struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index, gfp_t flags); struct page *read_cache_page(struct address_space *, pgoff_t index, filler_t *filler, struct file *file); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, struct file *file) { return read_cache_page(mapping, index, NULL, file); } static inline struct folio *read_mapping_folio(struct address_space *mapping, pgoff_t index, struct file *file) { return read_cache_folio(mapping, index, NULL, file); } /** * page_pgoff - Calculate the logical page offset of this page. * @folio: The folio containing this page. * @page: The page which we need the offset of. * * For file pages, this is the offset from the beginning of the file * in units of PAGE_SIZE. For anonymous pages, this is the offset from * the beginning of the anon_vma in units of PAGE_SIZE. This will * return nonsense for KSM pages. * * Context: Caller must have a reference on the folio or otherwise * prevent it from being split or freed. * * Return: The offset in units of PAGE_SIZE. */ static inline pgoff_t page_pgoff(const struct folio *folio, const struct page *page) { return folio->index + folio_page_idx(folio, page); } /** * folio_pos - Returns the byte position of this folio in its file. * @folio: The folio. */ static inline loff_t folio_pos(const struct folio *folio) { return ((loff_t)folio->index) * PAGE_SIZE; } /* * Return byte-offset into filesystem object for page. */ static inline loff_t page_offset(struct page *page) { struct folio *folio = page_folio(page); return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE; } /* * Get the offset in PAGE_SIZE (even for hugetlb folios). 
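 * For example, with 4KiB pages a folio whose ->index is 16 begins at byte
 * 65536 of the file: folio_pgoff() returns 16 while folio_pos() above
 * returns 16 * PAGE_SIZE.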
*/ static inline pgoff_t folio_pgoff(struct folio *folio) { return folio->index; } static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; } struct wait_page_key { struct folio *folio; int bit_nr; int page_match; }; struct wait_page_queue { struct folio *folio; int bit_nr; wait_queue_entry_t wait; }; static inline bool wake_page_match(struct wait_page_queue *wait_page, struct wait_page_key *key) { if (wait_page->folio != key->folio) return false; key->page_match = 1; if (wait_page->bit_nr != key->bit_nr) return false; return true; } void __folio_lock(struct folio *folio); int __folio_lock_killable(struct folio *folio); vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf); void unlock_page(struct page *page); void folio_unlock(struct folio *folio); /** * folio_trylock() - Attempt to lock a folio. * @folio: The folio to attempt to lock. * * Sometimes it is undesirable to wait for a folio to be unlocked (eg * when the locks are being taken in the wrong order, or if making * progress through a batch of folios is more important than processing * them in order). Usually folio_lock() is the correct function to call. * * Context: Any context. * Return: Whether the lock was successfully acquired. */ static inline bool folio_trylock(struct folio *folio) { return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0))); } /* * Return true if the page was successfully locked */ static inline bool trylock_page(struct page *page) { return folio_trylock(page_folio(page)); } /** * folio_lock() - Lock this folio. * @folio: The folio to lock. * * The folio lock protects against many things, probably more than it * should. It is primarily held while a folio is being brought uptodate, * either from its backing file or from swap. It is also held while a * folio is being truncated from its address_space, so holding the lock * is sufficient to keep folio->mapping stable. * * The folio lock is also held while write() is modifying the page to * provide POSIX atomicity guarantees (as long as the write does not * cross a page boundary). Other modifications to the data in the folio * do not hold the folio lock and can race with writes, eg DMA and stores * to mapped pages. * * Context: May sleep. If you need to acquire the locks of two or * more folios, they must be in order of ascending index, if they are * in the same address_space. If they are in different address_spaces, * acquire the lock of the folio which belongs to the address_space which * has the lowest address in memory first. */ static inline void folio_lock(struct folio *folio) { might_sleep(); if (!folio_trylock(folio)) __folio_lock(folio); } /** * lock_page() - Lock the folio containing this page. * @page: The page to lock. * * See folio_lock() for a description of what the lock protects. * This is a legacy function and new code should probably use folio_lock() * instead. * * Context: May sleep. Pages in the same folio share a lock, so do not * attempt to lock two pages which share a folio. */ static inline void lock_page(struct page *page) { struct folio *folio; might_sleep(); folio = page_folio(page); if (!folio_trylock(folio)) __folio_lock(folio); } /** * folio_lock_killable() - Lock this folio, interruptible by a fatal signal. * @folio: The folio to lock. 
* * Attempts to lock the folio, like folio_lock(), except that the sleep * to acquire the lock is interruptible by a fatal signal. * * Context: May sleep; see folio_lock(). * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received. */ static inline int folio_lock_killable(struct folio *folio) { might_sleep(); if (!folio_trylock(folio)) return __folio_lock_killable(folio); return 0; } /* * folio_lock_or_retry - Lock the folio, unless this would block and the * caller indicated that it can handle a retry. * * Return value and mmap_lock implications depend on flags; see * __folio_lock_or_retry(). */ static inline vm_fault_t folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) { might_sleep(); if (!folio_trylock(folio)) return __folio_lock_or_retry(folio, vmf); return 0; } /* * This is exported only for folio_wait_locked/folio_wait_writeback, etc., * and should not be used directly. */ void folio_wait_bit(struct folio *folio, int bit_nr); int folio_wait_bit_killable(struct folio *folio, int bit_nr); /* * Wait for a folio to be unlocked. * * This must be called with the caller "holding" the folio, * ie with increased folio reference count so that the folio won't * go away during the wait. */ static inline void folio_wait_locked(struct folio *folio) { if (folio_test_locked(folio)) folio_wait_bit(folio, PG_locked); } static inline int folio_wait_locked_killable(struct folio *folio) { if (!folio_test_locked(folio)) return 0; return folio_wait_bit_killable(folio, PG_locked); } void folio_end_read(struct folio *folio, bool success); void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); void end_page_writeback(struct page *page); void folio_end_writeback(struct folio *folio); void folio_wait_stable(struct folio *folio); void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn); void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); void __folio_cancel_dirty(struct folio *folio); static inline void folio_cancel_dirty(struct folio *folio) { /* Avoid atomic ops, locking, etc. when not actually needed. */ if (folio_test_dirty(folio)) __folio_cancel_dirty(folio); } bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); void folio_invalidate(struct folio *folio, size_t offset, size_t length); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); #ifdef CONFIG_MIGRATION int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode); #else #define filemap_migrate_folio NULL #endif void folio_end_private_2(struct folio *folio); void folio_wait_private_2(struct folio *folio); int folio_wait_private_2_killable(struct folio *folio); /* * Fault in userspace address range. 
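 *
 * Each of these returns the number of bytes that could not be faulted in,
 * so 0 means complete success. An illustrative retry pattern for a
 * buffered-I/O path (a sketch, not a specific caller):
 *
 *	if (copy_from_user(dst, ubuf, len)) {
 *		if (fault_in_readable(ubuf, len))
 *			return -EFAULT;
 *		goto retry;
 *	}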
*/ size_t fault_in_writeable(char __user *uaddr, size_t size); size_t fault_in_subpage_writeable(char __user *uaddr, size_t size); size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); size_t fault_in_readable(const char __user *uaddr, size_t size); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp); int filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp); void filemap_remove_folio(struct folio *folio); void __filemap_remove_folio(struct folio *folio, void *shadow); void replace_page_cache_folio(struct folio *old, struct folio *new); void delete_from_page_cache_batch(struct address_space *mapping, struct folio_batch *fbatch); bool filemap_release_folio(struct folio *folio, gfp_t gfp); loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, int whence); /* Must be non-static for BPF error injection */ int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp); bool filemap_range_has_writeback(struct address_space *mapping, loff_t start_byte, loff_t end_byte); /** * filemap_range_needs_writeback - check if range potentially needs writeback * @mapping: address space within which to check * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Find at least one page in the range supplied, usually used to check if * direct writing in this range will trigger a writeback. Used by O_DIRECT * read/write with IOCB_NOWAIT, to see if the caller needs to do * filemap_write_and_wait_range() before proceeding. * * Return: %true if the caller should do filemap_write_and_wait_range() before * doing O_DIRECT to a page in this range, %false otherwise. */ static inline bool filemap_range_needs_writeback(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { if (!mapping->nrpages) return false; if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) return false; return filemap_range_has_writeback(mapping, start_byte, end_byte); } /** * struct readahead_control - Describes a readahead request. * * A readahead request is for consecutive pages. Filesystems which * implement the ->readahead method should call readahead_page() or * readahead_page_batch() in a loop and attempt to start I/O against * each page in the request. * * Most of the fields in this struct are private and should be accessed * by the functions below. * * @file: The file, used primarily by network filesystems for authentication. * May be NULL if invoked internally by the filesystem. * @mapping: Readahead this filesystem object. * @ra: File readahead state. May be NULL. 
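 *
 * As an illustrative sketch (the helper name is made up), a filesystem's
 * ->readahead() implementation typically just walks the request:
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)) != NULL)
 *			myfs_start_read(ractl->file, folio);
 *	}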
*/ struct readahead_control { struct file *file; struct address_space *mapping; struct file_ra_state *ra; /* private: use the readahead_* accessors instead */ pgoff_t _index; unsigned int _nr_pages; unsigned int _batch_count; bool dropbehind; bool _workingset; unsigned long _pflags; }; #define DEFINE_READAHEAD(ractl, f, r, m, i) \ struct readahead_control ractl = { \ .file = f, \ .mapping = m, \ .ra = r, \ ._index = i, \ } #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) void page_cache_ra_unbounded(struct readahead_control *, unsigned long nr_to_read, unsigned long lookahead_count); void page_cache_sync_ra(struct readahead_control *, unsigned long req_count); void page_cache_async_ra(struct readahead_control *, struct folio *, unsigned long req_count); void readahead_expand(struct readahead_control *ractl, loff_t new_start, size_t new_len); /** * page_cache_sync_readahead - generic file readahead * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. * @index: Index of first page to be read. * @req_count: Total number of pages being read by the caller. * * page_cache_sync_readahead() should be called when a cache miss happened: * it will submit the read. The readahead logic may decide to piggyback more * pages onto the read request if access patterns suggest it will improve * performance. */ static inline void page_cache_sync_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, pgoff_t index, unsigned long req_count) { DEFINE_READAHEAD(ractl, file, ra, mapping, index); page_cache_sync_ra(&ractl, req_count); } /** * page_cache_async_readahead - file readahead for marked pages * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. * @folio: The folio which triggered the readahead call. * @req_count: Total number of pages being read by the caller. * * page_cache_async_readahead() should be called when a page is used which * is marked as PageReadahead; this is a marker to suggest that the application * has used up enough of the readahead window that we should start pulling in * more pages. */ static inline void page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, struct folio *folio, unsigned long req_count) { DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index); page_cache_async_ra(&ractl, folio, req_count); } static inline struct folio *__readahead_folio(struct readahead_control *ractl) { struct folio *folio; BUG_ON(ractl->_batch_count > ractl->_nr_pages); ractl->_nr_pages -= ractl->_batch_count; ractl->_index += ractl->_batch_count; if (!ractl->_nr_pages) { ractl->_batch_count = 0; return NULL; } folio = xa_load(&ractl->mapping->i_pages, ractl->_index); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); ractl->_batch_count = folio_nr_pages(folio); return folio; } /** * readahead_page - Get the next page to read. * @ractl: The current readahead request. * * Context: The page is locked and has an elevated refcount. The caller * should decreases the refcount once the page has been submitted for I/O * and unlock the page once all I/O to that page has completed. * Return: A pointer to the next page, or %NULL if we are done. 
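 *
 * Illustrative loop following the contract described above (a sketch):
 *
 *	while ((page = readahead_page(ractl)) != NULL) {
 *		... submit the read for this page ...
 *		put_page(page);
 *	}
 *
 * with the page being unlocked from the read-completion path.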
*/ static inline struct page *readahead_page(struct readahead_control *ractl) { struct folio *folio = __readahead_folio(ractl); return &folio->page; } /** * readahead_folio - Get the next folio to read. * @ractl: The current readahead request. * * Context: The folio is locked. The caller should unlock the folio once * all I/O to that folio has completed. * Return: A pointer to the next folio, or %NULL if we are done. */ static inline struct folio *readahead_folio(struct readahead_control *ractl) { struct folio *folio = __readahead_folio(ractl); if (folio) folio_put(folio); return folio; } static inline unsigned int __readahead_batch(struct readahead_control *rac, struct page **array, unsigned int array_sz) { unsigned int i = 0; XA_STATE(xas, &rac->mapping->i_pages, 0); struct page *page; BUG_ON(rac->_batch_count > rac->_nr_pages); rac->_nr_pages -= rac->_batch_count; rac->_index += rac->_batch_count; rac->_batch_count = 0; xas_set(&xas, rac->_index); rcu_read_lock(); xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { if (xas_retry(&xas, page)) continue; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageTail(page), page); array[i++] = page; rac->_batch_count += thp_nr_pages(page); if (i == array_sz) break; } rcu_read_unlock(); return i; } /** * readahead_page_batch - Get a batch of pages to read. * @rac: The current readahead request. * @array: An array of pointers to struct page. * * Context: The pages are locked and have an elevated refcount. The caller * should decreases the refcount once the page has been submitted for I/O * and unlock the page once all I/O to that page has completed. * Return: The number of pages placed in the array. 0 indicates the request * is complete. */ #define readahead_page_batch(rac, array) \ __readahead_batch(rac, array, ARRAY_SIZE(array)) /** * readahead_pos - The byte offset into the file of this readahead request. * @rac: The readahead request. */ static inline loff_t readahead_pos(struct readahead_control *rac) { return (loff_t)rac->_index * PAGE_SIZE; } /** * readahead_length - The number of bytes in this readahead request. * @rac: The readahead request. */ static inline size_t readahead_length(struct readahead_control *rac) { return rac->_nr_pages * PAGE_SIZE; } /** * readahead_index - The index of the first page in this readahead request. * @rac: The readahead request. */ static inline pgoff_t readahead_index(struct readahead_control *rac) { return rac->_index; } /** * readahead_count - The number of pages in this readahead request. * @rac: The readahead request. */ static inline unsigned int readahead_count(struct readahead_control *rac) { return rac->_nr_pages; } /** * readahead_batch_length - The number of bytes in the current batch. * @rac: The readahead request. */ static inline size_t readahead_batch_length(struct readahead_control *rac) { return rac->_batch_count * PAGE_SIZE; } static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; } /** * folio_mkwrite_check_truncate - check if folio was truncated * @folio: the folio to check * @inode: the inode to check the folio against * * Return: the number of bytes in the folio up to EOF, * or -EFAULT if the folio was truncated. 
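 *
 * Typical shape of a ->page_mkwrite() handler using this (an illustrative
 * sketch only):
 *
 *	folio_lock(folio);
 *	ret = folio_mkwrite_check_truncate(folio, inode);
 *	if (ret < 0) {
 *		folio_unlock(folio);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	... the first "ret" bytes of the folio are within EOF ...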
*/ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio, struct inode *inode) { loff_t size = i_size_read(inode); pgoff_t index = size >> PAGE_SHIFT; size_t offset = offset_in_folio(folio, size); if (!folio->mapping) return -EFAULT; /* folio is wholly inside EOF */ if (folio_next_index(folio) - 1 < index) return folio_size(folio); /* folio is wholly past EOF */ if (folio->index > index || !offset) return -EFAULT; /* folio is partially inside EOF */ return offset; } /** * i_blocks_per_folio - How many blocks fit in this folio. * @inode: The inode which contains the blocks. * @folio: The folio. * * If the block size is larger than the size of this folio, return zero. * * Context: The caller should hold a refcount on the folio to prevent it * from being split. * Return: The number of filesystem blocks covered by this folio. */ static inline unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio) { return folio_size(folio) >> inode->i_blkbits; } #endif /* _LINUX_PAGEMAP_H */ |
/* SPDX-License-Identifier: GPL-2.0-only */ /* * KVM Microsoft Hyper-V emulation * * derived from arch/x86/kvm/x86.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * Andrey Smetanin <asmetanin@virtuozzo.com> */ #ifndef __ARCH_X86_KVM_HYPERV_H__ #define __ARCH_X86_KVM_HYPERV_H__ #include <linux/kvm_host.h> #include "x86.h" #ifdef CONFIG_KVM_HYPERV /* "Hv#1" signature */ #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648 /* * The #defines related to the synthetic debugger are required by KDNet, but * they are not documented in the Hyper-V TLFS because the synthetic debugger * functionality has been deprecated and is subject to removal in future * versions of Windows. */ #define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS 0x40000080 #define HYPERV_CPUID_SYNDBG_INTERFACE 0x40000081 #define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES 0x40000082 /* * Hyper-V synthetic debugger platform capabilities * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
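 * For example, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING below corresponds
 * to bit 1 of EAX in CPUID leaf 0x40000082
 * (HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES).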
*/ #define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING BIT(1) /* Hyper-V Synthetic debug options MSR */ #define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1 #define HV_X64_MSR_SYNDBG_STATUS 0x400000F2 #define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3 #define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4 #define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5 #define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF /* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */ #define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2) static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm) { return &kvm->arch.hyperv; } static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu) { return vcpu->arch.hyperv; } static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); return &hv_vcpu->synic; } static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) { struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic); return hv_vcpu->vcpu; } static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu) { return &vcpu->kvm->arch.hyperv.hv_syndbg; } static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx; } int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host); static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu) { return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id; } int kvm_hv_hypercall(struct kvm_vcpu *vcpu); void kvm_hv_irq_routing_update(struct kvm *kvm); int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint); void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector); int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages); static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector) { return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap); } static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector) { return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap); } void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu); static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu, int timer_index) { return &to_hv_vcpu(vcpu)->stimer[timer_index]; } static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer) { struct kvm_vcpu_hv *hv_vcpu; hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv, stimer[0]); return hv_vcpu->vcpu; } static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); if (!hv_vcpu) return false; return !bitmap_empty(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); } /* * With HV_ACCESS_TSC_INVARIANT feature, invariant TSC (CPUID.80000007H:EDX[8]) * is only observed after HV_X64_MSR_TSC_INVARIANT_CONTROL was written to. */ static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); /* * If Hyper-V's invariant TSC control is not exposed to the guest, * the invariant TSC CPUID flag is not suppressed, Windows guests were * observed to be able to handle it correctly. 
Going forward, VMMs are * encouraged to enable Hyper-V's invariant TSC control when invariant * TSC CPUID flag is set to make KVM's behavior match genuine Hyper-V. */ if (!hv_vcpu || !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT)) return false; /* * If Hyper-V's invariant TSC control is exposed to the guest, KVM is * responsible for suppressing the invariant TSC CPUID flag if the * Hyper-V control is not enabled. */ return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC); } void kvm_hv_process_stimers(struct kvm_vcpu *vcpu); void kvm_hv_setup_tsc_page(struct kvm *kvm, struct pvclock_vcpu_time_info *hv_clock); void kvm_hv_request_tsc_page_update(struct kvm *kvm); void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu); void kvm_hv_init_vm(struct kvm *kvm); void kvm_hv_destroy_vm(struct kvm *kvm); int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled); int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce); int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args); int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu, bool is_guest_mode) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO : HV_L1_TLB_FLUSH_FIFO; return &hv_vcpu->tlb_flush_fifo[i]; } static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu)) return; tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu)); kfifo_reset_out(&tlb_flush_fifo->entries); } static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); return hv_vcpu && (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH); } static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); u16 code; if (!hv_vcpu) return false; code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) : kvm_rax_read(vcpu); return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE || code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST || code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX || code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX); } static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu) { if (!to_hv_vcpu(vcpu)) return 0; if (!kvm_hv_assist_page_enabled(vcpu)) return 0; return kvm_hv_get_assist_page(vcpu); } static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) { /* * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or * L2's VP_ID upon request from the guest. Make sure we check for * pending entries in the right FIFO upon L1/L2 transition as these * requests are put by other vCPUs asynchronously. 
*/ if (to_hv_vcpu(vcpu) && tdp_enabled) kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu); } int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu); #else /* CONFIG_KVM_HYPERV */ static inline void kvm_hv_setup_tsc_page(struct kvm *kvm, struct pvclock_vcpu_time_info *hv_clock) {} static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {} static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {} static inline void kvm_hv_init_vm(struct kvm *kvm) {} static inline void kvm_hv_destroy_vm(struct kvm *kvm) {} static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) { return 0; } static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu) { return false; } static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { return HV_STATUS_ACCESS_DENIED; } static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {} static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector) { return false; } static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector) { return false; } static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {} static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu) { return false; } static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {} static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) { return false; } static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu) { return false; } static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu) { return false; } static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu) { return 0; } static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu) { return vcpu->vcpu_idx; } static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {} #endif /* CONFIG_KVM_HYPERV */ #endif /* __ARCH_X86_KVM_HYPERV_H__ */ |
// SPDX-License-Identifier: GPL-2.0
/*
 * n_gsm.c GSM 0710 tty multiplexor
 * Copyright (c) 2009/10 Intel Corporation
 * Copyright (c) 2022/23 Siemens Mobility GmbH
 *
 *	* THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
 *
 * Outgoing path:
 * tty -> DLCI fifo -> scheduler -> GSM MUX data queue    ---o-> ldisc
 *                     control message -> GSM MUX control queue --´
 *
 * Incoming path:
 * ldisc -> gsm_queue() -o--> tty
 *                       `-> gsm_control_response()
 *
 * TO DO:
 *	Mostly done:	ioctls for setting modes/timing
 *	Partly done:	hooks so you can pull off frames to non tty devs
 *	Restart DLCI 0 when it closes ?
 *	Improve the tx engine
 *	Resolve tx side locking by adding a queue_head and routing
 *	all control traffic via it
 *	General tidy/document
 *	Review the locking/move to refcounts more (mux now moved to an
 *	alloc/free model ready)
 *	Use newest tty open/close port helpers and install hooks
 *	What to do about power functions ?
 *	Termios setting and negotiation
 *	Do we need a 'which mux are you' ioctl to correlate mux and tty sets
 *
 */

#include <linux/types.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/fcntl.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/math.h>
#include <linux/nospec.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/bitops.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/skbuff.h>
#include <net/arp.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/gsmmux.h>
#include "tty.h"

static int debug;
module_param(debug, int, 0600);

/* Module debug bits */
#define DBG_DUMP	BIT(0) /* Data transmission dump. */
#define DBG_CD_ON	BIT(1) /* Always assume CD line on. */
#define DBG_DATA	BIT(2) /* Data transmission details. */
#define DBG_ERRORS	BIT(3) /* Details for fail conditions. */
#define DBG_TTY		BIT(4) /* Transmission statistics for DLCI TTYs. */
#define DBG_PAYLOAD	BIT(5) /* Limits DBG_DUMP to payload frames. */

/* Defaults: these are from the specification */

#define T1	10		/* 100mS */
#define T2	34		/* 333mS */
#define T3	10		/* 10s */
#define N2	3		/* Retry 3 times */
#define K	2		/* outstanding I frames */

#define MAX_T3	255		/* In seconds. */
#define MAX_WINDOW_SIZE	7	/* Limit of K in error recovery mode.
*/ /* Use long timers for testing at low speed with debug on */ #ifdef DEBUG_TIMING #define T1 100 #define T2 200 #endif /* * Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte * limits so this is plenty */ #define MAX_MRU 1500 #define MAX_MTU 1500 #define MIN_MTU (PROT_OVERHEAD + 1) /* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */ #define PROT_OVERHEAD 7 #define GSM_NET_TX_TIMEOUT (HZ*10) /* * struct gsm_mux_net - network interface * * Created when net interface is initialized. */ struct gsm_mux_net { struct kref ref; struct gsm_dlci *dlci; }; /* * Each block of data we have queued to go out is in the form of * a gsm_msg which holds everything we need in a link layer independent * format */ struct gsm_msg { struct list_head list; u8 addr; /* DLCI address + flags */ u8 ctrl; /* Control byte + flags */ unsigned int len; /* Length of data block (can be zero) */ u8 *data; /* Points into buffer but not at the start */ u8 buffer[]; }; enum gsm_dlci_state { DLCI_CLOSED, DLCI_WAITING_CONFIG, /* Waiting for DLCI configuration from user */ DLCI_CONFIGURE, /* Sending PN (for adaption > 1) */ DLCI_OPENING, /* Sending SABM not seen UA */ DLCI_OPEN, /* SABM/UA complete */ DLCI_CLOSING, /* Sending DISC not seen UA/DM */ }; enum gsm_dlci_mode { DLCI_MODE_ABM, /* Normal Asynchronous Balanced Mode */ DLCI_MODE_ADM, /* Asynchronous Disconnected Mode */ }; /* * Each active data link has a gsm_dlci structure associated which ties * the link layer to an optional tty (if the tty side is open). To avoid * complexity right now these are only ever freed up when the mux is * shut down. * * At the moment we don't free DLCI objects until the mux is torn down * this avoid object life time issues but might be worth review later. */ struct gsm_dlci { struct gsm_mux *gsm; int addr; enum gsm_dlci_state state; struct mutex mutex; /* Link layer */ enum gsm_dlci_mode mode; spinlock_t lock; /* Protects the internal state */ struct timer_list t1; /* Retransmit timer for SABM and UA */ int retries; /* Uplink tty if active */ struct tty_port port; /* The tty bound to this DLCI if there is one */ #define TX_SIZE 4096 /* Must be power of 2. */ struct kfifo fifo; /* Queue fifo for the DLCI */ int adaption; /* Adaption layer in use */ int prev_adaption; u32 modem_rx; /* Our incoming virtual modem lines */ u32 modem_tx; /* Our outgoing modem lines */ unsigned int mtu; bool dead; /* Refuse re-open */ /* Configuration */ u8 prio; /* Priority */ u8 ftype; /* Frame type */ u8 k; /* Window size */ /* Flow control */ bool throttled; /* Private copy of throttle state */ bool constipated; /* Throttle status for outgoing */ /* Packetised I/O */ struct sk_buff *skb; /* Frame being sent */ struct sk_buff_head skb_list; /* Queued frames */ /* Data handling callback */ void (*data)(struct gsm_dlci *dlci, const u8 *data, int len); void (*prev_data)(struct gsm_dlci *dlci, const u8 *data, int len); struct net_device *net; /* network interface, if created */ }; /* * Parameter bits used for parameter negotiation according to 3GPP 27.010 * chapter 5.4.6.3.1. 
*/ struct gsm_dlci_param_bits { u8 d_bits; u8 i_cl_bits; u8 p_bits; u8 t_bits; __le16 n_bits; u8 na_bits; u8 k_bits; }; static_assert(sizeof(struct gsm_dlci_param_bits) == 8); #define PN_D_FIELD_DLCI GENMASK(5, 0) #define PN_I_CL_FIELD_FTYPE GENMASK(3, 0) #define PN_I_CL_FIELD_ADAPTION GENMASK(7, 4) #define PN_P_FIELD_PRIO GENMASK(5, 0) #define PN_T_FIELD_T1 GENMASK(7, 0) #define PN_N_FIELD_N1 GENMASK(15, 0) #define PN_NA_FIELD_N2 GENMASK(7, 0) #define PN_K_FIELD_K GENMASK(2, 0) /* Total number of supported devices */ #define GSM_TTY_MINORS 256 /* DLCI 0, 62/63 are special or reserved see gsmtty_open */ #define NUM_DLCI 64 /* * DLCI 0 is used to pass control blocks out of band of the data * flow (and with a higher link priority). One command can be outstanding * at a time and we use this structure to manage them. They are created * and destroyed by the user context, and updated by the receive paths * and timers */ struct gsm_control { u8 cmd; /* Command we are issuing */ u8 *data; /* Data for the command in case we retransmit */ int len; /* Length of block for retransmission */ int done; /* Done flag */ int error; /* Error if any */ }; enum gsm_encoding { GSM_BASIC_OPT, GSM_ADV_OPT, }; enum gsm_mux_state { GSM_SEARCH, GSM0_ADDRESS, GSM0_CONTROL, GSM0_LEN0, GSM0_LEN1, GSM0_DATA, GSM0_FCS, GSM0_SSOF, GSM1_START, GSM1_ADDRESS, GSM1_CONTROL, GSM1_DATA, GSM1_OVERRUN, }; /* * Each GSM mux we have is represented by this structure. If we are * operating as an ldisc then we use this structure as our ldisc * state. We need to sort out lifetimes and locking with respect * to the gsm mux array. For now we don't free DLCI objects that * have been instantiated until the mux itself is terminated. * * To consider further: tty open versus mux shutdown. */ struct gsm_mux { struct tty_struct *tty; /* The tty our ldisc is bound to */ spinlock_t lock; struct mutex mutex; unsigned int num; struct kref ref; /* Events on the GSM channel */ wait_queue_head_t event; /* ldisc send work */ struct work_struct tx_work; /* Bits for GSM mode decoding */ /* Framing Layer */ u8 *buf; enum gsm_mux_state state; unsigned int len; unsigned int address; unsigned int count; bool escape; enum gsm_encoding encoding; u8 control; u8 fcs; u8 *txframe; /* TX framing buffer */ /* Method for the receiver side */ void (*receive)(struct gsm_mux *gsm, u8 ch); /* Link Layer */ unsigned int mru; unsigned int mtu; int initiator; /* Did we initiate connection */ bool dead; /* Has the mux been shut down */ struct gsm_dlci *dlci[NUM_DLCI]; int old_c_iflag; /* termios c_iflag value before attach */ bool constipated; /* Asked by remote to shut up */ bool has_devices; /* Devices were registered */ spinlock_t tx_lock; unsigned int tx_bytes; /* TX data outstanding */ #define TX_THRESH_HI 8192 #define TX_THRESH_LO 2048 struct list_head tx_ctrl_list; /* Pending control packets */ struct list_head tx_data_list; /* Pending data packets */ /* Control messages */ struct timer_list kick_timer; /* Kick TX queuing on timeout */ struct timer_list t2_timer; /* Retransmit timer for commands */ int cretries; /* Command retry counter */ struct gsm_control *pending_cmd;/* Our current pending command */ spinlock_t control_lock; /* Protects the pending command */ /* Keep-alive */ struct timer_list ka_timer; /* Keep-alive response timer */ u8 ka_num; /* Keep-alive match pattern */ signed int ka_retries; /* Keep-alive retry counter, -1 if not yet initialized */ /* Configuration */ int adaption; /* 1 or 2 supported */ u8 ftype; /* UI or UIH */ int t1, t2; /* Timers in 
1/100th of a sec */ unsigned int t3; /* Power wake-up timer in seconds. */ int n2; /* Retry count */ u8 k; /* Window size */ bool wait_config; /* Wait for configuration by ioctl before DLCI open */ u32 keep_alive; /* Control channel keep-alive in 10ms */ /* Statistics (not currently exposed) */ unsigned long bad_fcs; unsigned long malformed; unsigned long io_error; unsigned long open_error; unsigned long bad_size; unsigned long unsupported; }; /* * Mux objects - needed so that we can translate a tty index into the * relevant mux and DLCI. */ #define MAX_MUX 4 /* 256 minors */ static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */ static DEFINE_SPINLOCK(gsm_mux_lock); static struct tty_driver *gsm_tty_driver; /* * This section of the driver logic implements the GSM encodings * both the basic and the 'advanced'. Reliable transport is not * supported. */ #define CR 0x02 #define EA 0x01 #define PF 0x10 /* I is special: the rest are ..*/ #define RR 0x01 #define UI 0x03 #define RNR 0x05 #define REJ 0x09 #define DM 0x0F #define SABM 0x2F #define DISC 0x43 #define UA 0x63 #define UIH 0xEF /* Channel commands */ #define CMD_NSC 0x09 #define CMD_TEST 0x11 #define CMD_PSC 0x21 #define CMD_RLS 0x29 #define CMD_FCOFF 0x31 #define CMD_PN 0x41 #define CMD_RPN 0x49 #define CMD_FCON 0x51 #define CMD_CLD 0x61 #define CMD_SNC 0x69 #define CMD_MSC 0x71 /* Virtual modem bits */ #define MDM_FC 0x01 #define MDM_RTC 0x02 #define MDM_RTR 0x04 #define MDM_IC 0x20 #define MDM_DV 0x40 #define GSM0_SOF 0xF9 #define GSM1_SOF 0x7E #define GSM1_ESCAPE 0x7D #define GSM1_ESCAPE_BITS 0x20 #define XON 0x11 #define XOFF 0x13 #define ISO_IEC_646_MASK 0x7F static const struct tty_port_operations gsm_port_ops; /* * CRC table for GSM 0710 */ static const u8 gsm_fcs8[256] = { 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75, 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B, 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69, 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67, 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D, 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43, 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51, 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F, 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05, 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B, 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19, 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17, 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D, 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33, 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21, 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F, 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95, 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B, 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89, 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87, 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD, 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3, 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1, 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF, 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5, 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB, 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9, 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7, 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD, 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3, 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1, 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF }; #define INIT_FCS 0xFF #define GOOD_FCS 0xCF static void gsm_dlci_close(struct gsm_dlci *dlci); static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len); static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk); static struct gsm_msg 
*gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, u8 ctrl); static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg); static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr); static void gsmld_write_trigger(struct gsm_mux *gsm); static void gsmld_write_task(struct work_struct *work); /** * gsm_fcs_add - update FCS * @fcs: Current FCS * @c: Next data * * Update the FCS to include c. Uses the algorithm in the specification * notes. */ static inline u8 gsm_fcs_add(u8 fcs, u8 c) { return gsm_fcs8[fcs ^ c]; } /** * gsm_fcs_add_block - update FCS for a block * @fcs: Current FCS * @c: buffer of data * @len: length of buffer * * Update the FCS to include c. Uses the algorithm in the specification * notes. */ static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len) { while (len--) fcs = gsm_fcs8[fcs ^ *c++]; return fcs; } /** * gsm_read_ea - read a byte into an EA * @val: variable holding value * @c: byte going into the EA * * Processes one byte of an EA. Updates the passed variable * and returns 1 if the EA is now completely read */ static int gsm_read_ea(unsigned int *val, u8 c) { /* Add the next 7 bits into the value */ *val <<= 7; *val |= c >> 1; /* Was this the last byte of the EA 1 = yes*/ return c & EA; } /** * gsm_read_ea_val - read a value until EA * @val: variable holding value * @data: buffer of data * @dlen: length of data * * Processes an EA value. Updates the passed variable and * returns the processed data length. */ static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen) { unsigned int len = 0; for (; dlen > 0; dlen--) { len++; if (gsm_read_ea(val, *data++)) break; } return len; } /** * gsm_encode_modem - encode modem data bits * @dlci: DLCI to encode from * * Returns the correct GSM encoded modem status bits (6 bit field) for * the current status of the DLCI and attached tty object */ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) { u8 modembits = 0; /* FC is true flow control not modem bits */ if (dlci->throttled) modembits |= MDM_FC; if (dlci->modem_tx & TIOCM_DTR) modembits |= MDM_RTC; if (dlci->modem_tx & TIOCM_RTS) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; /* special mappings for passive side to operate as UE */ if (dlci->modem_tx & TIOCM_OUT1) modembits |= MDM_IC; if (dlci->modem_tx & TIOCM_OUT2) modembits |= MDM_DV; return modembits; } static void gsm_hex_dump_bytes(const char *fname, const u8 *data, unsigned long len) { char *prefix; if (!fname) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len, true); return; } prefix = kasprintf(GFP_ATOMIC, "%s: ", fname); if (!prefix) return; print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len, true); kfree(prefix); } /** * gsm_encode_params - encode DLCI parameters * @dlci: DLCI to encode from * @params: buffer to fill with the encoded parameters * * Encodes the parameters according to GSM 07.10 section 5.4.6.3.1 * table 3. */ static int gsm_encode_params(const struct gsm_dlci *dlci, struct gsm_dlci_param_bits *params) { const struct gsm_mux *gsm = dlci->gsm; unsigned int i, cl; switch (dlci->ftype) { case UIH: i = 0; /* UIH */ break; case UI: i = 1; /* UI */ break; default: pr_debug("unsupported frame type %d\n", dlci->ftype); return -EINVAL; } switch (dlci->adaption) { case 1: /* Unstructured */ cl = 0; /* convergence layer type 1 */ break; case 2: /* Unstructured with modem bits. 
*/ cl = 1; /* convergence layer type 2 */ break; default: pr_debug("unsupported adaption %d\n", dlci->adaption); return -EINVAL; } params->d_bits = FIELD_PREP(PN_D_FIELD_DLCI, dlci->addr); /* UIH, convergence layer type 1 */ params->i_cl_bits = FIELD_PREP(PN_I_CL_FIELD_FTYPE, i) | FIELD_PREP(PN_I_CL_FIELD_ADAPTION, cl); params->p_bits = FIELD_PREP(PN_P_FIELD_PRIO, dlci->prio); params->t_bits = FIELD_PREP(PN_T_FIELD_T1, gsm->t1); params->n_bits = cpu_to_le16(FIELD_PREP(PN_N_FIELD_N1, dlci->mtu)); params->na_bits = FIELD_PREP(PN_NA_FIELD_N2, gsm->n2); params->k_bits = FIELD_PREP(PN_K_FIELD_K, dlci->k); return 0; } /** * gsm_register_devices - register all tty devices for a given mux index * * @driver: the tty driver that describes the tty devices * @index: the mux number is used to calculate the minor numbers of the * ttys for this mux and may differ from the position in the * mux array. */ static int gsm_register_devices(struct tty_driver *driver, unsigned int index) { struct device *dev; int i; unsigned int base; if (!driver || index >= MAX_MUX) return -EINVAL; base = index * NUM_DLCI; /* first minor for this index */ for (i = 1; i < NUM_DLCI; i++) { /* Don't register device 0 - this is the control channel * and not a usable tty interface */ dev = tty_register_device(gsm_tty_driver, base + i, NULL); if (IS_ERR(dev)) { if (debug & DBG_ERRORS) pr_info("%s failed to register device minor %u", __func__, base + i); for (i--; i >= 1; i--) tty_unregister_device(gsm_tty_driver, base + i); return PTR_ERR(dev); } } return 0; } /** * gsm_unregister_devices - unregister all tty devices for a given mux index * * @driver: the tty driver that describes the tty devices * @index: the mux number is used to calculate the minor numbers of the * ttys for this mux and may differ from the position in the * mux array. */ static void gsm_unregister_devices(struct tty_driver *driver, unsigned int index) { int i; unsigned int base; if (!driver || index >= MAX_MUX) return; base = index * NUM_DLCI; /* first minor for this index */ for (i = 1; i < NUM_DLCI; i++) { /* Don't unregister device 0 - this is the control * channel and not a usable tty interface */ tty_unregister_device(gsm_tty_driver, base + i); } } /** * gsm_print_packet - display a frame for debug * @hdr: header to print before decode * @addr: address EA from the frame * @cr: C/R bit seen as initiator * @control: control including PF bit * @data: following data bytes * @dlen: length of data * * Displays a packet in human readable format for debugging purposes. The * style is based on amateur radio LAP-B dump display. 
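 *
 * Example (illustrative): an SABM command with the P bit, sent by the
 * initiator on DLCI 1, is shown roughly as "<hdr> 1) C: SABM(P)",
 * followed by a hex dump of any payload bytes.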
*/ static void gsm_print_packet(const char *hdr, int addr, int cr, u8 control, const u8 *data, int dlen) { if (!(debug & DBG_DUMP)) return; /* Only show user payload frames if debug & DBG_PAYLOAD */ if (!(debug & DBG_PAYLOAD) && addr != 0) if ((control & ~PF) == UI || (control & ~PF) == UIH) return; pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]); switch (control & ~PF) { case SABM: pr_cont("SABM"); break; case UA: pr_cont("UA"); break; case DISC: pr_cont("DISC"); break; case DM: pr_cont("DM"); break; case UI: pr_cont("UI"); break; case UIH: pr_cont("UIH"); break; default: if (!(control & 0x01)) { pr_cont("I N(S)%d N(R)%d", (control & 0x0E) >> 1, (control & 0xE0) >> 5); } else switch (control & 0x0F) { case RR: pr_cont("RR(%d)", (control & 0xE0) >> 5); break; case RNR: pr_cont("RNR(%d)", (control & 0xE0) >> 5); break; case REJ: pr_cont("REJ(%d)", (control & 0xE0) >> 5); break; default: pr_cont("[%02X]", control); } } if (control & PF) pr_cont("(P)"); else pr_cont("(F)"); gsm_hex_dump_bytes(NULL, data, dlen); } /* * Link level transmission side */ /** * gsm_stuff_frame - bytestuff a packet * @input: input buffer * @output: output buffer * @len: length of input * * Expand a buffer by bytestuffing it. The worst case size change * is doubling and the caller is responsible for handing out * suitable sized buffers. */ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) { int olen = 0; while (len--) { if (*input == GSM1_SOF || *input == GSM1_ESCAPE || (*input & ISO_IEC_646_MASK) == XON || (*input & ISO_IEC_646_MASK) == XOFF) { *output++ = GSM1_ESCAPE; *output++ = *input++ ^ GSM1_ESCAPE_BITS; olen++; } else *output++ = *input++; olen++; } return olen; } /** * gsm_send - send a control frame * @gsm: our GSM mux * @addr: address for control frame * @cr: command/response bit seen as initiator * @control: control byte including PF bit * * Format up and transmit a control frame. These should be transmitted * ahead of data when they are needed. */ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) { struct gsm_msg *msg; u8 *dp; int ocr; unsigned long flags; msg = gsm_data_alloc(gsm, addr, 0, control); if (!msg) return -ENOMEM; /* toggle C/R coding if not initiator */ ocr = cr ^ (gsm->initiator ? 0 : 1); msg->data -= 3; dp = msg->data; *dp++ = (addr << 2) | (ocr << 1) | EA; *dp++ = control; if (gsm->encoding == GSM_BASIC_OPT) *dp++ = EA; /* Length of data = 0 */ *dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data); msg->len = (dp - msg->data) + 1; gsm_print_packet("Q->", addr, cr, control, NULL, 0); spin_lock_irqsave(&gsm->tx_lock, flags); list_add_tail(&msg->list, &gsm->tx_ctrl_list); gsm->tx_bytes += msg->len; spin_unlock_irqrestore(&gsm->tx_lock, flags); gsmld_write_trigger(gsm); return 0; } /** * gsm_dlci_clear_queues - remove outstanding data for a DLCI * @gsm: mux * @dlci: clear for this DLCI * * Clears the data queues for a given DLCI. 
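 *
 * This covers both the DLCI's own write kfifo and any of its data
 * packets already queued on the mux tx_data_list.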
*/ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg, *nmsg; int addr = dlci->addr; unsigned long flags; /* Clear DLCI write fifo first */ spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); spin_unlock_irqrestore(&dlci->lock, flags); /* Clear data packets in MUX write queue */ spin_lock_irqsave(&gsm->tx_lock, flags); list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) { if (msg->addr != addr) continue; gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); } spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** * gsm_response - send a control response * @gsm: our GSM mux * @addr: address for control frame * @control: control byte including PF bit * * Format up and transmit a link level response frame. */ static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) { gsm_send(gsm, addr, 0, control); } /** * gsm_command - send a control command * @gsm: our GSM mux * @addr: address for control frame * @control: control byte including PF bit * * Format up and transmit a link level command frame. */ static inline void gsm_command(struct gsm_mux *gsm, int addr, int control) { gsm_send(gsm, addr, 1, control); } /* Data transmission */ #define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */ /** * gsm_data_alloc - allocate data frame * @gsm: GSM mux * @addr: DLCI address * @len: length excluding header and FCS * @ctrl: control byte * * Allocate a new data buffer for sending frames with data. Space is left * at the front for header bytes but that is treated as an implementation * detail and not for the high level code to use */ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, u8 ctrl) { struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN, GFP_ATOMIC); if (m == NULL) return NULL; m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */ m->len = len; m->addr = addr; m->ctrl = ctrl; INIT_LIST_HEAD(&m->list); return m; } /** * gsm_send_packet - sends a single packet * @gsm: GSM Mux * @msg: packet to send * * The given packet is encoded and sent out. No memory is freed. * The caller must hold the gsm tx lock. */ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg) { int len, ret; if (gsm->encoding == GSM_BASIC_OPT) { gsm->txframe[0] = GSM0_SOF; memcpy(gsm->txframe + 1, msg->data, msg->len); gsm->txframe[msg->len + 1] = GSM0_SOF; len = msg->len + 2; } else { gsm->txframe[0] = GSM1_SOF; len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len); gsm->txframe[len + 1] = GSM1_SOF; len += 2; } if (debug & DBG_DATA) gsm_hex_dump_bytes(__func__, gsm->txframe, len); gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data, msg->len); ret = gsmld_output(gsm, gsm->txframe, len); if (ret <= 0) return ret; /* FIXME: Can eliminate one SOF in many more cases */ gsm->tx_bytes -= msg->len; return 0; } /** * gsm_is_flow_ctrl_msg - checks if flow control message * @msg: message to check * * Returns true if the given message is a flow control command of the * control channel. False is returned in any other case. 
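 *
 * For example, a queued DLCI 0 UI/UIH frame whose payload decodes to the
 * CMD_FCON or CMD_FCOFF command EA counts as flow control, while any
 * message with msg->addr > 0 does not.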
*/ static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg) { unsigned int cmd; if (msg->addr > 0) return false; switch (msg->ctrl & ~PF) { case UI: case UIH: cmd = 0; if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1) break; switch (cmd & ~PF) { case CMD_FCOFF: case CMD_FCON: return true; } break; } return false; } /** * gsm_data_kick - poke the queue * @gsm: GSM Mux * * The tty device has called us to indicate that room has appeared in * the transmit queue. Ram more data into the pipe if we have any. * If we have been flow-stopped by a CMD_FCOFF, then we can only * send messages on DLCI0 until CMD_FCON. The caller must hold * the gsm tx lock. */ static int gsm_data_kick(struct gsm_mux *gsm) { struct gsm_msg *msg, *nmsg; struct gsm_dlci *dlci; int ret; clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags); /* Serialize control messages and control channel messages first */ list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) { if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg)) continue; ret = gsm_send_packet(gsm, msg); switch (ret) { case -ENOSPC: return -ENOSPC; case -ENODEV: /* ldisc not open */ gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); continue; default: if (ret >= 0) { list_del(&msg->list); kfree(msg); } break; } } if (gsm->constipated) return -EAGAIN; /* Serialize other channels */ if (list_empty(&gsm->tx_data_list)) return 0; list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) { dlci = gsm->dlci[msg->addr]; /* Send only messages for DLCIs with valid state */ if (dlci->state != DLCI_OPEN) { gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); continue; } ret = gsm_send_packet(gsm, msg); switch (ret) { case -ENOSPC: return -ENOSPC; case -ENODEV: /* ldisc not open */ gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); continue; default: if (ret >= 0) { list_del(&msg->list); kfree(msg); } break; } } return 1; } /** * __gsm_data_queue - queue a UI or UIH frame * @dlci: DLCI sending the data * @msg: message queued * * Add data to the transmit queue and try and get stuff moving * out of the mux tty if not already doing so. The Caller must hold * the gsm tx lock. 
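 *
 * Rough sketch of the resulting frame layout for the basic option with a
 * short payload (len < 128):
 *
 *   [ADDR byte] [CTRL byte] [LEN << 1 | EA] [payload ...] [FCS]
 *
 * The header bytes are written backwards in front of msg->data and the
 * FCS byte is appended directly after the payload.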
*/ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { struct gsm_mux *gsm = dlci->gsm; u8 *dp = msg->data; u8 *fcs = dp + msg->len; /* Fill in the header */ if (gsm->encoding == GSM_BASIC_OPT) { if (msg->len < 128) *--dp = (msg->len << 1) | EA; else { *--dp = (msg->len >> 7); /* bits 7 - 15 */ *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */ } } *--dp = msg->ctrl; if (gsm->initiator) *--dp = (msg->addr << 2) | CR | EA; else *--dp = (msg->addr << 2) | EA; *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp); /* Ugly protocol layering violation */ if (msg->ctrl == UI || msg->ctrl == (UI|PF)) *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len); *fcs = 0xFF - *fcs; gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl, msg->data, msg->len); /* Move the header back and adjust the length, also allow for the FCS now tacked on the end */ msg->len += (msg->data - dp) + 1; msg->data = dp; /* Add to the actual output queue */ switch (msg->ctrl & ~PF) { case UI: case UIH: if (msg->addr > 0) { list_add_tail(&msg->list, &gsm->tx_data_list); break; } fallthrough; default: list_add_tail(&msg->list, &gsm->tx_ctrl_list); break; } gsm->tx_bytes += msg->len; gsmld_write_trigger(gsm); mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100); } /** * gsm_data_queue - queue a UI or UIH frame * @dlci: DLCI sending the data * @msg: message queued * * Add data to the transmit queue and try and get stuff moving * out of the mux tty if not already doing so. Take the * the gsm tx lock and dlci lock. */ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { unsigned long flags; spin_lock_irqsave(&dlci->gsm->tx_lock, flags); __gsm_data_queue(dlci, msg); spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /** * gsm_dlci_data_output - try and push data out of a DLCI * @gsm: mux * @dlci: the DLCI to pull data from * * Pull data from a DLCI and send it into the transmit queue if there * is data. Keep to the MRU of the mux. This path handles the usual tty * interface which is a byte stream with optional modem data. * * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg; u8 *dp; int h, len, size; /* for modem bits without break data */ h = ((dlci->adaption == 1) ? 0 : 1); len = kfifo_len(&dlci->fifo); if (len == 0) return 0; /* MTU/MRU count only the data bits but watch adaption mode */ if ((len + h) > dlci->mtu) len = dlci->mtu - h; size = len + h; msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (!msg) return -ENOMEM; dp = msg->data; switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. * Always one byte as we never send inline break data */ *dp++ = (gsm_encode_modem(dlci) << 1) | EA; break; default: pr_err("%s: unsupported adaption %d\n", __func__, dlci->adaption); break; } WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len, &dlci->lock)); /* Notify upper layer about available send space. */ tty_port_tty_wakeup(&dlci->port); __gsm_data_queue(dlci, msg); /* Bytes of data we used up */ return size; } /** * gsm_dlci_data_output_framed - try and push data out of a DLCI * @gsm: mux * @dlci: the DLCI to pull data from * * Pull data from a DLCI and send it into the transmit queue if there * is data. Keep to the MRU of the mux. This path handles framed data * queued as skbuffs to the DLCI. * * Caller must hold the tx_lock of the mux. 
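 *
 * Note that for adaption 4 a leading flag byte carries the first/last
 * frame markers, so the payload carried per frame shrinks by one byte.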
*/ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg; u8 *dp; int len, size; int last = 0, first = 0; int overhead = 0; /* One byte per frame is used for B/F flags */ if (dlci->adaption == 4) overhead = 1; /* dlci->skb is locked by tx_lock */ if (dlci->skb == NULL) { dlci->skb = skb_dequeue_tail(&dlci->skb_list); if (dlci->skb == NULL) return 0; first = 1; } len = dlci->skb->len + overhead; /* MTU/MRU count only the data bits */ if (len > dlci->mtu) { if (dlci->adaption == 3) { /* Over long frame, bin it */ dev_kfree_skb_any(dlci->skb); dlci->skb = NULL; return 0; } len = dlci->mtu; } else last = 1; size = len + overhead; msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (msg == NULL) { skb_queue_tail(&dlci->skb_list, dlci->skb); dlci->skb = NULL; return -ENOMEM; } dp = msg->data; if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */ /* Flag byte to carry the start/end info */ *dp++ = last << 7 | first << 6 | 1; /* EA */ len--; } memcpy(dp, dlci->skb->data, len); skb_pull(dlci->skb, len); __gsm_data_queue(dlci, msg); if (last) { dev_kfree_skb_any(dlci->skb); dlci->skb = NULL; } return size; } /** * gsm_dlci_modem_output - try and push modem status out of a DLCI * @gsm: mux * @dlci: the DLCI to pull modem status from * @brk: break signal * * Push an empty frame in to the transmit queue to update the modem status * bits and to transmit an optional break. * * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, u8 brk) { u8 *dp = NULL; struct gsm_msg *msg; int size = 0; /* for modem bits without break data */ switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. */ size++; if (brk > 0) size++; break; default: pr_err("%s: unsupported adaption %d\n", __func__, dlci->adaption); return -EINVAL; } msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (!msg) { pr_err("%s: gsm_data_alloc error", __func__); return -ENOMEM; } dp = msg->data; switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. */ if (brk == 0) { *dp++ = (gsm_encode_modem(dlci) << 1) | EA; } else { *dp++ = gsm_encode_modem(dlci) << 1; *dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */ } break; default: /* Handled above */ break; } __gsm_data_queue(dlci, msg); return size; } /** * gsm_dlci_data_sweep - look for data to send * @gsm: the GSM mux * * Sweep the GSM mux channels in priority order looking for ones with * data to send. We could do with optimising this scan a bit. We aim * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit * TX_THRESH_LO we get called again * * FIXME: We should round robin between groups and in theory you can * renegotiate DLCI priorities with optional stuff. Needs optimising. 
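 *
 * The scan below restarts from the lowest DLCI after every successful
 * send, so lower numbered DLCIs can starve higher numbered ones.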
*/ static int gsm_dlci_data_sweep(struct gsm_mux *gsm) { /* Priority ordering: We should do priority with RR of the groups */ int i, len, ret = 0; bool sent; struct gsm_dlci *dlci; while (gsm->tx_bytes < TX_THRESH_HI) { for (sent = false, i = 1; i < NUM_DLCI; i++) { dlci = gsm->dlci[i]; /* skip unused or blocked channel */ if (!dlci || dlci->constipated) continue; /* skip channels with invalid state */ if (dlci->state != DLCI_OPEN) continue; /* count the sent data per adaption */ if (dlci->adaption < 3 && !dlci->net) len = gsm_dlci_data_output(gsm, dlci); else len = gsm_dlci_data_output_framed(gsm, dlci); /* on error exit */ if (len < 0) return ret; if (len > 0) { ret++; sent = true; /* The lower DLCs can starve the higher DLCs! */ break; } /* try next */ } if (!sent) break; } return ret; } /** * gsm_dlci_data_kick - transmit if possible * @dlci: DLCI to kick * * Transmit data from this DLCI if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. */ static void gsm_dlci_data_kick(struct gsm_dlci *dlci) { unsigned long flags; int sweep; if (dlci->constipated) return; spin_lock_irqsave(&dlci->gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO); if (dlci->gsm->tx_bytes == 0) { if (dlci->net) gsm_dlci_data_output_framed(dlci->gsm, dlci); else gsm_dlci_data_output(dlci->gsm, dlci); } if (sweep) gsm_dlci_data_sweep(dlci->gsm); spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /* * Control message processing */ /** * gsm_control_command - send a command frame to a control * @gsm: gsm channel * @cmd: the command to use * @data: data to follow encoded info * @dlen: length of data * * Encode up and queue a UI/UIH frame containing our command. */ static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data, int dlen) { struct gsm_msg *msg; struct gsm_dlci *dlci = gsm->dlci[0]; msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype); if (msg == NULL) return -ENOMEM; msg->data[0] = (cmd << 1) | CR | EA; /* Set C/R */ msg->data[1] = (dlen << 1) | EA; memcpy(msg->data + 2, data, dlen); gsm_data_queue(dlci, msg); return 0; } /** * gsm_control_reply - send a response frame to a control * @gsm: gsm channel * @cmd: the command to use * @data: data to follow encoded info * @dlen: length of data * * Encode up and queue a UI/UIH frame containing our response. */ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, int dlen) { struct gsm_msg *msg; struct gsm_dlci *dlci = gsm->dlci[0]; msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype); if (msg == NULL) return; msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ msg->data[1] = (dlen << 1) | EA; memcpy(msg->data + 2, data, dlen); gsm_data_queue(dlci, msg); } /** * gsm_process_modem - process received modem status * @tty: virtual tty bound to the DLCI * @dlci: DLCI to affect * @modem: modem bits (full EA) * @slen: number of signal octets * * Used when a modem control message or line state inline in adaption * layer 2 is processed. Sort out the local modem state and throttles */ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, u32 modem, int slen) { int mlines = 0; u8 brk = 0; int fc; /* The modem status command can either contain one octet (V.24 signals) * or two octets (V.24 signals + break signals). This is specified in * section 5.4.6.3.7 of the 07.10 mux spec. 
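 *
 * Since gsm_read_ea_val() shifts seven bits per EA octet, a two octet
 * status leaves the break octet in the low seven bits of the value and
 * the V.24 signal octet in bits 7..13, which is what the decode below
 * relies on.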
*/ if (slen == 1) modem = modem & 0x7f; else { brk = modem & 0x7f; modem = (modem >> 7) & 0x7f; } /* Flow control/ready to communicate */ fc = (modem & MDM_FC) || !(modem & MDM_RTR); if (fc && !dlci->constipated) { /* Need to throttle our output on this device */ dlci->constipated = true; } else if (!fc && dlci->constipated) { dlci->constipated = false; gsm_dlci_data_kick(dlci); } /* Map modem bits */ if (modem & MDM_RTC) mlines |= TIOCM_DSR | TIOCM_DTR; if (modem & MDM_RTR) mlines |= TIOCM_RTS | TIOCM_CTS; if (modem & MDM_IC) mlines |= TIOCM_RI; if (modem & MDM_DV) mlines |= TIOCM_CD; /* Carrier drop -> hangup */ if (tty) { if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD)) if (!C_CLOCAL(tty)) tty_hangup(tty); } if (brk & 0x01) tty_insert_flip_char(&dlci->port, 0, TTY_BREAK); dlci->modem_rx = mlines; wake_up_interruptible(&dlci->gsm->event); } /** * gsm_process_negotiation - process received parameters * @gsm: GSM channel * @addr: DLCI address * @cr: command/response * @params: encoded parameters from the parameter negotiation message * * Used when the response for our parameter negotiation command was * received. */ static int gsm_process_negotiation(struct gsm_mux *gsm, unsigned int addr, unsigned int cr, const struct gsm_dlci_param_bits *params) { struct gsm_dlci *dlci = gsm->dlci[addr]; unsigned int ftype, i, adaption, prio, n1, k; i = FIELD_GET(PN_I_CL_FIELD_FTYPE, params->i_cl_bits); adaption = FIELD_GET(PN_I_CL_FIELD_ADAPTION, params->i_cl_bits) + 1; prio = FIELD_GET(PN_P_FIELD_PRIO, params->p_bits); n1 = FIELD_GET(PN_N_FIELD_N1, get_unaligned_le16(¶ms->n_bits)); k = FIELD_GET(PN_K_FIELD_K, params->k_bits); if (n1 < MIN_MTU) { if (debug & DBG_ERRORS) pr_info("%s N1 out of range in PN\n", __func__); return -EINVAL; } switch (i) { case 0x00: ftype = UIH; break; case 0x01: ftype = UI; break; case 0x02: /* I frames are not supported */ if (debug & DBG_ERRORS) pr_info("%s unsupported I frame request in PN\n", __func__); gsm->unsupported++; return -EINVAL; default: if (debug & DBG_ERRORS) pr_info("%s i out of range in PN\n", __func__); return -EINVAL; } if (!cr && gsm->initiator) { if (adaption != dlci->adaption) { if (debug & DBG_ERRORS) pr_info("%s invalid adaption %d in PN\n", __func__, adaption); return -EINVAL; } if (prio != dlci->prio) { if (debug & DBG_ERRORS) pr_info("%s invalid priority %d in PN", __func__, prio); return -EINVAL; } if (n1 > gsm->mru || n1 > dlci->mtu) { /* We requested a frame size but the other party wants * to send larger frames. The standard allows only a * smaller response value than requested (5.4.6.3.1). */ if (debug & DBG_ERRORS) pr_info("%s invalid N1 %d in PN\n", __func__, n1); return -EINVAL; } dlci->mtu = n1; if (ftype != dlci->ftype) { if (debug & DBG_ERRORS) pr_info("%s invalid i %d in PN\n", __func__, i); return -EINVAL; } if (ftype != UI && ftype != UIH && k > dlci->k) { if (debug & DBG_ERRORS) pr_info("%s invalid k %d in PN\n", __func__, k); return -EINVAL; } dlci->k = k; } else if (cr && !gsm->initiator) { /* Only convergence layer type 1 and 2 are supported. 
*/ if (adaption != 1 && adaption != 2) { if (debug & DBG_ERRORS) pr_info("%s invalid adaption %d in PN\n", __func__, adaption); return -EINVAL; } dlci->adaption = adaption; if (n1 > gsm->mru) { /* Propose a smaller value */ dlci->mtu = gsm->mru; } else if (n1 > MAX_MTU) { /* Propose a smaller value */ dlci->mtu = MAX_MTU; } else { dlci->mtu = n1; } dlci->prio = prio; dlci->ftype = ftype; dlci->k = k; } else { return -EINVAL; } return 0; } /** * gsm_control_modem - modem status received * @gsm: GSM channel * @data: data following command * @clen: command length * * We have received a modem status control message. This is used by * the GSM mux protocol to pass virtual modem line status and optionally * to indicate break signals. Unpack it, convert to Linux representation * and if need be stuff a break message down the tty. */ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) { unsigned int addr = 0; unsigned int modem = 0; struct gsm_dlci *dlci; int len = clen; int cl = clen; const u8 *dp = data; struct tty_struct *tty; len = gsm_read_ea_val(&addr, data, cl); if (len < 1) return; addr >>= 1; /* Closed port, or invalid ? */ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) return; dlci = gsm->dlci[addr]; /* Must be at least one byte following the EA */ if ((cl - len) < 1) return; dp += len; cl -= len; /* get the modem status */ len = gsm_read_ea_val(&modem, dp, cl); if (len < 1) return; tty = tty_port_tty_get(&dlci->port); gsm_process_modem(tty, dlci, modem, cl); if (tty) { tty_wakeup(tty); tty_kref_put(tty); } gsm_control_reply(gsm, CMD_MSC, data, clen); } /** * gsm_control_negotiation - parameter negotiation received * @gsm: GSM channel * @cr: command/response flag * @data: data following command * @dlen: data length * * We have received a parameter negotiation message. This is used by * the GSM mux protocol to configure protocol parameters for a new DLCI. */ static void gsm_control_negotiation(struct gsm_mux *gsm, unsigned int cr, const u8 *data, unsigned int dlen) { unsigned int addr; struct gsm_dlci_param_bits pn_reply; struct gsm_dlci *dlci; struct gsm_dlci_param_bits *params; if (dlen < sizeof(struct gsm_dlci_param_bits)) { gsm->open_error++; return; } /* Invalid DLCI? */ params = (struct gsm_dlci_param_bits *)data; addr = FIELD_GET(PN_D_FIELD_DLCI, params->d_bits); if (addr == 0 || addr >= NUM_DLCI || !gsm->dlci[addr]) { gsm->open_error++; return; } dlci = gsm->dlci[addr]; /* Too late for parameter negotiation? */ if ((!cr && dlci->state == DLCI_OPENING) || dlci->state == DLCI_OPEN) { gsm->open_error++; return; } /* Process the received parameters */ if (gsm_process_negotiation(gsm, addr, cr, params) != 0) { /* Negotiation failed. Close the link. */ if (debug & DBG_ERRORS) pr_info("%s PN failed\n", __func__); gsm->open_error++; gsm_dlci_close(dlci); return; } if (cr) { /* Reply command with accepted parameters. 
*/ if (gsm_encode_params(dlci, &pn_reply) == 0) gsm_control_reply(gsm, CMD_PN, (const u8 *)&pn_reply, sizeof(pn_reply)); else if (debug & DBG_ERRORS) pr_info("%s PN invalid\n", __func__); } else if (dlci->state == DLCI_CONFIGURE) { /* Proceed with link setup by sending SABM before UA */ dlci->state = DLCI_OPENING; gsm_command(gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else { if (debug & DBG_ERRORS) pr_info("%s PN in invalid state\n", __func__); gsm->open_error++; } } /** * gsm_control_rls - remote line status * @gsm: GSM channel * @data: data bytes * @clen: data length * * The modem sends us a two byte message on the control channel whenever * it wishes to send us an error state from the virtual link. Stuff * this into the uplink tty if present */ static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen) { struct tty_port *port; unsigned int addr = 0; u8 bits; int len = clen; const u8 *dp = data; while (gsm_read_ea(&addr, *dp++) == 0) { len--; if (len == 0) return; } /* Must be at least one byte following ea */ len--; if (len <= 0) return; addr >>= 1; /* Closed port, or invalid ? */ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) return; /* No error ? */ bits = *dp; if ((bits & 1) == 0) return; port = &gsm->dlci[addr]->port; if (bits & 2) tty_insert_flip_char(port, 0, TTY_OVERRUN); if (bits & 4) tty_insert_flip_char(port, 0, TTY_PARITY); if (bits & 8) tty_insert_flip_char(port, 0, TTY_FRAME); tty_flip_buffer_push(port); gsm_control_reply(gsm, CMD_RLS, data, clen); } static void gsm_dlci_begin_close(struct gsm_dlci *dlci); /** * gsm_control_message - DLCI 0 control processing * @gsm: our GSM mux * @command: the command EA * @data: data beyond the command/length EAs * @clen: length * * Input processor for control messages from the other end of the link. 
* Processes the incoming request and queues a response frame or an * NSC response if not supported */ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, const u8 *data, int clen) { u8 buf[1]; switch (command) { case CMD_CLD: { struct gsm_dlci *dlci = gsm->dlci[0]; /* Modem wishes to close down */ if (dlci) { dlci->dead = true; gsm->dead = true; gsm_dlci_begin_close(dlci); } } break; case CMD_TEST: /* Modem wishes to test, reply with the data */ gsm_control_reply(gsm, CMD_TEST, data, clen); break; case CMD_FCON: /* Modem can accept data again */ gsm->constipated = false; gsm_control_reply(gsm, CMD_FCON, NULL, 0); /* Kick the link in case it is idling */ gsmld_write_trigger(gsm); break; case CMD_FCOFF: /* Modem wants us to STFU */ gsm->constipated = true; gsm_control_reply(gsm, CMD_FCOFF, NULL, 0); break; case CMD_MSC: /* Out of band modem line change indicator for a DLCI */ gsm_control_modem(gsm, data, clen); break; case CMD_RLS: /* Out of band error reception for a DLCI */ gsm_control_rls(gsm, data, clen); break; case CMD_PSC: /* Modem wishes to enter power saving state */ gsm_control_reply(gsm, CMD_PSC, NULL, 0); break; /* Optional commands */ case CMD_PN: /* Modem sends a parameter negotiation command */ gsm_control_negotiation(gsm, 1, data, clen); break; /* Optional unsupported commands */ case CMD_RPN: /* Remote port negotiation */ case CMD_SNC: /* Service negotiation command */ gsm->unsupported++; fallthrough; default: /* Reply to bad commands with an NSC */ buf[0] = command; gsm_control_reply(gsm, CMD_NSC, buf, 1); break; } } /** * gsm_control_response - process a response to our control * @gsm: our GSM mux * @command: the command (response) EA * @data: data beyond the command/length EA * @clen: length * * Process a response to an outstanding command. We only allow a single * control message in flight so this is fairly easy. All the clean up * is done by the caller, we just update the fields, flag it as done * and return */ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, const u8 *data, int clen) { struct gsm_control *ctrl; struct gsm_dlci *dlci; unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; dlci = gsm->dlci[0]; command |= 1; /* Does the reply match our command */ if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { /* Our command was replied to, kill the retry timer */ timer_delete(&gsm->t2_timer); gsm->pending_cmd = NULL; /* Rejected by the other end */ if (command == CMD_NSC) ctrl->error = -EOPNOTSUPP; ctrl->done = 1; wake_up(&gsm->event); /* Or did we receive the PN response to our PN command */ } else if (command == CMD_PN) { gsm_control_negotiation(gsm, 0, data, clen); /* Or did we receive the TEST response to our TEST command */ } else if (command == CMD_TEST && clen == 1 && *data == gsm->ka_num) { gsm->ka_retries = -1; /* trigger new keep-alive message */ if (dlci && !dlci->dead) mod_timer(&gsm->ka_timer, jiffies + gsm->keep_alive * HZ / 100); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_keep_alive - check timeout or start keep-alive * @t: timer contained in our gsm object * * Called off the keep-alive timer expiry signaling that our link * partner is not responding anymore. Link will be closed. * This is also called to startup our timer. 
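 *
 * In short: a CMD_TEST carrying the current ka_num pattern is sent and
 * retried up to n2 times at t2 intervals; a matching TEST response
 * re-arms the timer for the next keep-alive period, otherwise DLCI 0
 * is closed.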
*/ static void gsm_control_keep_alive(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, ka_timer); unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); if (gsm->ka_num && gsm->ka_retries == 0) { /* Keep-alive expired -> close the link */ if (debug & DBG_ERRORS) pr_debug("%s keep-alive timed out\n", __func__); spin_unlock_irqrestore(&gsm->control_lock, flags); if (gsm->dlci[0]) gsm_dlci_begin_close(gsm->dlci[0]); return; } else if (gsm->keep_alive && gsm->dlci[0] && !gsm->dlci[0]->dead) { if (gsm->ka_retries > 0) { /* T2 expired for keep-alive -> resend */ gsm->ka_retries--; } else { /* Start keep-alive timer */ gsm->ka_num++; if (!gsm->ka_num) gsm->ka_num++; gsm->ka_retries = (signed int)gsm->n2; } gsm_control_command(gsm, CMD_TEST, &gsm->ka_num, sizeof(gsm->ka_num)); mod_timer(&gsm->ka_timer, jiffies + gsm->t2 * HZ / 100); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_transmit - send control packet * @gsm: gsm mux * @ctrl: frame to send * * Send out a pending control command (called under control lock) */ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) { gsm_control_command(gsm, ctrl->cmd, ctrl->data, ctrl->len); } /** * gsm_control_retransmit - retransmit a control frame * @t: timer contained in our gsm object * * Called off the T2 timer expiry in order to retransmit control frames * that have been lost in the system somewhere. The control_lock protects * us from colliding with another sender or a receive completion event. * In that situation the timer may still occur in a small window but * gsm->pending_cmd will be NULL and we just let the timer expire. */ static void gsm_control_retransmit(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, t2_timer); struct gsm_control *ctrl; unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; if (ctrl) { if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) { gsm->pending_cmd = NULL; ctrl->error = -ETIMEDOUT; ctrl->done = 1; spin_unlock_irqrestore(&gsm->control_lock, flags); wake_up(&gsm->event); return; } gsm->cretries--; gsm_control_transmit(gsm, ctrl); mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_send - send a control frame on DLCI 0 * @gsm: the GSM channel * @command: command to send including CR bit * @data: bytes of data (must be kmalloced) * @clen: length of the block to send * * Queue and dispatch a control command. Only one command can be * active at a time. In theory more can be outstanding but the matching * gets really complicated so for now stick to one outstanding. 
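 *
 * Illustrative usage sketch (the command and buffer here are
 * hypothetical): a caller typically pairs this with gsm_control_wait()
 * to block for the response or a timeout, e.g.
 *
 *	ctrl = gsm_control_send(gsm, CMD_MSC, buf, len);
 *	if (ctrl)
 *		err = gsm_control_wait(gsm, ctrl);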
*/ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control), GFP_ATOMIC); unsigned long flags; if (ctrl == NULL) return NULL; retry: wait_event(gsm->event, gsm->pending_cmd == NULL); spin_lock_irqsave(&gsm->control_lock, flags); if (gsm->pending_cmd != NULL) { spin_unlock_irqrestore(&gsm->control_lock, flags); goto retry; } ctrl->cmd = command; ctrl->data = data; ctrl->len = clen; gsm->pending_cmd = ctrl; /* If DLCI0 is in ADM mode skip retries, it won't respond */ if (gsm->dlci[0]->mode == DLCI_MODE_ADM) gsm->cretries = 0; else gsm->cretries = gsm->n2; mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); gsm_control_transmit(gsm, ctrl); spin_unlock_irqrestore(&gsm->control_lock, flags); return ctrl; } /** * gsm_control_wait - wait for a control to finish * @gsm: GSM mux * @control: control we are waiting on * * Waits for the control to complete or time out. Frees any used * resources and returns 0 for success, or an error if the remote * rejected or ignored the request. */ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control) { int err; wait_event(gsm->event, control->done == 1); err = control->error; kfree(control); return err; } /* * DLCI level handling: Needs krefs */ /* * State transitions and timers */ /** * gsm_dlci_close - a DLCI has closed * @dlci: DLCI that closed * * Perform processing when moving a DLCI into closed state. If there * is an attached tty this is hung up */ static void gsm_dlci_close(struct gsm_dlci *dlci) { timer_delete(&dlci->t1); if (debug & DBG_ERRORS) pr_debug("DLCI %d goes closed.\n", dlci->addr); dlci->state = DLCI_CLOSED; /* Prevent us from sending data before the link is up again */ dlci->constipated = true; if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); gsm_dlci_clear_queues(dlci->gsm, dlci); /* Ensure that gsmtty_open() can return. */ tty_port_set_initialized(&dlci->port, false); wake_up_interruptible(&dlci->port.open_wait); } else { timer_delete(&dlci->gsm->ka_timer); dlci->gsm->dead = true; } /* A DLCI 0 close is a MUX termination so we need to kick that back to userspace somehow */ gsm_dlci_data_kick(dlci); wake_up_all(&dlci->gsm->event); } /** * gsm_dlci_open - a DLCI has opened * @dlci: DLCI that opened * * Perform processing when moving a DLCI into open state. */ static void gsm_dlci_open(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci->gsm; /* Note that SABM UA .. SABM UA first UA lost can mean that we go open -> open */ timer_delete(&dlci->t1); /* This will let a tty open continue */ dlci->state = DLCI_OPEN; dlci->constipated = false; if (debug & DBG_ERRORS) pr_debug("DLCI %d goes open.\n", dlci->addr); /* Send current modem state */ if (dlci->addr) { gsm_modem_update(dlci, 0); } else { /* Start keep-alive control */ gsm->ka_num = 0; gsm->ka_retries = -1; mod_timer(&gsm->ka_timer, jiffies + gsm->keep_alive * HZ / 100); } gsm_dlci_data_kick(dlci); wake_up(&dlci->gsm->event); } /** * gsm_dlci_negotiate - start parameter negotiation * @dlci: DLCI to open * * Starts the parameter negotiation for the new DLCI. This needs to be done * before the DLCI initialized the channel via SABM. */ static int gsm_dlci_negotiate(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci->gsm; struct gsm_dlci_param_bits params; int ret; ret = gsm_encode_params(dlci, ¶ms); if (ret != 0) return ret; /* We cannot asynchronous wait for the command response with * gsm_command() and gsm_control_wait() at this point. 
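 * Hence the plain gsm_control_command() below; the PN response is picked
 * up asynchronously by gsm_control_negotiation() when it arrives, and
 * retransmission is driven by the DLCI T1 timer while in DLCI_CONFIGURE
 * state.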
*/ ret = gsm_control_command(gsm, CMD_PN, (const u8 *)¶ms, sizeof(params)); return ret; } /** * gsm_dlci_t1 - T1 timer expiry * @t: timer contained in the DLCI that opened * * The T1 timer handles retransmits of control frames (essentially of * SABM and DISC). We resend the command until the retry count runs out * in which case an opening port goes back to closed and a closing port * is simply put into closed state (any further frames from the other * end will get a DM response) * * Some control dlci can stay in ADM mode with other dlci working just * fine. In that case we can just keep the control dlci open after the * DLCI_OPENING receives DM. */ static void gsm_dlci_t1(struct timer_list *t) { struct gsm_dlci *dlci = from_timer(dlci, t, t1); struct gsm_mux *gsm = dlci->gsm; switch (dlci->state) { case DLCI_CONFIGURE: if (dlci->retries && gsm_dlci_negotiate(dlci) == 0) { dlci->retries--; mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else { gsm->open_error++; gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; case DLCI_OPENING: if (!dlci->addr && gsm->control == (DM | PF)) { if (debug & DBG_ERRORS) pr_info("DLCI 0 opening in ADM mode.\n"); dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else if (dlci->retries) { if (!dlci->addr || !gsm->dlci[0] || gsm->dlci[0]->state != DLCI_OPENING) { dlci->retries--; gsm_command(dlci->gsm, dlci->addr, SABM|PF); } mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else { gsm->open_error++; gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; case DLCI_CLOSING: if (dlci->retries) { dlci->retries--; gsm_command(dlci->gsm, dlci->addr, DISC|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else gsm_dlci_close(dlci); break; default: pr_debug("%s: unhandled state: %d\n", __func__, dlci->state); break; } } /** * gsm_dlci_begin_open - start channel open procedure * @dlci: DLCI to open * * Commence opening a DLCI from the Linux side. We issue SABM messages * to the modem which should then reply with a UA or ADM, at which point * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. * Parameter negotiation is performed before SABM if required. */ static void gsm_dlci_begin_open(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci ? dlci->gsm : NULL; bool need_pn = false; if (!gsm) return; if (dlci->addr != 0) { if (gsm->adaption != 1 || gsm->adaption != dlci->adaption) need_pn = true; if (dlci->prio != (roundup(dlci->addr + 1, 8) - 1)) need_pn = true; if (gsm->ftype != dlci->ftype) need_pn = true; } switch (dlci->state) { case DLCI_CLOSED: case DLCI_WAITING_CONFIG: case DLCI_CLOSING: dlci->retries = gsm->n2; if (!need_pn) { dlci->state = DLCI_OPENING; if (!dlci->addr || !gsm->dlci[0] || gsm->dlci[0]->state != DLCI_OPENING) gsm_command(gsm, dlci->addr, SABM|PF); } else { /* Configure DLCI before setup */ dlci->state = DLCI_CONFIGURE; if (gsm_dlci_negotiate(dlci) != 0) { gsm_dlci_close(dlci); return; } } mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); break; default: break; } } /** * gsm_dlci_set_opening - change state to opening * @dlci: DLCI to open * * Change internal state to wait for DLCI open from initiator side. * We set off timers and responses upon reception of an SABM. 
 */
static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
{
	switch (dlci->state) {
	case DLCI_CLOSED:
	case DLCI_WAITING_CONFIG:
	case DLCI_CLOSING:
		dlci->state = DLCI_OPENING;
		break;
	default:
		break;
	}
}

/**
 * gsm_dlci_set_wait_config - wait for channel configuration
 * @dlci: DLCI to configure
 *
 * Wait for a DLCI configuration from the application.
 */
static void gsm_dlci_set_wait_config(struct gsm_dlci *dlci)
{
	switch (dlci->state) {
	case DLCI_CLOSED:
	case DLCI_CLOSING:
		dlci->state = DLCI_WAITING_CONFIG;
		break;
	default:
		break;
	}
}

/**
 * gsm_dlci_begin_close - start channel close procedure
 * @dlci: DLCI to close
 *
 * Commence closing a DLCI from the Linux side. We issue DISC messages
 * to the modem which should then reply with a UA, at which point we
 * will move into closed state. Closing is done asynchronously with
 * retries running off timers. We may also receive a DM reply from the
 * other end which indicates the channel was already closed.
 */
static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
{
	struct gsm_mux *gsm = dlci->gsm;
	if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
		return;
	dlci->retries = gsm->n2;
	dlci->state = DLCI_CLOSING;
	gsm_command(dlci->gsm, dlci->addr, DISC|PF);
	mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
	wake_up_interruptible(&gsm->event);
}

/**
 * gsm_dlci_data - data arrived
 * @dlci: channel
 * @data: block of bytes received
 * @clen: length of received block
 *
 * A UI or UIH frame has arrived which contains data for a channel
 * other than the control channel. If the relevant virtual tty is
 * open we shovel the bits down it, if not we drop them.
 */
static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
{
	/* krefs .. */
	struct tty_port *port = &dlci->port;
	struct tty_struct *tty;
	unsigned int modem = 0;
	int len;

	if (debug & DBG_TTY)
		pr_debug("%d bytes for tty\n", clen);
	switch (dlci->adaption) {
	/* Unsupported types */
	case 4:		/* Packetised interruptible data */
		break;
	case 3:		/* Packetised uninterruptible voice/data */
		break;
	case 2:		/* Asynchronous serial with line state in each frame */
		len = gsm_read_ea_val(&modem, data, clen);
		if (len < 1)
			return;
		tty = tty_port_tty_get(port);
		if (tty) {
			gsm_process_modem(tty, dlci, modem, len);
			tty_wakeup(tty);
			tty_kref_put(tty);
		}
		/* Skip processed modem data */
		data += len;
		clen -= len;
		fallthrough;
	case 1:		/* Line state will go via DLCI 0 controls only */
	default:
		tty_insert_flip_string(port, data, clen);
		tty_flip_buffer_push(port);
	}
}

/**
 * gsm_dlci_command - data arrived on control channel
 * @dlci: channel
 * @data: block of bytes received
 * @len: length of received block
 *
 * A UI or UIH frame has arrived which contains data for DLCI 0 the
 * control channel. This should contain a command EA followed by
 * control data bytes. The command EA contains a command/response bit
 * and we divide up the work accordingly.
 */
static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
{
	/* See what command is involved */
	unsigned int command = 0;
	unsigned int clen = 0;
	unsigned int dlen;

	/* read the command */
	dlen = gsm_read_ea_val(&command, data, len);
	len -= dlen;
	data += dlen;

	/* read any control data */
	dlen = gsm_read_ea_val(&clen, data, len);
	len -= dlen;
	data += dlen;

	/* Malformed command?
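	 * As an illustrative example, a single-octet length field of 0x09
	 * (EA bit set, value 0x09 >> 1 == 4) announces four octets of
	 * control data; if fewer than four octets actually remain in the
	 * frame the command is counted as malformed and dropped.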
*/ if (clen > len) { dlci->gsm->malformed++; return; } if (command & 1) gsm_control_message(dlci->gsm, command, data, clen); else gsm_control_response(dlci->gsm, command, data, clen); } /** * gsm_kick_timer - transmit if possible * @t: timer contained in our gsm object * * Transmit data from DLCIs if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. */ static void gsm_kick_timer(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, kick_timer); unsigned long flags; int sent = 0; spin_lock_irqsave(&gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ if (gsm->tx_bytes < TX_THRESH_LO) sent = gsm_dlci_data_sweep(gsm); spin_unlock_irqrestore(&gsm->tx_lock, flags); if (sent && debug & DBG_DATA) pr_info("%s TX queue stalled\n", __func__); } /** * gsm_dlci_copy_config_values - copy DLCI configuration * @dlci: source DLCI * @dc: configuration structure to fill */ static void gsm_dlci_copy_config_values(struct gsm_dlci *dlci, struct gsm_dlci_config *dc) { memset(dc, 0, sizeof(*dc)); dc->channel = (u32)dlci->addr; dc->adaption = (u32)dlci->adaption; dc->mtu = (u32)dlci->mtu; dc->priority = (u32)dlci->prio; if (dlci->ftype == UIH) dc->i = 1; else dc->i = 2; dc->k = (u32)dlci->k; } /** * gsm_dlci_config - configure DLCI from configuration * @dlci: DLCI to configure * @dc: DLCI configuration * @open: open DLCI after configuration? */ static int gsm_dlci_config(struct gsm_dlci *dlci, struct gsm_dlci_config *dc, int open) { struct gsm_mux *gsm; bool need_restart = false; bool need_open = false; unsigned int i; /* * Check that userspace doesn't put stuff in here to prevent breakages * in the future. */ for (i = 0; i < ARRAY_SIZE(dc->reserved); i++) if (dc->reserved[i]) return -EINVAL; if (!dlci) return -EINVAL; gsm = dlci->gsm; /* Stuff we don't support yet - I frame transport */ if (dc->adaption != 1 && dc->adaption != 2) return -EOPNOTSUPP; if (dc->mtu > MAX_MTU || dc->mtu < MIN_MTU || dc->mtu > gsm->mru) return -EINVAL; if (dc->priority >= 64) return -EINVAL; if (dc->i == 0 || dc->i > 2) /* UIH and UI only */ return -EINVAL; if (dc->k > 7) return -EINVAL; if (dc->flags & ~GSM_FL_RESTART) /* allow future extensions */ return -EINVAL; /* * See what is needed for reconfiguration */ /* Framing fields */ if (dc->adaption != dlci->adaption) need_restart = true; if (dc->mtu != dlci->mtu) need_restart = true; if (dc->i != dlci->ftype) need_restart = true; /* Requires care */ if (dc->priority != dlci->prio) need_restart = true; if (dc->flags & GSM_FL_RESTART) need_restart = true; if ((open && gsm->wait_config) || need_restart) need_open = true; if (dlci->state == DLCI_WAITING_CONFIG) { need_restart = false; need_open = true; } /* * Close down what is needed, restart and initiate the new * configuration. 
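	 * For example, changing the MTU, frame type or priority of an
	 * already open channel sets need_restart above: the DLCI is closed,
	 * we wait for it to reach DLCI_CLOSED, the new values are written
	 * and the channel is then re-opened (or marked as opening on the
	 * responder side).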
*/ if (need_restart) { gsm_dlci_begin_close(dlci); wait_event_interruptible(gsm->event, dlci->state == DLCI_CLOSED); if (signal_pending(current)) return -EINTR; } /* * Setup the new configuration values */ dlci->adaption = (int)dc->adaption; if (dc->mtu) dlci->mtu = (unsigned int)dc->mtu; else dlci->mtu = gsm->mtu; if (dc->priority) dlci->prio = (u8)dc->priority; else dlci->prio = roundup(dlci->addr + 1, 8) - 1; if (dc->i == 1) dlci->ftype = UIH; else if (dc->i == 2) dlci->ftype = UI; if (dc->k) dlci->k = (u8)dc->k; else dlci->k = gsm->k; if (need_open) { if (gsm->initiator) gsm_dlci_begin_open(dlci); else gsm_dlci_set_opening(dlci); } return 0; } /* * Allocate/Free DLCI channels */ /** * gsm_dlci_alloc - allocate a DLCI * @gsm: GSM mux * @addr: address of the DLCI * * Allocate and install a new DLCI object into the GSM mux. * * FIXME: review locking races */ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) { struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC); if (dlci == NULL) return NULL; spin_lock_init(&dlci->lock); mutex_init(&dlci->mutex); if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) { kfree(dlci); return NULL; } skb_queue_head_init(&dlci->skb_list); timer_setup(&dlci->t1, gsm_dlci_t1, 0); tty_port_init(&dlci->port); dlci->port.ops = &gsm_port_ops; dlci->gsm = gsm; dlci->addr = addr; dlci->adaption = gsm->adaption; dlci->mtu = gsm->mtu; if (addr == 0) dlci->prio = 0; else dlci->prio = roundup(addr + 1, 8) - 1; dlci->ftype = gsm->ftype; dlci->k = gsm->k; dlci->state = DLCI_CLOSED; if (addr) { dlci->data = gsm_dlci_data; /* Prevent us from sending data before the link is up */ dlci->constipated = true; } else { dlci->data = gsm_dlci_command; } gsm->dlci[addr] = dlci; return dlci; } /** * gsm_dlci_free - free DLCI * @port: tty port for DLCI to free * * Free up a DLCI. * * Can sleep. */ static void gsm_dlci_free(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); timer_shutdown_sync(&dlci->t1); dlci->gsm->dlci[dlci->addr] = NULL; kfifo_free(&dlci->fifo); while ((dlci->skb = skb_dequeue(&dlci->skb_list))) dev_kfree_skb(dlci->skb); kfree(dlci); } static inline void dlci_get(struct gsm_dlci *dlci) { tty_port_get(&dlci->port); } static inline void dlci_put(struct gsm_dlci *dlci) { tty_port_put(&dlci->port); } static void gsm_destroy_network(struct gsm_dlci *dlci); /** * gsm_dlci_release - release DLCI * @dlci: DLCI to destroy * * Release a DLCI. Actual free is deferred until either * mux is closed or tty is closed - whichever is last. * * Can sleep. */ static void gsm_dlci_release(struct gsm_dlci *dlci) { struct tty_struct *tty = tty_port_tty_get(&dlci->port); if (tty) { mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); /* We cannot use tty_hangup() because in tty_kref_put() the tty * driver assumes that the hangup queue is free and reuses it to * queue release_one_tty() -> NULL pointer panic in * process_one_work(). */ tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); } dlci->state = DLCI_CLOSED; dlci_put(dlci); } /* * LAPBish link layer logic */ /** * gsm_queue - a GSM frame is ready to process * @gsm: pointer to our gsm mux * * At this point in time a frame has arrived and been demangled from * the line encoding. All the differences between the encodings have * been handled below us and the frame is unpacked into the structures. * The fcs holds the header FCS but any data FCS must be added here. 
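 * As a worked illustration: for UIH frames the FCS only covers the
 * address, control and length fields, so the header FCS accumulated by
 * the receive state machine is already complete; for UI frames the
 * receive paths additionally fold the payload into the FCS (see the
 * gsm_fcs_add_block() calls in gsm0_receive()/gsm1_receive()) before
 * the final comparison against GOOD_FCS here.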
*/ static void gsm_queue(struct gsm_mux *gsm) { struct gsm_dlci *dlci; u8 cr; int address; if (gsm->fcs != GOOD_FCS) { gsm->bad_fcs++; if (debug & DBG_DATA) pr_debug("BAD FCS %02x\n", gsm->fcs); return; } address = gsm->address >> 1; if (address >= NUM_DLCI) goto invalid; cr = gsm->address & 1; /* C/R bit */ cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len); dlci = gsm->dlci[address]; switch (gsm->control) { case SABM|PF: if (cr == 1) { gsm->open_error++; goto invalid; } if (dlci == NULL) dlci = gsm_dlci_alloc(gsm, address); if (dlci == NULL) { gsm->open_error++; return; } if (dlci->dead) gsm_response(gsm, address, DM|PF); else { gsm_response(gsm, address, UA|PF); gsm_dlci_open(dlci); } break; case DISC|PF: if (cr == 1) goto invalid; if (dlci == NULL || dlci->state == DLCI_CLOSED) { gsm_response(gsm, address, DM|PF); return; } /* Real close complete */ gsm_response(gsm, address, UA|PF); gsm_dlci_close(dlci); break; case UA|PF: if (cr == 0 || dlci == NULL) break; switch (dlci->state) { case DLCI_CLOSING: gsm_dlci_close(dlci); break; case DLCI_OPENING: gsm_dlci_open(dlci); break; default: pr_debug("%s: unhandled state: %d\n", __func__, dlci->state); break; } break; case DM: /* DM can be valid unsolicited */ case DM|PF: if (cr) goto invalid; if (dlci == NULL) return; gsm_dlci_close(dlci); break; case UI: case UI|PF: case UIH: case UIH|PF: if (dlci == NULL || dlci->state != DLCI_OPEN) { gsm_response(gsm, address, DM|PF); return; } dlci->data(dlci, gsm->buf, gsm->len); break; default: goto invalid; } return; invalid: gsm->malformed++; return; } /** * gsm0_receive_state_check_and_fix - check and correct receive state * @gsm: gsm data for this ldisc instance * * Ensures that the current receive state is valid for basic option mode. 
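 * The states below mirror the basic option frame layout, which as an
 * informal sketch is:
 *
 *	flag | address EA | control | length EA(s) | data | FCS | flag
 *
 * with GSM0_SOF as the opening and closing flag byte.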
*/ static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm) { switch (gsm->state) { case GSM_SEARCH: case GSM0_ADDRESS: case GSM0_CONTROL: case GSM0_LEN0: case GSM0_LEN1: case GSM0_DATA: case GSM0_FCS: case GSM0_SSOF: break; default: gsm->state = GSM_SEARCH; break; } } /** * gsm0_receive - perform processing for non-transparency * @gsm: gsm data for this ldisc instance * @c: character * * Receive bytes in gsm mode 0 */ static void gsm0_receive(struct gsm_mux *gsm, u8 c) { unsigned int len; gsm0_receive_state_check_and_fix(gsm); switch (gsm->state) { case GSM_SEARCH: /* SOF marker */ if (c == GSM0_SOF) { gsm->state = GSM0_ADDRESS; gsm->address = 0; gsm->len = 0; gsm->fcs = INIT_FCS; } break; case GSM0_ADDRESS: /* Address EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) gsm->state = GSM0_CONTROL; break; case GSM0_CONTROL: /* Control Byte */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->control = c; gsm->state = GSM0_LEN0; break; case GSM0_LEN0: /* Length EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->len, c)) { if (gsm->len > gsm->mru) { gsm->bad_size++; gsm->state = GSM_SEARCH; break; } gsm->count = 0; if (!gsm->len) gsm->state = GSM0_FCS; else gsm->state = GSM0_DATA; break; } gsm->state = GSM0_LEN1; break; case GSM0_LEN1: gsm->fcs = gsm_fcs_add(gsm->fcs, c); len = c; gsm->len |= len << 7; if (gsm->len > gsm->mru) { gsm->bad_size++; gsm->state = GSM_SEARCH; break; } gsm->count = 0; if (!gsm->len) gsm->state = GSM0_FCS; else gsm->state = GSM0_DATA; break; case GSM0_DATA: /* Data */ gsm->buf[gsm->count++] = c; if (gsm->count >= MAX_MRU) { gsm->bad_size++; gsm->state = GSM_SEARCH; } else if (gsm->count >= gsm->len) { /* Calculate final FCS for UI frames over all data */ if ((gsm->control & ~PF) != UIH) { gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->count); } gsm->state = GSM0_FCS; } break; case GSM0_FCS: /* FCS follows the packet */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->state = GSM0_SSOF; break; case GSM0_SSOF: gsm->state = GSM_SEARCH; if (c == GSM0_SOF) gsm_queue(gsm); else gsm->bad_size++; break; default: pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); break; } } /** * gsm1_receive_state_check_and_fix - check and correct receive state * @gsm: gsm data for this ldisc instance * * Ensures that the current receive state is valid for advanced option mode. 
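 * The advanced option has no length field; as an informal sketch a
 * frame is:
 *
 *	flag | address EA | control | data + FCS | flag
 *
 * and any flag or escape byte inside the frame arrives escaped (a
 * GSM1_ESCAPE byte followed by the original byte XORed with
 * GSM1_ESCAPE_BITS), which gsm1_receive() undoes before feeding the
 * state machine.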
 */
static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm)
{
	switch (gsm->state) {
	case GSM_SEARCH:
	case GSM1_START:
	case GSM1_ADDRESS:
	case GSM1_CONTROL:
	case GSM1_DATA:
	case GSM1_OVERRUN:
		break;
	default:
		gsm->state = GSM_SEARCH;
		break;
	}
}

/**
 * gsm1_receive - perform processing for non-transparency
 * @gsm: gsm data for this ldisc instance
 * @c: character
 *
 * Receive bytes in mode 1 (Advanced option)
 */
static void gsm1_receive(struct gsm_mux *gsm, u8 c)
{
	gsm1_receive_state_check_and_fix(gsm);
	/* handle XON/XOFF */
	if ((c & ISO_IEC_646_MASK) == XON) {
		gsm->constipated = true;
		return;
	} else if ((c & ISO_IEC_646_MASK) == XOFF) {
		gsm->constipated = false;
		/* Kick the link in case it is idling */
		gsmld_write_trigger(gsm);
		return;
	}
	if (c == GSM1_SOF) {
		/* EOF is only valid in frame if we have got to the data state */
		if (gsm->state == GSM1_DATA) {
			if (gsm->count < 1) {
				/* Missing FCS */
				gsm->malformed++;
				gsm->state = GSM1_START;
				return;
			}
			/* Remove the FCS from data */
			gsm->count--;
			if ((gsm->control & ~PF) != UIH) {
				/* Calculate final FCS for UI frames over all
				 * data but FCS
				 */
				gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
							     gsm->count);
			}
			/* Add the FCS itself to test against GOOD_FCS */
			gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
			gsm->len = gsm->count;
			gsm_queue(gsm);
			gsm->state = GSM1_START;
			return;
		}
		/* Any partial frame was a runt so go back to start */
		if (gsm->state != GSM1_START) {
			if (gsm->state != GSM_SEARCH)
				gsm->malformed++;
			gsm->state = GSM1_START;
		}
		/* A SOF in GSM1_START means we are still reading idling or
		   framing bytes */
		return;
	}

	if (c == GSM1_ESCAPE) {
		gsm->escape = true;
		return;
	}

	/* Only an unescaped SOF gets us out of GSM search */
	if (gsm->state == GSM_SEARCH)
		return;

	if (gsm->escape) {
		c ^= GSM1_ESCAPE_BITS;
		gsm->escape = false;
	}
	switch (gsm->state) {
	case GSM1_START:	/* First byte after SOF */
		gsm->address = 0;
		gsm->state = GSM1_ADDRESS;
		gsm->fcs = INIT_FCS;
		fallthrough;
	case GSM1_ADDRESS:	/* Address continuation */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		if (gsm_read_ea(&gsm->address, c))
			gsm->state = GSM1_CONTROL;
		break;
	case GSM1_CONTROL:	/* Control Byte */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		gsm->control = c;
		gsm->count = 0;
		gsm->state = GSM1_DATA;
		break;
	case GSM1_DATA:		/* Data */
		if (gsm->count > gsm->mru || gsm->count > MAX_MRU) { /* Allow one for the FCS */
			gsm->state = GSM1_OVERRUN;
			gsm->bad_size++;
		} else
			gsm->buf[gsm->count++] = c;
		break;
	case GSM1_OVERRUN:	/* Over-long - eg a dropped SOF */
		break;
	default:
		pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
		break;
	}
}

/**
 * gsm_error - handle tty error
 * @gsm: ldisc data
 *
 * Handle an error in the receipt of data for a frame. Currently we just
 * go back to hunting for a SOF.
 *
 * FIXME: better diagnostics ?
 */
static void gsm_error(struct gsm_mux *gsm)
{
	gsm->state = GSM_SEARCH;
	gsm->io_error++;
}

/**
 * gsm_cleanup_mux - generic GSM protocol cleanup
 * @gsm: our mux
 * @disc: disconnect link?
 *
 * Clean up the bits of the mux which are the same for all framing
 * protocols. Remove the mux from the mux table, stop all the timers
 * and then shut down each device hanging up the channels as we go.
*/ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq; gsm->dead = true; mutex_lock(&gsm->mutex); dlci = gsm->dlci[0]; if (dlci) { if (disc && dlci->state != DLCI_CLOSED) { gsm_dlci_begin_close(dlci); wait_event(gsm->event, dlci->state == DLCI_CLOSED); } dlci->dead = true; } /* Finish outstanding timers, making sure they are done */ timer_delete_sync(&gsm->kick_timer); timer_delete_sync(&gsm->t2_timer); timer_delete_sync(&gsm->ka_timer); /* Finish writing to ldisc */ flush_work(&gsm->tx_work); /* Free up any link layer users and finally the control channel */ if (gsm->has_devices) { gsm_unregister_devices(gsm_tty_driver, gsm->num); gsm->has_devices = false; } for (i = NUM_DLCI - 1; i >= 0; i--) if (gsm->dlci[i]) gsm_dlci_release(gsm->dlci[i]); mutex_unlock(&gsm->mutex); /* Now wipe the queues */ tty_ldisc_flush(gsm->tty); guard(spinlock_irqsave)(&gsm->tx_lock); list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_ctrl_list); list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_data_list); } /** * gsm_activate_mux - generic GSM setup * @gsm: our mux * * Set up the bits of the mux which are the same for all framing * protocols. Add the mux to the mux table so it can be opened and * finally kick off connecting to DLCI 0 on the modem. */ static int gsm_activate_mux(struct gsm_mux *gsm) { struct gsm_dlci *dlci; int ret; dlci = gsm_dlci_alloc(gsm, 0); if (dlci == NULL) return -ENOMEM; if (gsm->encoding == GSM_BASIC_OPT) gsm->receive = gsm0_receive; else gsm->receive = gsm1_receive; ret = gsm_register_devices(gsm_tty_driver, gsm->num); if (ret) return ret; gsm->has_devices = true; gsm->dead = false; /* Tty opens are now permissible */ return 0; } /** * gsm_free_mux - free up a mux * @gsm: mux to free * * Dispose of allocated resources for a dead mux */ static void gsm_free_mux(struct gsm_mux *gsm) { int i; for (i = 0; i < MAX_MUX; i++) { if (gsm == gsm_mux[i]) { gsm_mux[i] = NULL; break; } } mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); } /** * gsm_free_muxr - free up a mux * @ref: kreference to the mux to free * * Dispose of allocated resources for a dead mux */ static void gsm_free_muxr(struct kref *ref) { struct gsm_mux *gsm = container_of(ref, struct gsm_mux, ref); gsm_free_mux(gsm); } static inline void mux_get(struct gsm_mux *gsm) { unsigned long flags; spin_lock_irqsave(&gsm_mux_lock, flags); kref_get(&gsm->ref); spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline void mux_put(struct gsm_mux *gsm) { unsigned long flags; spin_lock_irqsave(&gsm_mux_lock, flags); kref_put(&gsm->ref, gsm_free_muxr); spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline unsigned int mux_num_to_base(struct gsm_mux *gsm) { return gsm->num * NUM_DLCI; } static inline unsigned int mux_line_to_num(unsigned int line) { return line / NUM_DLCI; } /** * gsm_alloc_mux - allocate a mux * * Creates a new mux ready for activation. 
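 * The defaults chosen here (advanced option encoding, UIH frames,
 * 64 byte MRU/MTU, keep-alive disabled) stay inert until userspace
 * configures and thereby activates the mux. A minimal configuration
 * sketch, assuming a tty fd that already has the N_GSM0710 line
 * discipline attached:
 *
 *	struct gsm_config c;
 *
 *	ioctl(fd, GSMIOC_GETCONF, &c);
 *	c.initiator = 1;
 *	c.encapsulation = 0;	// 0 selects basic option framing
 *	ioctl(fd, GSMIOC_SETCONF, &c);
 *
 * (illustration only; the ioctls are handled by gsmld_ioctl() and
 * gsm_config() further down)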
*/ static struct gsm_mux *gsm_alloc_mux(void) { int i; struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); if (gsm == NULL) return NULL; gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL); if (gsm->buf == NULL) { kfree(gsm); return NULL; } gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL); if (gsm->txframe == NULL) { kfree(gsm->buf); kfree(gsm); return NULL; } spin_lock_init(&gsm->lock); mutex_init(&gsm->mutex); kref_init(&gsm->ref); INIT_LIST_HEAD(&gsm->tx_ctrl_list); INIT_LIST_HEAD(&gsm->tx_data_list); timer_setup(&gsm->kick_timer, gsm_kick_timer, 0); timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); timer_setup(&gsm->ka_timer, gsm_control_keep_alive, 0); INIT_WORK(&gsm->tx_work, gsmld_write_task); init_waitqueue_head(&gsm->event); spin_lock_init(&gsm->control_lock); spin_lock_init(&gsm->tx_lock); gsm->t1 = T1; gsm->t2 = T2; gsm->t3 = T3; gsm->n2 = N2; gsm->k = K; gsm->ftype = UIH; gsm->adaption = 1; gsm->encoding = GSM_ADV_OPT; gsm->mru = 64; /* Default to encoding 1 so these should be 64 */ gsm->mtu = 64; gsm->dead = true; /* Avoid early tty opens */ gsm->wait_config = false; /* Disabled */ gsm->keep_alive = 0; /* Disabled */ /* Store the instance to the mux array or abort if no space is * available. */ spin_lock(&gsm_mux_lock); for (i = 0; i < MAX_MUX; i++) { if (!gsm_mux[i]) { gsm_mux[i] = gsm; gsm->num = i; break; } } spin_unlock(&gsm_mux_lock); if (i == MAX_MUX) { mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); return NULL; } return gsm; } static void gsm_copy_config_values(struct gsm_mux *gsm, struct gsm_config *c) { memset(c, 0, sizeof(*c)); c->adaption = gsm->adaption; c->encapsulation = gsm->encoding; c->initiator = gsm->initiator; c->t1 = gsm->t1; c->t2 = gsm->t2; c->t3 = gsm->t3; c->n2 = gsm->n2; if (gsm->ftype == UIH) c->i = 1; else c->i = 2; pr_debug("Ftype %d i %d\n", gsm->ftype, c->i); c->mru = gsm->mru; c->mtu = gsm->mtu; c->k = gsm->k; } static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) { int need_close = 0; int need_restart = 0; /* Stuff we don't support yet - UI or I frame transport */ if (c->adaption != 1 && c->adaption != 2) return -EOPNOTSUPP; /* Check the MRU/MTU range looks sane */ if (c->mru < MIN_MTU || c->mtu < MIN_MTU) return -EINVAL; if (c->mru > MAX_MRU || c->mtu > MAX_MTU) return -EINVAL; if (c->t3 > MAX_T3) return -EINVAL; if (c->n2 > 255) return -EINVAL; if (c->encapsulation > 1) /* Basic, advanced, no I */ return -EINVAL; if (c->initiator > 1) return -EINVAL; if (c->k > MAX_WINDOW_SIZE) return -EINVAL; if (c->i == 0 || c->i > 2) /* UIH and UI only */ return -EINVAL; /* * See what is needed for reconfiguration */ /* Timing fields */ if (c->t1 != 0 && c->t1 != gsm->t1) need_restart = 1; if (c->t2 != 0 && c->t2 != gsm->t2) need_restart = 1; if (c->encapsulation != gsm->encoding) need_restart = 1; if (c->adaption != gsm->adaption) need_restart = 1; /* Requires care */ if (c->initiator != gsm->initiator) need_close = 1; if (c->mru != gsm->mru) need_restart = 1; if (c->mtu != gsm->mtu) need_restart = 1; /* * Close down what is needed, restart and initiate the new * configuration. On the first time there is no DLCI[0] * and closing or cleaning up is not necessary. */ if (need_close || need_restart) gsm_cleanup_mux(gsm, true); gsm->initiator = c->initiator; gsm->mru = c->mru; gsm->mtu = c->mtu; gsm->encoding = c->encapsulation ? 
GSM_ADV_OPT : GSM_BASIC_OPT; gsm->adaption = c->adaption; gsm->n2 = c->n2; if (c->i == 1) gsm->ftype = UIH; else if (c->i == 2) gsm->ftype = UI; if (c->t1) gsm->t1 = c->t1; if (c->t2) gsm->t2 = c->t2; if (c->t3) gsm->t3 = c->t3; if (c->k) gsm->k = c->k; /* * FIXME: We need to separate activation/deactivation from adding * and removing from the mux array */ if (gsm->dead) { int ret = gsm_activate_mux(gsm); if (ret) return ret; if (gsm->initiator) gsm_dlci_begin_open(gsm->dlci[0]); } return 0; } static void gsm_copy_config_ext_values(struct gsm_mux *gsm, struct gsm_config_ext *ce) { memset(ce, 0, sizeof(*ce)); ce->wait_config = gsm->wait_config ? 1 : 0; ce->keep_alive = gsm->keep_alive; } static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce) { bool need_restart = false; unsigned int i; /* * Check that userspace doesn't put stuff in here to prevent breakages * in the future. */ for (i = 0; i < ARRAY_SIZE(ce->reserved); i++) if (ce->reserved[i]) return -EINVAL; if (ce->flags & ~GSM_FL_RESTART) return -EINVAL; /* Requires care */ if (ce->flags & GSM_FL_RESTART) need_restart = true; /* * Close down what is needed, restart and initiate the new * configuration. On the first time there is no DLCI[0] * and closing or cleaning up is not necessary. */ if (need_restart) gsm_cleanup_mux(gsm, true); /* * Setup the new configuration values */ gsm->wait_config = ce->wait_config ? true : false; gsm->keep_alive = ce->keep_alive; if (gsm->dead) { int ret = gsm_activate_mux(gsm); if (ret) return ret; if (gsm->initiator) gsm_dlci_begin_open(gsm->dlci[0]); } return 0; } /** * gsmld_output - write to link * @gsm: our mux * @data: bytes to output * @len: size * * Write a block of data from the GSM mux to the data channel. This * will eventually be serialized from above but at the moment isn't. */ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len) { if (tty_write_room(gsm->tty) < len) { set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags); return -ENOSPC; } if (debug & DBG_DATA) gsm_hex_dump_bytes(__func__, data, len); return gsm->tty->ops->write(gsm->tty, data, len); } /** * gsmld_write_trigger - schedule ldisc write task * @gsm: our mux */ static void gsmld_write_trigger(struct gsm_mux *gsm) { if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead) return; schedule_work(&gsm->tx_work); } /** * gsmld_write_task - ldisc write task * @work: our tx write work * * Writes out data to the ldisc if possible. We are doing this here to * avoid dead-locking. This returns if no space or data is left for output. */ static void gsmld_write_task(struct work_struct *work) { struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work); unsigned long flags; int i, ret; /* All outstanding control channel and control messages and one data * frame is sent. */ ret = -ENODEV; spin_lock_irqsave(&gsm->tx_lock, flags); if (gsm->tty) ret = gsm_data_kick(gsm); spin_unlock_irqrestore(&gsm->tx_lock, flags); if (ret >= 0) for (i = 0; i < NUM_DLCI; i++) if (gsm->dlci[i]) tty_port_tty_wakeup(&gsm->dlci[i]->port); } /** * gsmld_attach_gsm - mode set up * @tty: our tty structure * @gsm: our mux * * Set up the MUX for basic mode and commence connecting to the * modem. Currently called from the line discipline set up but * will need moving to an ioctl path. */ static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) { gsm->tty = tty_kref_get(tty); /* Turn off tty XON/XOFF handling to handle it explicitly. 
 */
	gsm->old_c_iflag = tty->termios.c_iflag;
	tty->termios.c_iflag &= (IXON | IXOFF);
}

/**
 * gsmld_detach_gsm - stop doing 0710 mux
 * @tty: tty attached to the mux
 * @gsm: mux
 *
 * Shutdown and then clean up the resources used by the line discipline
 */
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
	WARN_ON(tty != gsm->tty);
	/* Restore tty XON/XOFF handling. */
	gsm->tty->termios.c_iflag = gsm->old_c_iflag;
	tty_kref_put(gsm->tty);
	gsm->tty = NULL;
}

static void gsmld_receive_buf(struct tty_struct *tty, const u8 *cp,
			      const u8 *fp, size_t count)
{
	struct gsm_mux *gsm = tty->disc_data;
	u8 flags = TTY_NORMAL;

	if (debug & DBG_DATA)
		gsm_hex_dump_bytes(__func__, cp, count);

	for (; count; count--, cp++) {
		if (fp)
			flags = *fp++;
		switch (flags) {
		case TTY_NORMAL:
			if (gsm->receive)
				gsm->receive(gsm, *cp);
			break;
		case TTY_OVERRUN:
		case TTY_BREAK:
		case TTY_PARITY:
		case TTY_FRAME:
			gsm_error(gsm);
			break;
		default:
			WARN_ONCE(1, "%s: unknown flag %d\n",
				  tty_name(tty), flags);
			break;
		}
	}
	/* FASYNC if needed ? */
	/* If clogged call tty_throttle(tty); */
}

/**
 * gsmld_flush_buffer - clean input queue
 * @tty: terminal device
 *
 * Flush the input buffer. Called when the line discipline is
 * being closed, when the tty layer wants the buffer flushed (eg
 * at hangup).
 */
static void gsmld_flush_buffer(struct tty_struct *tty)
{
}

/**
 * gsmld_close - close the ldisc for this tty
 * @tty: device
 *
 * Called from the terminal layer when this line discipline is
 * being shut down, either because of a close or because of a
 * discipline change. The function will not be called while other
 * ldisc methods are in progress.
 */
static void gsmld_close(struct tty_struct *tty)
{
	struct gsm_mux *gsm = tty->disc_data;

	/* The ldisc locks and closes the port before calling our close. This
	 * means we have no way to do a proper disconnect. We will not bother
	 * to do one.
	 */
	gsm_cleanup_mux(gsm, false);

	gsmld_detach_gsm(tty, gsm);

	gsmld_flush_buffer(tty);
	/* Do other clean up here */
	mux_put(gsm);
}

/**
 * gsmld_open - open an ldisc
 * @tty: terminal to open
 *
 * Called when this line discipline is being attached to the
 * terminal device. Can sleep. Called serialized so that no
 * other events will occur in parallel. No further open will occur
 * until a close.
 */
static int gsmld_open(struct tty_struct *tty)
{
	struct gsm_mux *gsm;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (tty->ops->write == NULL)
		return -EINVAL;

	/* Attach our ldisc data */
	gsm = gsm_alloc_mux();
	if (gsm == NULL)
		return -ENOMEM;

	tty->disc_data = gsm;
	tty->receive_room = 65536;

	/* Attach the initial passive connection */
	gsmld_attach_gsm(tty, gsm);

	/* The mux will not be activated yet, we wait for correct
	 * configuration first.
	 */
	if (gsm->encoding == GSM_BASIC_OPT)
		gsm->receive = gsm0_receive;
	else
		gsm->receive = gsm1_receive;

	return 0;
}

/**
 * gsmld_write_wakeup - asynchronous I/O notifier
 * @tty: tty device
 *
 * Required for the ptys, serial driver etc. since processes
 * that attach themselves to the master and rely on ASYNC
 * IO must be woken up
 */
static void gsmld_write_wakeup(struct tty_struct *tty)
{
	struct gsm_mux *gsm = tty->disc_data;

	/* Queue poll */
	gsmld_write_trigger(gsm);
}

/**
 * gsmld_read - read function for tty
 * @tty: tty device
 * @file: file object
 * @buf: userspace buffer pointer
 * @nr: size of I/O
 * @cookie: unused
 * @offset: unused
 *
 * Perform reads for the line discipline. We are guaranteed that the
 * line discipline will not be closed under us but we may get multiple
 * parallel readers and must handle this ourselves.
We may also get * a hangup. Always called in user context, may sleep. * * This code must be sure never to sleep through a hangup. */ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t nr, void **cookie, unsigned long offset) { return -EOPNOTSUPP; } /** * gsmld_write - write function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Called when the owner of the device wants to send a frame * itself (or some other control data). The data is transferred * as-is and must be properly framed and checksummed as appropriate * by userspace. Frames are either sent whole or not at all as this * avoids pain user side. */ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, const u8 *buf, size_t nr) { struct gsm_mux *gsm = tty->disc_data; unsigned long flags; size_t space; int ret; if (!gsm) return -ENODEV; ret = -ENOBUFS; spin_lock_irqsave(&gsm->tx_lock, flags); space = tty_write_room(tty); if (space >= nr) ret = tty->ops->write(tty, buf, nr); else set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); spin_unlock_irqrestore(&gsm->tx_lock, flags); return ret; } /** * gsmld_poll - poll method for N_GSM0710 * @tty: terminal device * @file: file accessing it * @wait: poll table * * Called when the line discipline is asked to poll() for data or * for special events. This code is not serialized with respect to * other events save open/close. * * This code must be sure never to sleep through a hangup. * Called without the kernel lock held - fine */ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file, poll_table *wait) { __poll_t mask = 0; struct gsm_mux *gsm = tty->disc_data; poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (gsm->dead) mask |= EPOLLHUP; if (tty_hung_up_p(file)) mask |= EPOLLHUP; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= EPOLLHUP; if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct gsm_config c; struct gsm_config_ext ce; struct gsm_dlci_config dc; struct gsm_mux *gsm = tty->disc_data; unsigned int base, addr; struct gsm_dlci *dlci; switch (cmd) { case GSMIOC_GETCONF: gsm_copy_config_values(gsm, &c); if (copy_to_user((void __user *)arg, &c, sizeof(c))) return -EFAULT; return 0; case GSMIOC_SETCONF: if (copy_from_user(&c, (void __user *)arg, sizeof(c))) return -EFAULT; return gsm_config(gsm, &c); case GSMIOC_GETFIRST: base = mux_num_to_base(gsm); return put_user(base + 1, (__u32 __user *)arg); case GSMIOC_GETCONF_EXT: gsm_copy_config_ext_values(gsm, &ce); if (copy_to_user((void __user *)arg, &ce, sizeof(ce))) return -EFAULT; return 0; case GSMIOC_SETCONF_EXT: if (copy_from_user(&ce, (void __user *)arg, sizeof(ce))) return -EFAULT; return gsm_config_ext(gsm, &ce); case GSMIOC_GETCONF_DLCI: if (copy_from_user(&dc, (void __user *)arg, sizeof(dc))) return -EFAULT; if (dc.channel == 0 || dc.channel >= NUM_DLCI) return -EINVAL; addr = array_index_nospec(dc.channel, NUM_DLCI); dlci = gsm->dlci[addr]; if (!dlci) { dlci = gsm_dlci_alloc(gsm, addr); if (!dlci) return -ENOMEM; } gsm_dlci_copy_config_values(dlci, &dc); if (copy_to_user((void __user *)arg, &dc, sizeof(dc))) return -EFAULT; return 0; case GSMIOC_SETCONF_DLCI: if (copy_from_user(&dc, (void __user *)arg, sizeof(dc))) return -EFAULT; if (dc.channel == 0 || dc.channel >= NUM_DLCI) return -EINVAL; addr = array_index_nospec(dc.channel, NUM_DLCI); dlci 
= gsm->dlci[addr]; if (!dlci) { dlci = gsm_dlci_alloc(gsm, addr); if (!dlci) return -ENOMEM; } return gsm_dlci_config(dlci, &dc, 0); default: return n_tty_ioctl_helper(tty, cmd, arg); } } /* * Network interface * */ static int gsm_mux_net_open(struct net_device *net) { pr_debug("%s called\n", __func__); netif_start_queue(net); return 0; } static int gsm_mux_net_close(struct net_device *net) { netif_stop_queue(net); return 0; } static void dlci_net_free(struct gsm_dlci *dlci) { if (!dlci->net) { WARN_ON(1); return; } dlci->adaption = dlci->prev_adaption; dlci->data = dlci->prev_data; free_netdev(dlci->net); dlci->net = NULL; } static void net_free(struct kref *ref) { struct gsm_mux_net *mux_net; struct gsm_dlci *dlci; mux_net = container_of(ref, struct gsm_mux_net, ref); dlci = mux_net->dlci; if (dlci->net) { unregister_netdev(dlci->net); dlci_net_free(dlci); } } static inline void muxnet_get(struct gsm_mux_net *mux_net) { kref_get(&mux_net->ref); } static inline void muxnet_put(struct gsm_mux_net *mux_net) { kref_put(&mux_net->ref, net_free); } static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb, struct net_device *net) { struct gsm_mux_net *mux_net = netdev_priv(net); struct gsm_dlci *dlci = mux_net->dlci; muxnet_get(mux_net); skb_queue_head(&dlci->skb_list, skb); net->stats.tx_packets++; net->stats.tx_bytes += skb->len; gsm_dlci_data_kick(dlci); /* And tell the kernel when the last transmit started. */ netif_trans_update(net); muxnet_put(mux_net); return NETDEV_TX_OK; } /* called when a packet did not ack after watchdogtimeout */ static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue) { /* Tell syslog we are hosed. */ dev_dbg(&net->dev, "Tx timed out.\n"); /* Update statistics */ net->stats.tx_errors++; } static void gsm_mux_rx_netchar(struct gsm_dlci *dlci, const u8 *in_buf, int size) { struct net_device *net = dlci->net; struct sk_buff *skb; struct gsm_mux_net *mux_net = netdev_priv(net); muxnet_get(mux_net); /* Allocate an sk_buff */ skb = dev_alloc_skb(size + NET_IP_ALIGN); if (!skb) { /* We got no receive buffer. 
*/ net->stats.rx_dropped++; muxnet_put(mux_net); return; } skb_reserve(skb, NET_IP_ALIGN); skb_put_data(skb, in_buf, size); skb->dev = net; skb->protocol = htons(ETH_P_IP); /* Ship it off to the kernel */ netif_rx(skb); /* update out statistics */ net->stats.rx_packets++; net->stats.rx_bytes += size; muxnet_put(mux_net); return; } static void gsm_mux_net_init(struct net_device *net) { static const struct net_device_ops gsm_netdev_ops = { .ndo_open = gsm_mux_net_open, .ndo_stop = gsm_mux_net_close, .ndo_start_xmit = gsm_mux_net_start_xmit, .ndo_tx_timeout = gsm_mux_net_tx_timeout, }; net->netdev_ops = &gsm_netdev_ops; /* fill in the other fields */ net->watchdog_timeo = GSM_NET_TX_TIMEOUT; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; net->type = ARPHRD_NONE; net->tx_queue_len = 10; } /* caller holds the dlci mutex */ static void gsm_destroy_network(struct gsm_dlci *dlci) { struct gsm_mux_net *mux_net; pr_debug("destroy network interface\n"); if (!dlci->net) return; mux_net = netdev_priv(dlci->net); muxnet_put(mux_net); } /* caller holds the dlci mutex */ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc) { char *netname; int retval = 0; struct net_device *net; struct gsm_mux_net *mux_net; if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Already in a non tty mode */ if (dlci->adaption > 2) return -EBUSY; if (nc->protocol != htons(ETH_P_IP)) return -EPROTONOSUPPORT; if (nc->adaption != 3 && nc->adaption != 4) return -EPROTONOSUPPORT; pr_debug("create network interface\n"); netname = "gsm%d"; if (nc->if_name[0] != '\0') netname = nc->if_name; net = alloc_netdev(sizeof(struct gsm_mux_net), netname, NET_NAME_UNKNOWN, gsm_mux_net_init); if (!net) { pr_err("alloc_netdev failed\n"); return -ENOMEM; } net->mtu = dlci->mtu; net->min_mtu = MIN_MTU; net->max_mtu = dlci->mtu; mux_net = netdev_priv(net); mux_net->dlci = dlci; kref_init(&mux_net->ref); strscpy(nc->if_name, net->name); /* return net name */ /* reconfigure dlci for network */ dlci->prev_adaption = dlci->adaption; dlci->prev_data = dlci->data; dlci->adaption = nc->adaption; dlci->data = gsm_mux_rx_netchar; dlci->net = net; pr_debug("register netdev\n"); retval = register_netdev(net); if (retval) { pr_err("network register fail %d\n", retval); dlci_net_free(dlci); return retval; } return net->ifindex; /* return network index */ } /* Line discipline for real tty */ static struct tty_ldisc_ops tty_ldisc_packet = { .owner = THIS_MODULE, .num = N_GSM0710, .name = "n_gsm", .open = gsmld_open, .close = gsmld_close, .flush_buffer = gsmld_flush_buffer, .read = gsmld_read, .write = gsmld_write, .ioctl = gsmld_ioctl, .poll = gsmld_poll, .receive_buf = gsmld_receive_buf, .write_wakeup = gsmld_write_wakeup }; /* * Virtual tty side */ /** * gsm_modem_upd_via_data - send modem bits via convergence layer * @dlci: channel * @brk: break signal * * Send an empty frame to signal mobile state changes and to transmit the * break signal for adaption 2. */ static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk) { struct gsm_mux *gsm = dlci->gsm; unsigned long flags; if (dlci->state != DLCI_OPEN || dlci->adaption != 2) return; spin_lock_irqsave(&gsm->tx_lock, flags); gsm_dlci_modem_output(gsm, dlci, brk); spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** * gsm_modem_upd_via_msc - send modem bits via control frame * @dlci: channel * @brk: break signal */ sta |