// SPDX-License-Identifier: GPL-2.0-or-later
      /*
       * ChaCha20-Poly1305 AEAD, RFC7539
       *
       * Copyright (C) 2015 Martin Willi
       */
      
      #include <crypto/internal/aead.h>
      #include <crypto/internal/hash.h>
      #include <crypto/internal/skcipher.h>
      #include <crypto/scatterwalk.h>
      #include <crypto/chacha.h>
      #include <crypto/poly1305.h>
      #include <linux/err.h>
      #include <linux/init.h>
      #include <linux/kernel.h>
      #include <linux/module.h>
      
      #include "internal.h"
      
      struct chachapoly_instance_ctx {
              struct crypto_skcipher_spawn chacha;
              struct crypto_ahash_spawn poly;
              unsigned int saltlen;
      };
      
      struct chachapoly_ctx {
              struct crypto_skcipher *chacha;
              struct crypto_ahash *poly;
              /* key bytes we use for the ChaCha20 IV */
              unsigned int saltlen;
              u8 salt[];
      };
      
      struct poly_req {
              /* zero byte padding for AD/ciphertext, as needed */
              u8 pad[POLY1305_BLOCK_SIZE];
              /* tail data with AD/ciphertext lengths */
              struct {
                      __le64 assoclen;
                      __le64 cryptlen;
              } tail;
              struct scatterlist src[1];
              struct ahash_request req; /* must be last member */
      };
      
      struct chacha_req {
              u8 iv[CHACHA_IV_SIZE];
              struct scatterlist src[1];
              struct skcipher_request req; /* must be last member */
      };
      
      struct chachapoly_req_ctx {
              struct scatterlist src[2];
              struct scatterlist dst[2];
        /* the key we generate for Poly1305 using ChaCha20 */
              u8 key[POLY1305_KEY_SIZE];
              /* calculated Poly1305 tag */
              u8 tag[POLY1305_DIGEST_SIZE];
              /* length of data to en/decrypt, without ICV */
              unsigned int cryptlen;
              /* Actual AD, excluding IV */
              unsigned int assoclen;
              /* request flags, with MAY_SLEEP cleared if needed */
              u32 flags;
              union {
                      struct poly_req poly;
                      struct chacha_req chacha;
              } u;
      };
      
      static inline void async_done_continue(struct aead_request *req, int err,
                                             int (*cont)(struct aead_request *))
      {
              if (!err) {
                      struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
      
                      rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                      err = cont(req);
              }
      
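        /*
         * -EINPROGRESS/-EBUSY mean the next step was queued and its own
         * completion callback will finish the request; anything else
         * completes it here.
         */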
              if (err != -EINPROGRESS && err != -EBUSY)
                      aead_request_complete(req, err);
      }
      
      static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              __le32 leicb = cpu_to_le32(icb);
      
              memcpy(iv, &leicb, sizeof(leicb));
              memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen);
              memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
                     CHACHA_IV_SIZE - sizeof(leicb) - ctx->saltlen);
      }
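
/*
 * chacha_iv() above yields a 16-byte ChaCha IV laid out as (illustrative):
 *
 *   rfc7539    (ivsize 12, saltlen 0): counter (4, LE) || req->iv (12)
 *   rfc7539esp (ivsize  8, saltlen 4): counter (4, LE) || salt (4) || req->iv (8)
 *
 * icb is 0 for the Poly1305 key generation block and 1 for the payload.
 */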
      
      static int poly_verify_tag(struct aead_request *req)
      {
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              u8 tag[sizeof(rctx->tag)];
      
              scatterwalk_map_and_copy(tag, req->src,
                                       req->assoclen + rctx->cryptlen,
                                       sizeof(tag), 0);
              if (crypto_memneq(tag, rctx->tag, sizeof(tag)))
                      return -EBADMSG;
              return 0;
      }
      
      static int poly_copy_tag(struct aead_request *req)
      {
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
      
              scatterwalk_map_and_copy(rctx->tag, req->dst,
                                       req->assoclen + rctx->cryptlen,
                                       sizeof(rctx->tag), 1);
              return 0;
      }
      
      static void chacha_decrypt_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_verify_tag);
      }
      
      static int chacha_decrypt(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct chacha_req *creq = &rctx->u.chacha;
              struct scatterlist *src, *dst;
              int err;
      
              if (rctx->cryptlen == 0)
                      goto skip;
      
              chacha_iv(creq->iv, req, 1);
      
              src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
              dst = src;
              if (req->src != req->dst)
                      dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
      
              skcipher_request_set_callback(&creq->req, rctx->flags,
                                            chacha_decrypt_done, req);
              skcipher_request_set_tfm(&creq->req, ctx->chacha);
              skcipher_request_set_crypt(&creq->req, src, dst,
                                         rctx->cryptlen, creq->iv);
              err = crypto_skcipher_decrypt(&creq->req);
              if (err)
                      return err;
      
      skip:
              return poly_verify_tag(req);
      }
      
      static int poly_tail_continue(struct aead_request *req)
      {
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
      
              if (rctx->cryptlen == req->cryptlen) /* encrypting */
                      return poly_copy_tag(req);
      
              return chacha_decrypt(req);
      }
      
      static void poly_tail_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_tail_continue);
      }
      
      static int poly_tail(struct aead_request *req)
      {
              struct crypto_aead *tfm = crypto_aead_reqtfm(req);
              struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct poly_req *preq = &rctx->u.poly;
              int err;
      
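        /*
         * Per RFC 7539, the MAC input ends with the AD length and the
         * ciphertext length, each as a little-endian 64-bit word.
         */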
              preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
              preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
              sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
      
              ahash_request_set_callback(&preq->req, rctx->flags,
                                         poly_tail_done, req);
              ahash_request_set_tfm(&preq->req, ctx->poly);
              ahash_request_set_crypt(&preq->req, preq->src,
                                      rctx->tag, sizeof(preq->tail));
      
              err = crypto_ahash_finup(&preq->req);
              if (err)
                      return err;
      
              return poly_tail_continue(req);
      }
      
      static void poly_cipherpad_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_tail);
      }
      
      static int poly_cipherpad(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct poly_req *preq = &rctx->u.poly;
              unsigned int padlen;
              int err;
      
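        /*
         * Pad the ciphertext to a 16-byte boundary; for unsigned x,
         * -x % 16 == (16 - x % 16) % 16.
         */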
              padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
              memset(preq->pad, 0, sizeof(preq->pad));
              sg_init_one(preq->src, preq->pad, padlen);
      
              ahash_request_set_callback(&preq->req, rctx->flags,
                                         poly_cipherpad_done, req);
              ahash_request_set_tfm(&preq->req, ctx->poly);
              ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
      
              err = crypto_ahash_update(&preq->req);
              if (err)
                      return err;
      
              return poly_tail(req);
      }
      
      static void poly_cipher_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_cipherpad);
      }
      
      static int poly_cipher(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct poly_req *preq = &rctx->u.poly;
              struct scatterlist *crypt = req->src;
              int err;
      
              if (rctx->cryptlen == req->cryptlen) /* encrypting */
                      crypt = req->dst;
      
              crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
      
              ahash_request_set_callback(&preq->req, rctx->flags,
                                         poly_cipher_done, req);
              ahash_request_set_tfm(&preq->req, ctx->poly);
              ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
      
              err = crypto_ahash_update(&preq->req);
              if (err)
                      return err;
      
              return poly_cipherpad(req);
      }
      
      static void poly_adpad_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_cipher);
      }
      
      static int poly_adpad(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct poly_req *preq = &rctx->u.poly;
              unsigned int padlen;
              int err;
      
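        /* Pad the AD to a 16-byte boundary, same computation as in
         * poly_cipherpad().
         */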
              padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
              memset(preq->pad, 0, sizeof(preq->pad));
              sg_init_one(preq->src, preq->pad, padlen);
      
              ahash_request_set_callback(&preq->req, rctx->flags,
                                         poly_adpad_done, req);
              ahash_request_set_tfm(&preq->req, ctx->poly);
              ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
      
              err = crypto_ahash_update(&preq->req);
              if (err)
                      return err;
      
              return poly_cipher(req);
      }
      
      static void poly_ad_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_adpad);
      }
      
      static int poly_ad(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct poly_req *preq = &rctx->u.poly;
              int err;
      
              ahash_request_set_callback(&preq->req, rctx->flags,
                                         poly_ad_done, req);
              ahash_request_set_tfm(&preq->req, ctx->poly);
              ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
      
              err = crypto_ahash_update(&preq->req);
              if (err)
                      return err;
      
              return poly_adpad(req);
      }
      
      static void poly_setkey_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_ad);
      }
      
      static int poly_setkey(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct poly_req *preq = &rctx->u.poly;
              int err;
      
              sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
      
              ahash_request_set_callback(&preq->req, rctx->flags,
                                         poly_setkey_done, req);
              ahash_request_set_tfm(&preq->req, ctx->poly);
              ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
      
              err = crypto_ahash_update(&preq->req);
              if (err)
                      return err;
      
              return poly_ad(req);
      }
      
      static void poly_init_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_setkey);
      }
      
      static int poly_init(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct poly_req *preq = &rctx->u.poly;
              int err;
      
              ahash_request_set_callback(&preq->req, rctx->flags,
                                         poly_init_done, req);
              ahash_request_set_tfm(&preq->req, ctx->poly);
      
              err = crypto_ahash_init(&preq->req);
              if (err)
                      return err;
      
              return poly_setkey(req);
      }
      
      static void poly_genkey_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_init);
      }
      
      static int poly_genkey(struct aead_request *req)
      {
              struct crypto_aead *tfm = crypto_aead_reqtfm(req);
              struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct chacha_req *creq = &rctx->u.chacha;
              int err;
      
              rctx->assoclen = req->assoclen;
      
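        /*
         * For the rfc7539esp variant (8-byte IV) the sequence-number IV
         * is carried at the end of the AD; strip it from the length that
         * Poly1305 authenticates.
         */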
              if (crypto_aead_ivsize(tfm) == 8) {
                      if (rctx->assoclen < 8)
                              return -EINVAL;
                      rctx->assoclen -= 8;
              }
      
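        /*
         * Derive the one-time Poly1305 key: run ChaCha20 over 32 zero
         * bytes with the block counter at 0.  decrypt() is used, but for
         * a stream cipher encryption and decryption are the same
         * keystream XOR.
         */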
              memset(rctx->key, 0, sizeof(rctx->key));
              sg_init_one(creq->src, rctx->key, sizeof(rctx->key));
      
              chacha_iv(creq->iv, req, 0);
      
              skcipher_request_set_callback(&creq->req, rctx->flags,
                                            poly_genkey_done, req);
              skcipher_request_set_tfm(&creq->req, ctx->chacha);
              skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
                                         POLY1305_KEY_SIZE, creq->iv);
      
              err = crypto_skcipher_decrypt(&creq->req);
              if (err)
                      return err;
      
              return poly_init(req);
      }
      
      static void chacha_encrypt_done(struct crypto_async_request *areq, int err)
      {
              async_done_continue(areq->data, err, poly_genkey);
      }
      
      static int chacha_encrypt(struct aead_request *req)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
              struct chacha_req *creq = &rctx->u.chacha;
              struct scatterlist *src, *dst;
              int err;
      
              if (req->cryptlen == 0)
                      goto skip;
      
              chacha_iv(creq->iv, req, 1);
      
              src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
              dst = src;
              if (req->src != req->dst)
                      dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
      
              skcipher_request_set_callback(&creq->req, rctx->flags,
                                            chacha_encrypt_done, req);
              skcipher_request_set_tfm(&creq->req, ctx->chacha);
              skcipher_request_set_crypt(&creq->req, src, dst,
                                         req->cryptlen, creq->iv);
              err = crypto_skcipher_encrypt(&creq->req);
              if (err)
                      return err;
      
      skip:
              return poly_genkey(req);
      }
      
      static int chachapoly_encrypt(struct aead_request *req)
      {
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
      
              rctx->cryptlen = req->cryptlen;
              rctx->flags = aead_request_flags(req);
      
              /* encrypt call chain:
               * - chacha_encrypt/done()
               * - poly_genkey/done()
               * - poly_init/done()
               * - poly_setkey/done()
               * - poly_ad/done()
               * - poly_adpad/done()
               * - poly_cipher/done()
               * - poly_cipherpad/done()
               * - poly_tail/done/continue()
               * - poly_copy_tag()
               */
              return chacha_encrypt(req);
      }
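
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * would drive this template through the generic AEAD API, e.g.:
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	struct scatterlist sg;
 *	u8 buf[64 + POLY1305_DIGEST_SIZE];	// plaintext, tag appended
 *	u8 iv[12];				// per-message nonce
 *
 *	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
 *	crypto_aead_setkey(tfm, key, CHACHA_KEY_SIZE);
 *	crypto_aead_setauthsize(tfm, POLY1305_DIGEST_SIZE);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	aead_request_set_crypt(req, &sg, &sg, 64, iv);
 *	aead_request_set_ad(req, 0);
 *	crypto_aead_encrypt(req);
 *
 * Error handling and asynchronous completion (aead_request_set_callback()
 * plus crypto_wait_req()) are omitted for brevity.
 */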
      
      static int chachapoly_decrypt(struct aead_request *req)
      {
              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
      
              rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
              rctx->flags = aead_request_flags(req);
      
              /* decrypt call chain:
               * - poly_genkey/done()
               * - poly_init/done()
               * - poly_setkey/done()
               * - poly_ad/done()
               * - poly_adpad/done()
               * - poly_cipher/done()
               * - poly_cipherpad/done()
               * - poly_tail/done/continue()
               * - chacha_decrypt/done()
               * - poly_verify_tag()
               */
              return poly_genkey(req);
      }
      
      static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
                                   unsigned int keylen)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(aead);
              int err;
      
              if (keylen != ctx->saltlen + CHACHA_KEY_SIZE)
                      return -EINVAL;
      
              keylen -= ctx->saltlen;
              memcpy(ctx->salt, key + keylen, ctx->saltlen);
      
              crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
              crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
                                                     CRYPTO_TFM_REQ_MASK);
      
              err = crypto_skcipher_setkey(ctx->chacha, key, keylen);
              crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) &
                                          CRYPTO_TFM_RES_MASK);
              return err;
      }
      
      static int chachapoly_setauthsize(struct crypto_aead *tfm,
                                        unsigned int authsize)
      {
              if (authsize != POLY1305_DIGEST_SIZE)
                      return -EINVAL;
      
              return 0;
      }
      
      static int chachapoly_init(struct crypto_aead *tfm)
      {
        struct aead_instance *inst = aead_alg_instance(tfm);
              struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
              struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
              struct crypto_skcipher *chacha;
              struct crypto_ahash *poly;
              unsigned long align;
      
              poly = crypto_spawn_ahash(&ictx->poly);
              if (IS_ERR(poly))
                      return PTR_ERR(poly);
      
        chacha = crypto_spawn_skcipher(&ictx->chacha);
              if (IS_ERR(chacha)) {
                      crypto_free_ahash(poly);
                      return PTR_ERR(chacha);
              }
      
        ctx->chacha = chacha;
              ctx->poly = poly;
              ctx->saltlen = ictx->saltlen;
      
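        /*
         * Size the request context to hold our own state plus the larger
         * of the two sub-requests (ChaCha20 skcipher or Poly1305 ahash),
         * padded for the AEAD's alignmask.
         */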
              align = crypto_aead_alignmask(tfm);
              align &= ~(crypto_tfm_ctx_alignment() - 1);
              crypto_aead_set_reqsize(
                      tfm,
                      align + offsetof(struct chachapoly_req_ctx, u) +
                      max(offsetof(struct chacha_req, req) +
                          sizeof(struct skcipher_request) +
                          crypto_skcipher_reqsize(chacha),
                          offsetof(struct poly_req, req) +
                          sizeof(struct ahash_request) +
                          crypto_ahash_reqsize(poly)));
      
        return 0;
      }
      
      static void chachapoly_exit(struct crypto_aead *tfm)
      {
              struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
      
        crypto_free_ahash(ctx->poly);
              crypto_free_skcipher(ctx->chacha);
      }
      
      static void chachapoly_free(struct aead_instance *inst)
      {
              struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
      
              crypto_drop_skcipher(&ctx->chacha);
              crypto_drop_ahash(&ctx->poly);
              kfree(inst);
      }
      
      static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
                                   const char *name, unsigned int ivsize)
      {
              struct crypto_attr_type *algt;
              struct aead_instance *inst;
              struct skcipher_alg *chacha;
              struct crypto_alg *poly;
              struct hash_alg_common *poly_hash;
              struct chachapoly_instance_ctx *ctx;
              const char *chacha_name, *poly_name;
              int err;
      
              if (ivsize > CHACHAPOLY_IV_SIZE)
                      return -EINVAL;
      
              algt = crypto_get_attr_type(tb);
              if (IS_ERR(algt))
                      return PTR_ERR(algt);
      
              if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
                      return -EINVAL;
      
              chacha_name = crypto_attr_alg_name(tb[1]);
              if (IS_ERR(chacha_name))
                      return PTR_ERR(chacha_name);
              poly_name = crypto_attr_alg_name(tb[2]);
              if (IS_ERR(poly_name))
                      return PTR_ERR(poly_name);
      
              poly = crypto_find_alg(poly_name, &crypto_ahash_type,
                                     CRYPTO_ALG_TYPE_HASH,
                                     CRYPTO_ALG_TYPE_AHASH_MASK |
                                     crypto_requires_sync(algt->type,
                                                          algt->mask));
              if (IS_ERR(poly))
                      return PTR_ERR(poly);
              poly_hash = __crypto_hash_alg_common(poly);
      
              err = -EINVAL;
              if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
                      goto out_put_poly;
      
              err = -ENOMEM;
              inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
              if (!inst)
                      goto out_put_poly;
      
              ctx = aead_instance_ctx(inst);
              ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
              err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
                                            aead_crypto_instance(inst));
              if (err)
                      goto err_free_inst;
      
              crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
              err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
                                         crypto_requires_sync(algt->type,
                                                              algt->mask));
              if (err)
                      goto err_drop_poly;
      
              chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
      
              err = -EINVAL;
              /* Need 16-byte IV size, including Initial Block Counter value */
              if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE)
                      goto out_drop_chacha;
              /* Not a stream cipher? */
              if (chacha->base.cra_blocksize != 1)
                      goto out_drop_chacha;
      
              err = -ENAMETOOLONG;
              if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                           "%s(%s,%s)", name, chacha->base.cra_name,
                           poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
                      goto out_drop_chacha;
              if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                           "%s(%s,%s)", name, chacha->base.cra_driver_name,
                           poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                      goto out_drop_chacha;
      
              inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) &
                                         CRYPTO_ALG_ASYNC;
              inst->alg.base.cra_priority = (chacha->base.cra_priority +
                                             poly->cra_priority) / 2;
              inst->alg.base.cra_blocksize = 1;
              inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
                                             poly->cra_alignmask;
              inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
                                           ctx->saltlen;
              inst->alg.ivsize = ivsize;
              inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
              inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
              inst->alg.init = chachapoly_init;
              inst->alg.exit = chachapoly_exit;
              inst->alg.encrypt = chachapoly_encrypt;
              inst->alg.decrypt = chachapoly_decrypt;
              inst->alg.setkey = chachapoly_setkey;
              inst->alg.setauthsize = chachapoly_setauthsize;
      
              inst->free = chachapoly_free;
      
              err = aead_register_instance(tmpl, inst);
              if (err)
                      goto out_drop_chacha;
      
      out_put_poly:
              crypto_mod_put(poly);
              return err;
      
      out_drop_chacha:
              crypto_drop_skcipher(&ctx->chacha);
      err_drop_poly:
              crypto_drop_ahash(&ctx->poly);
      err_free_inst:
              kfree(inst);
              goto out_put_poly;
      }
      
      static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
      {
              return chachapoly_create(tmpl, tb, "rfc7539", 12);
      }
      
      static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
      {
              return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
      }
      
      static struct crypto_template rfc7539_tmpls[] = {
              {
                      .name = "rfc7539",
                      .create = rfc7539_create,
                      .module = THIS_MODULE,
              }, {
                      .name = "rfc7539esp",
                      .create = rfc7539esp_create,
                      .module = THIS_MODULE,
              },
      };
      
      static int __init chacha20poly1305_module_init(void)
      {
              return crypto_register_templates(rfc7539_tmpls,
                                               ARRAY_SIZE(rfc7539_tmpls));
      }
      
      static void __exit chacha20poly1305_module_exit(void)
      {
              crypto_unregister_templates(rfc7539_tmpls,
                                          ARRAY_SIZE(rfc7539_tmpls));
      }
      
      subsys_initcall(chacha20poly1305_module_init);
      module_exit(chacha20poly1305_module_exit);
      
      MODULE_LICENSE("GPL");
      MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
      MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD");
      MODULE_ALIAS_CRYPTO("rfc7539");
      MODULE_ALIAS_CRYPTO("rfc7539esp");
      // SPDX-License-Identifier: GPL-2.0-only
      /*
       * AppArmor security module
       *
       * This file contains AppArmor mediation of files
       *
       * Copyright (C) 1998-2008 Novell/SUSE
       * Copyright 2009-2010 Canonical Ltd.
       */
      
      #include <linux/tty.h>
      #include <linux/fdtable.h>
      #include <linux/file.h>
      
      #include "include/apparmor.h"
      #include "include/audit.h"
      #include "include/cred.h"
      #include "include/file.h"
      #include "include/match.h"
      #include "include/net.h"
      #include "include/path.h"
      #include "include/policy.h"
      #include "include/label.h"
      
      static u32 map_mask_to_chr_mask(u32 mask)
      {
              u32 m = mask & PERMS_CHRS_MASK;
      
              if (mask & AA_MAY_GETATTR)
                      m |= MAY_READ;
              if (mask & (AA_MAY_SETATTR | AA_MAY_CHMOD | AA_MAY_CHOWN))
                      m |= MAY_WRITE;
      
              return m;
      }
      
      /**
       * audit_file_mask - convert mask to permission string
 * @ab: audit buffer to append the permission string to  (NOT NULL)
       * @mask: permission mask to convert
       */
      static void audit_file_mask(struct audit_buffer *ab, u32 mask)
      {
              char str[10];
      
              aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
                                  map_mask_to_chr_mask(mask));
              audit_log_string(ab, str);
      }
      
      /**
       * file_audit_cb - call back for file specific audit fields
       * @ab: audit_buffer  (NOT NULL)
       * @va: audit struct to audit values of  (NOT NULL)
       */
      static void file_audit_cb(struct audit_buffer *ab, void *va)
      {
              struct common_audit_data *sa = va;
              kuid_t fsuid = current_fsuid();
      
              if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
                      audit_log_format(ab, " requested_mask=");
                      audit_file_mask(ab, aad(sa)->request);
              }
              if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
                      audit_log_format(ab, " denied_mask=");
                      audit_file_mask(ab, aad(sa)->denied);
              }
              if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
                      audit_log_format(ab, " fsuid=%d",
                                       from_kuid(&init_user_ns, fsuid));
                      audit_log_format(ab, " ouid=%d",
                                       from_kuid(&init_user_ns, aad(sa)->fs.ouid));
              }
      
              if (aad(sa)->peer) {
                      audit_log_format(ab, " target=");
                      aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
                                      FLAG_VIEW_SUBNS, GFP_KERNEL);
              } else if (aad(sa)->fs.target) {
                      audit_log_format(ab, " target=");
                      audit_log_untrustedstring(ab, aad(sa)->fs.target);
              }
      }
      
      /**
       * aa_audit_file - handle the auditing of file operations
       * @profile: the profile being enforced  (NOT NULL)
       * @perms: the permissions computed for the request (NOT NULL)
       * @op: operation being mediated
       * @request: permissions requested
       * @name: name of object being mediated (MAYBE NULL)
       * @target: name of target (MAYBE NULL)
       * @tlabel: target label (MAY BE NULL)
       * @ouid: object uid
       * @info: extra information message (MAYBE NULL)
       * @error: 0 if operation allowed else failure error code
       *
       * Returns: %0 or error on failure
       */
      int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
                        const char *op, u32 request, const char *name,
                        const char *target, struct aa_label *tlabel,
                        kuid_t ouid, const char *info, int error)
      {
              int type = AUDIT_APPARMOR_AUTO;
              DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, op);
      
              sa.u.tsk = NULL;
              aad(&sa)->request = request;
              aad(&sa)->name = name;
              aad(&sa)->fs.target = target;
              aad(&sa)->peer = tlabel;
              aad(&sa)->fs.ouid = ouid;
              aad(&sa)->info = info;
              aad(&sa)->error = error;
      
              if (likely(!aad(&sa)->error)) {
                      u32 mask = perms->audit;
      
                      if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
                              mask = 0xffff;
      
                      /* mask off perms that are not being force audited */
                      aad(&sa)->request &= mask;
      
                      if (likely(!aad(&sa)->request))
                              return 0;
                      type = AUDIT_APPARMOR_AUDIT;
              } else {
                      /* only report permissions that were denied */
                      aad(&sa)->request = aad(&sa)->request & ~perms->allow;
                      AA_BUG(!aad(&sa)->request);
      
                      if (aad(&sa)->request & perms->kill)
                              type = AUDIT_APPARMOR_KILL;
      
                      /* quiet known rejects, assumes quiet and kill do not overlap */
                      if ((aad(&sa)->request & perms->quiet) &&
                          AUDIT_MODE(profile) != AUDIT_NOQUIET &&
                          AUDIT_MODE(profile) != AUDIT_ALL)
                              aad(&sa)->request &= ~perms->quiet;
      
                      if (!aad(&sa)->request)
                              return aad(&sa)->error;
              }
      
              aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
              return aa_audit(type, profile, &sa, file_audit_cb);
      }
      
      /**
       * is_deleted - test if a file has been completely unlinked
       * @dentry: dentry of file to test for deletion  (NOT NULL)
       *
       * Returns: %1 if deleted else %0
       */
      static inline bool is_deleted(struct dentry *dentry)
      {
              if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
                      return 1;
              return 0;
      }
      
      static int path_name(const char *op, struct aa_label *label,
                           const struct path *path, int flags, char *buffer,
                           const char **name, struct path_cond *cond, u32 request)
      {
              struct aa_profile *profile;
              const char *info = NULL;
              int error;
      
              error = aa_path_name(path, flags, buffer, name, &info,
                                   labels_profile(label)->disconnected);
              if (error) {
                      fn_for_each_confined(label, profile,
                              aa_audit_file(profile, &nullperms, op, request, *name,
                                            NULL, NULL, cond->uid, info, error));
                      return error;
              }
      
              return 0;
      }
      
      /**
       * map_old_perms - map old file perms layout to the new layout
       * @old: permission set in old mapping
       *
       * Returns: new permission mapping
       */
      static u32 map_old_perms(u32 old)
      {
              u32 new = old & 0xf;
              if (old & MAY_READ)
                      new |= AA_MAY_GETATTR | AA_MAY_OPEN;
              if (old & MAY_WRITE)
                      new |= AA_MAY_SETATTR | AA_MAY_CREATE | AA_MAY_DELETE |
                             AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_OPEN;
              if (old & 0x10)
                      new |= AA_MAY_LINK;
        /* the old mapping lock and link_subset flags were overlaid;
         * which one applied was determined by the pair they were part of
         */
              if (old & 0x20)
                      new |= AA_MAY_LOCK | AA_LINK_SUBSET;
              if (old & 0x40)        /* AA_EXEC_MMAP */
                      new |= AA_EXEC_MMAP;
      
              return new;
      }
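
/*
 * Example (illustrative): an old entry of MAY_READ | 0x10 maps to
 * MAY_READ | AA_MAY_GETATTR | AA_MAY_OPEN | AA_MAY_LINK, since the low
 * nibble is carried over unchanged.
 */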
      
      /**
       * aa_compute_fperms - convert dfa compressed perms to internal perms
       * @dfa: dfa to compute perms for   (NOT NULL)
       * @state: state in dfa
       * @cond:  conditions to consider  (NOT NULL)
       *
       * TODO: convert from dfa + state to permission entry, do computation conversion
       *       at load time.
       *
       * Returns: computed permission set
       */
      struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
                                        struct path_cond *cond)
      {
              /* FIXME: change over to new dfa format
               * currently file perms are encoded in the dfa, new format
               * splits the permissions from the dfa.  This mapping can be
               * done at profile load
               */
              struct aa_perms perms = { };
      
              if (uid_eq(current_fsuid(), cond->uid)) {
                      perms.allow = map_old_perms(dfa_user_allow(dfa, state));
                      perms.audit = map_old_perms(dfa_user_audit(dfa, state));
                      perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
                      perms.xindex = dfa_user_xindex(dfa, state);
              } else {
                      perms.allow = map_old_perms(dfa_other_allow(dfa, state));
                      perms.audit = map_old_perms(dfa_other_audit(dfa, state));
                      perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
                      perms.xindex = dfa_other_xindex(dfa, state);
              }
              perms.allow |= AA_MAY_GETATTR;
      
              /* change_profile wasn't determined by ownership in old mapping */
              if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
                      perms.allow |= AA_MAY_CHANGE_PROFILE;
              if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
                      perms.allow |= AA_MAY_ONEXEC;
      
              return perms;
      }
      
      /**
 * aa_str_perms - find the permissions that match @name
 * @dfa: dfa to match against  (MAYBE NULL)
 * @start: state to start matching from
 * @name: string to match against dfa  (NOT NULL)
 * @cond: conditions to consider for permission set computation  (NOT NULL)
 * @perms: Returns - the permissions found when matching @name
 *
 * Returns: the final state in @dfa when beginning at @start and walking @name
       */
      unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
                                const char *name, struct path_cond *cond,
                                struct aa_perms *perms)
      {
              unsigned int state;
              state = aa_dfa_match(dfa, start, name);
              *perms = aa_compute_fperms(dfa, state, cond);
      
              return state;
      }
      
      int __aa_path_perm(const char *op, struct aa_profile *profile, const char *name,
                         u32 request, struct path_cond *cond, int flags,
                         struct aa_perms *perms)
      {
              int e = 0;
      
              if (profile_unconfined(profile))
                      return 0;
              aa_str_perms(profile->file.dfa, profile->file.start, name, cond, perms);
              if (request & ~perms->allow)
                      e = -EACCES;
              return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
                                   cond->uid, NULL, e);
      }
      
      
      static int profile_path_perm(const char *op, struct aa_profile *profile,
                                   const struct path *path, char *buffer, u32 request,
                                   struct path_cond *cond, int flags,
                                   struct aa_perms *perms)
      {
              const char *name;
              int error;
      
              if (profile_unconfined(profile))
                      return 0;
      
              error = path_name(op, &profile->label, path,
                                flags | profile->path_flags, buffer, &name, cond,
                                request);
              if (error)
                      return error;
              return __aa_path_perm(op, profile, name, request, cond, flags,
                                    perms);
      }
      
      /**
       * aa_path_perm - do permissions check & audit for @path
       * @op: operation being checked
       * @label: profile being enforced  (NOT NULL)
       * @path: path to check permissions of  (NOT NULL)
       * @flags: any additional path flags beyond what the profile specifies
       * @request: requested permissions
       * @cond: conditional info for this request  (NOT NULL)
       *
       * Returns: %0 else error if access denied or other error
       */
      int aa_path_perm(const char *op, struct aa_label *label,
                       const struct path *path, int flags, u32 request,
                       struct path_cond *cond)
      {
              struct aa_perms perms = {};
              struct aa_profile *profile;
              char *buffer = NULL;
              int error;
      
              flags |= PATH_DELEGATE_DELETED | (S_ISDIR(cond->mode) ? PATH_IS_DIR :
                                                                      0);
              buffer = aa_get_buffer(false);
              if (!buffer)
                      return -ENOMEM;
              error = fn_for_each_confined(label, profile,
                              profile_path_perm(op, profile, path, buffer, request,
                                                cond, flags, &perms));
      
              aa_put_buffer(buffer);
      
              return error;
      }
      
      /**
       * xindex_is_subset - helper for aa_path_link
       * @link: link permission set
       * @target: target permission set
       *
 * Test whether the target's x permissions are equal to, or a subset of,
 * the link's x permissions.  This is done as part of the subset test,
 * where a hard link must have a subset of the permissions that the
 * target has.
       *
       * Returns: %1 if subset else %0
       */
      static inline bool xindex_is_subset(u32 link, u32 target)
      {
              if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) ||
                  ((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE)))
                      return 0;
      
              return 1;
      }
      
      static int profile_path_link(struct aa_profile *profile,
                                   const struct path *link, char *buffer,
                                   const struct path *target, char *buffer2,
                                   struct path_cond *cond)
      {
              const char *lname, *tname = NULL;
              struct aa_perms lperms = {}, perms;
              const char *info = NULL;
              u32 request = AA_MAY_LINK;
              unsigned int state;
              int error;
      
              error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
                                buffer, &lname, cond, AA_MAY_LINK);
              if (error)
                      goto audit;
      
              /* buffer2 freed below, tname is pointer in buffer2 */
              error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
                                buffer2, &tname, cond, AA_MAY_LINK);
              if (error)
                      goto audit;
      
              error = -EACCES;
              /* aa_str_perms - handles the case of the dfa being NULL */
              state = aa_str_perms(profile->file.dfa, profile->file.start, lname,
                                   cond, &lperms);
      
              if (!(lperms.allow & AA_MAY_LINK))
                      goto audit;
      
              /* test to see if target can be paired with link */
              state = aa_dfa_null_transition(profile->file.dfa, state);
              aa_str_perms(profile->file.dfa, state, tname, cond, &perms);
      
        /* The force-audit and quiet masks for the link are stored in the
         * second entry of the link pair.
         */
              lperms.audit = perms.audit;
              lperms.quiet = perms.quiet;
              lperms.kill = perms.kill;
      
              if (!(perms.allow & AA_MAY_LINK)) {
                      info = "target restricted";
                      lperms = perms;
                      goto audit;
              }
      
              /* done if link subset test is not required */
              if (!(perms.allow & AA_LINK_SUBSET))
                      goto done_tests;
      
        /* Do the link perm subset test, requiring that the permissions
         * allowed on the link are a subset of those allowed on the target.
         */
              aa_str_perms(profile->file.dfa, profile->file.start, tname, cond,
                           &perms);
      
              /* AA_MAY_LINK is not considered in the subset test */
              request = lperms.allow & ~AA_MAY_LINK;
              lperms.allow &= perms.allow | AA_MAY_LINK;
      
              request |= AA_AUDIT_FILE_MASK & (lperms.allow & ~perms.allow);
              if (request & ~lperms.allow) {
                      goto audit;
              } else if ((lperms.allow & MAY_EXEC) &&
                         !xindex_is_subset(lperms.xindex, perms.xindex)) {
                      lperms.allow &= ~MAY_EXEC;
                      request |= MAY_EXEC;
                      info = "link not subset of target";
                      goto audit;
              }
      
      done_tests:
              error = 0;
      
      audit:
              return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
                                   NULL, cond->uid, info, error);
      }
      
      /**
       * aa_path_link - Handle hard link permission check
       * @label: the label being enforced  (NOT NULL)
       * @old_dentry: the target dentry  (NOT NULL)
       * @new_dir: directory the new link will be created in  (NOT NULL)
       * @new_dentry: the link being created  (NOT NULL)
       *
       * Handle the permission test for a link & target pair.  Permission
       * is encoded as a pair where the link permission is determined
       * first, and if allowed, the target is tested.  The target test
       * is done from the point of the link match (not start of DFA)
       * making the target permission dependent on the link permission match.
       *
 * The subset test, if required, forces the permissions granted on the
 * link to be a subset of the permissions granted on the target.
       *
       * Returns: %0 if allowed else error
       */
      int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
                       const struct path *new_dir, struct dentry *new_dentry)
      {
              struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
              struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
              struct path_cond cond = {
                      d_backing_inode(old_dentry)->i_uid,
                      d_backing_inode(old_dentry)->i_mode
              };
              char *buffer = NULL, *buffer2 = NULL;
              struct aa_profile *profile;
              int error;
      
              /* buffer freed below, lname is pointer in buffer */
              buffer = aa_get_buffer(false);
              buffer2 = aa_get_buffer(false);
              error = -ENOMEM;
              if (!buffer || !buffer2)
                      goto out;
      
              error = fn_for_each_confined(label, profile,
                              profile_path_link(profile, &link, buffer, &target,
                                                buffer2, &cond));
      out:
              aa_put_buffer(buffer);
              aa_put_buffer(buffer2);
              return error;
      }
      
      static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
                                  u32 request)
      {
              struct aa_label *l, *old;
      
              /* update caching of label on file_ctx */
              spin_lock(&fctx->lock);
              old = rcu_dereference_protected(fctx->label,
                                              lockdep_is_held(&fctx->lock));
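        /*
         * GFP_ATOMIC because fctx->lock is held; if the merge fails the
         * cached label and allow mask are simply left unchanged.
         */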
              l = aa_label_merge(old, label, GFP_ATOMIC);
              if (l) {
                      if (l != old) {
                              rcu_assign_pointer(fctx->label, l);
                              aa_put_label(old);
                      } else
                              aa_put_label(l);
                      fctx->allow |= request;
              }
              spin_unlock(&fctx->lock);
      }
      
      static int __file_path_perm(const char *op, struct aa_label *label,
                                  struct aa_label *flabel, struct file *file,
                                  u32 request, u32 denied, bool in_atomic)
      {
              struct aa_profile *profile;
              struct aa_perms perms = {};
              struct path_cond cond = {
                      .uid = file_inode(file)->i_uid,
                      .mode = file_inode(file)->i_mode
              };
              char *buffer;
              int flags, error;
      
              /* revalidation due to label out of date. No revocation at this time */
              if (!denied && aa_label_is_subset(flabel, label))
                      /* TODO: check for revocation on stale profiles */
                      return 0;
      
              flags = PATH_DELEGATE_DELETED | (S_ISDIR(cond.mode) ? PATH_IS_DIR : 0);
              buffer = aa_get_buffer(in_atomic);
              if (!buffer)
                      return -ENOMEM;
      
              /* check every profile in task label not in current cache */
              error = fn_for_each_not_in_set(flabel, label, profile,
                              profile_path_perm(op, profile, &file->f_path, buffer,
                                                request, &cond, flags, &perms));
              if (denied && !error) {
                      /*
                       * check every profile in file label that was not tested
                       * in the initial check above.
                       *
                       * TODO: cache full perms so this only happens because of
                       * conditionals
                       * TODO: don't audit here
                       */
                      if (label == flabel)
                              error = fn_for_each(label, profile,
                                      profile_path_perm(op, profile, &file->f_path,
                                                        buffer, request, &cond, flags,
                                                        &perms));
                      else
                              error = fn_for_each_not_in_set(label, flabel, profile,
                                      profile_path_perm(op, profile, &file->f_path,
                                                        buffer, request, &cond, flags,
                                                        &perms));
              }
              if (!error)
                      update_file_ctx(file_ctx(file), label, request);
      
              aa_put_buffer(buffer);
      
              return error;
      }
      
      static int __file_sock_perm(const char *op, struct aa_label *label,
                                  struct aa_label *flabel, struct file *file,
                                  u32 request, u32 denied)
      {
              struct socket *sock = (struct socket *) file->private_data;
              int error;
      
              AA_BUG(!sock);
      
              /* revalidation due to label out of date. No revocation at this time */
              if (!denied && aa_label_is_subset(flabel, label))
                      return 0;
      
              /* TODO: improve to skip profiles cached in flabel */
              error = aa_sock_file_perm(label, op, request, sock);
              if (denied) {
                      /* TODO: improve to skip profiles checked above */
                /* check every profile cached in the file label */
                      last_error(error, aa_sock_file_perm(flabel, op, request, sock));
              }
              if (!error)
                      update_file_ctx(file_ctx(file), label, request);
      
              return error;
      }
      
      /**
       * aa_file_perm - do permission revalidation check & audit for @file
       * @op: operation being checked
       * @label: label being enforced   (NOT NULL)
       * @file: file to revalidate access permissions on  (NOT NULL)
       * @request: requested permissions
       * @in_atomic: whether allocations need to be done in atomic context
       *
       * Returns: %0 if access allowed else error
       */
      int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
                       u32 request, bool in_atomic)
      {
              struct aa_file_ctx *fctx;
              struct aa_label *flabel;
              u32 denied;
              int error = 0;
      
        AA_BUG(!label);
        AA_BUG(!file);
      
        fctx = file_ctx(file);
      
        rcu_read_lock();
        flabel = aa_get_newest_label(rcu_dereference(fctx->label));
        rcu_read_unlock();
              AA_BUG(!flabel);
      
              /* revalidate access, if task is unconfined, or the cached cred
               * doesn't match or if the request is for more permissions than
               * was granted.
               *
               * Note: the test for !unconfined(flabel) is to handle file
               *       delegation from unconfined tasks
               */
              denied = request & ~fctx->allow;
        if (unconfined(label) || unconfined(flabel) ||
                  (!denied && aa_label_is_subset(flabel, label)))
                      goto done;
      
              /* TODO: label cross check */
      
              if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
                      error = __file_path_perm(op, label, flabel, file, request,
                                               denied, in_atomic);
      
              else if (S_ISSOCK(file_inode(file)->i_mode))
                      error = __file_sock_perm(op, label, flabel, file, request,
                                               denied);
      done:
        aa_put_label(flabel);
        return error;
      }
      
      static void revalidate_tty(struct aa_label *label)
      {
              struct tty_struct *tty;
              int drop_tty = 0;
      
              tty = get_current_tty();
              if (!tty)
                      return;
      
              spin_lock(&tty->files_lock);
              if (!list_empty(&tty->tty_files)) {
                      struct tty_file_private *file_priv;
                      struct file *file;
                      /* TODO: Revalidate access to controlling tty. */
                      file_priv = list_first_entry(&tty->tty_files,
                                                   struct tty_file_private, list);
                      file = file_priv->file;
      
                      if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
                                       IN_ATOMIC))
                              drop_tty = 1;
              }
              spin_unlock(&tty->files_lock);
              tty_kref_put(tty);
      
              if (drop_tty)
                      no_tty();
      }
      
      static int match_file(const void *p, struct file *file, unsigned int fd)
      {
              struct aa_label *label = (struct aa_label *)p;
      
              if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
                               IN_ATOMIC))
                      return fd + 1;
              return 0;
      }
      
      
      /* based on selinux's flush_unauthorized_files */
      void aa_inherit_files(const struct cred *cred, struct files_struct *files)
      {
              struct aa_label *label = aa_get_newest_cred_label(cred);
              struct file *devnull = NULL;
              unsigned int n;
      
              revalidate_tty(label);
      
              /* Revalidate access to inherited open files. */
              n = iterate_fd(files, 0, match_file, label);
              if (!n) /* none found? */
                      goto out;
      
              devnull = dentry_open(&aa_null, O_RDWR, cred);
              if (IS_ERR(devnull))
                      devnull = NULL;
              /* replace all the matching ones with this */
              do {
                      replace_fd(n - 1, devnull, 0);
              } while ((n = iterate_fd(files, n, match_file, label)) != 0);
              if (devnull)
                      fput(devnull);
      out:
              aa_put_label(label);
      }
      // SPDX-License-Identifier: GPL-2.0
      /*
       *        XFRM virtual interface
       *
       *        Copyright (C) 2018 secunet Security Networks AG
       *
       *        Author:
       *        Steffen Klassert <steffen.klassert@secunet.com>
       */
      
      #include <linux/module.h>
      #include <linux/capability.h>
      #include <linux/errno.h>
      #include <linux/types.h>
      #include <linux/sockios.h>
      #include <linux/icmp.h>
      #include <linux/if.h>
      #include <linux/in.h>
      #include <linux/ip.h>
      #include <linux/net.h>
      #include <linux/in6.h>
      #include <linux/netdevice.h>
      #include <linux/if_link.h>
      #include <linux/if_arp.h>
      #include <linux/icmpv6.h>
      #include <linux/init.h>
      #include <linux/route.h>
      #include <linux/rtnetlink.h>
      #include <linux/netfilter_ipv6.h>
      #include <linux/slab.h>
      #include <linux/hash.h>
      
      #include <linux/uaccess.h>
      #include <linux/atomic.h>
      
      #include <net/icmp.h>
      #include <net/ip.h>
      #include <net/ipv6.h>
      #include <net/ip6_route.h>
      #include <net/addrconf.h>
      #include <net/xfrm.h>
      #include <net/net_namespace.h>
      #include <net/netns/generic.h>
      #include <linux/etherdevice.h>
      
      static int xfrmi_dev_init(struct net_device *dev);
      static void xfrmi_dev_setup(struct net_device *dev);
      static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
      static unsigned int xfrmi_net_id __read_mostly;
      
      struct xfrmi_net {
              /* lists for storing interfaces in use */
              struct xfrm_if __rcu *xfrmi[1];
      };
      
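/* walk the per-netns interface list; entries are RCU-protected */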
      #define for_each_xfrmi_rcu(start, xi) \
              for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
      
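/*
 * Find the xfrm interface whose if_id matches the state @x; only
 * interfaces that are administratively up are considered. Must be
 * called under rcu_read_lock().
 */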
      static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
      {
              struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
              struct xfrm_if *xi;
      
              for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
                      if (x->if_id == xi->p.if_id &&
                          (xi->dev->flags & IFF_UP))
                              return xi;
              }
      
              return NULL;
      }
      
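/*
 * Map an inbound, transformed skb to the xfrm interface it belongs to,
 * keyed by the receiving device's ifindex.
 */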
      static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
                                                  unsigned short family)
      {
              struct xfrmi_net *xfrmn;
              struct xfrm_if *xi;
              int ifindex = 0;
      
        if (!secpath_exists(skb) || !skb->dev)
                return NULL;
      
              switch (family) {
              case AF_INET6:
                      ifindex = inet6_sdif(skb);
                      break;
              case AF_INET:
                      ifindex = inet_sdif(skb);
                      break;
              }
              if (!ifindex)
                      ifindex = skb->dev->ifindex;
      
              xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
      
              for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
                if (ifindex == xi->dev->ifindex &&
                    (xi->dev->flags & IFF_UP))
                        return xi;
              }
      
              return NULL;
      }
      
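/* insert @xi at the head of the per-netns list; caller holds RTNL */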
      static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
      {
              struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];
      
        rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
              rcu_assign_pointer(*xip, xi);
      }
      
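/* remove @xi from the per-netns list; caller holds RTNL */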
      static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
      {
              struct xfrm_if __rcu **xip;
              struct xfrm_if *iter;
      
              for (xip = &xfrmn->xfrmi[0];
                   (iter = rtnl_dereference(*xip)) != NULL;
                   xip = &iter->next) {
                      if (xi == iter) {
                              rcu_assign_pointer(*xip, xi->next);
                              break;
                      }
              }
      }
      
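/* priv_destructor: release GRO cells and per-cpu stats on device free */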
      static void xfrmi_dev_free(struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
      
              gro_cells_destroy(&xi->gro_cells);
              free_percpu(dev->tstats);
      }
      
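/* register the netdev and publish it on the per-netns interface list */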
      static int xfrmi_create(struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
              struct net *net = dev_net(dev);
              struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
              int err;
      
              dev->rtnl_link_ops = &xfrmi_link_ops;
              err = register_netdevice(dev);
              if (err < 0)
                      goto out;
      
              dev_hold(dev);
              xfrmi_link(xfrmn, xi);
      
              return 0;
      
      out:
              return err;
      }
      
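/* find an existing interface with a matching if_id; caller holds RTNL */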
      static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
      {
              struct xfrm_if __rcu **xip;
              struct xfrm_if *xi;
              struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
      
              for (xip = &xfrmn->xfrmi[0];
                   (xi = rtnl_dereference(*xip)) != NULL;
                   xip = &xi->next)
                      if (xi->p.if_id == p->if_id)
                              return xi;
      
              return NULL;
      }
      
      static void xfrmi_dev_uninit(struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
              struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
      
              xfrmi_unlink(xfrmn, xi);
              dev_put(dev);
      }
      
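/*
 * Strip state that must not leak across the virtual hop; @xnet is true
 * when the packet also crosses a network namespace boundary.
 */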
      static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
      {
              skb->tstamp = 0;
              skb->pkt_type = PACKET_HOST;
              skb->skb_iif = 0;
              skb->ignore_df = 0;
              skb_dst_drop(skb);
              nf_reset_ct(skb);
              nf_reset_trace(skb);
      
              if (!xnet)
                      return;
      
              ipvs_reset(skb);
              secpath_reset(skb);
              skb_orphan(skb);
              skb->mark = 0;
      }
      
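/*
 * Input callback: after decapsulation, steer the packet to the matching
 * xfrm interface, run policy checks on cross-netns traffic and account
 * rx statistics. Returns 0 if the packet was consumed (or dropped with
 * stats updated), 1 if no interface matched, negative error otherwise.
 */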
      static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
      {
              const struct xfrm_mode *inner_mode;
              struct pcpu_sw_netstats *tstats;
              struct net_device *dev;
              struct xfrm_state *x;
              struct xfrm_if *xi;
              bool xnet;
      
              if (err && !secpath_exists(skb))
                      return 0;
      
              x = xfrm_input_state(skb);
      
              xi = xfrmi_lookup(xs_net(x), x);
              if (!xi)
                      return 1;
      
              dev = xi->dev;
              skb->dev = dev;
      
              if (err) {
                      dev->stats.rx_errors++;
                      dev->stats.rx_dropped++;
      
                      return 0;
              }
      
              xnet = !net_eq(xi->net, dev_net(skb->dev));
      
              if (xnet) {
                      inner_mode = &x->inner_mode;
      
                      if (x->sel.family == AF_UNSPEC) {
                              inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                              if (inner_mode == NULL) {
                                      XFRM_INC_STATS(dev_net(skb->dev),
                                                     LINUX_MIB_XFRMINSTATEMODEERROR);
                                      return -EINVAL;
                              }
                      }
      
                      if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
                                             inner_mode->family))
                              return -EPERM;
              }
      
              xfrmi_scrub_packet(skb, xnet);
      
              tstats = this_cpu_ptr(dev->tstats);
      
              u64_stats_update_begin(&tstats->syncp);
              tstats->rx_packets++;
              tstats->rx_bytes += skb->len;
              u64_stats_update_end(&tstats->syncp);
      
              return 0;
      }
      
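/*
 * Resolve an xfrm dst bound to this interface's if_id, enforce path MTU
 * (sending ICMP/ICMPv6 too-big errors as needed) and hand the packet to
 * dst_output().
 */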
      static int
      xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
      {
              struct xfrm_if *xi = netdev_priv(dev);
              struct net_device_stats *stats = &xi->dev->stats;
              struct dst_entry *dst = skb_dst(skb);
              unsigned int length = skb->len;
              struct net_device *tdev;
              struct xfrm_state *x;
              int err = -1;
              int mtu;
      
              if (!dst)
                      goto tx_err_link_failure;
      
              dst_hold(dst);
              dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
              if (IS_ERR(dst)) {
                      err = PTR_ERR(dst);
                      dst = NULL;
                      goto tx_err_link_failure;
              }
      
              x = dst->xfrm;
              if (!x)
                      goto tx_err_link_failure;
      
              if (x->if_id != xi->p.if_id)
                      goto tx_err_link_failure;
      
              tdev = dst->dev;
      
              if (tdev == dev) {
                      stats->collisions++;
                      net_warn_ratelimited("%s: Local routing loop detected!\n",
                                           dev->name);
                      goto tx_err_dst_release;
              }
      
              mtu = dst_mtu(dst);
              if (!skb->ignore_df && skb->len > mtu) {
                      skb_dst_update_pmtu(skb, mtu);
      
                      if (skb->protocol == htons(ETH_P_IPV6)) {
                              if (mtu < IPV6_MIN_MTU)
                                      mtu = IPV6_MIN_MTU;
      
                              icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                      } else {
                              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                        htonl(mtu));
                      }
      
                      dst_release(dst);
                      return -EMSGSIZE;
              }
      
              xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
              skb_dst_set(skb, dst);
              skb->dev = tdev;
      
              err = dst_output(xi->net, skb->sk, skb);
              if (net_xmit_eval(err) == 0) {
                      struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
      
                      u64_stats_update_begin(&tstats->syncp);
                      tstats->tx_bytes += length;
                      tstats->tx_packets++;
                      u64_stats_update_end(&tstats->syncp);
              } else {
                      stats->tx_errors++;
                      stats->tx_aborted_errors++;
              }
      
              return 0;
      tx_err_link_failure:
              stats->tx_carrier_errors++;
              dst_link_failure(skb);
      tx_err_dst_release:
              dst_release(dst);
              return err;
      }
      
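/* ndo_start_xmit: decode the flow from the packet and tunnel it */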
      static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
              struct net_device_stats *stats = &xi->dev->stats;
              struct flowi fl;
              int ret;
      
              memset(&fl, 0, sizeof(fl));
      
              switch (skb->protocol) {
              case htons(ETH_P_IPV6):
                      xfrm_decode_session(skb, &fl, AF_INET6);
                      memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
                      break;
              case htons(ETH_P_IP):
                      xfrm_decode_session(skb, &fl, AF_INET);
                      memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                      break;
              default:
                      goto tx_err;
              }
      
              fl.flowi_oif = xi->p.link;
      
              ret = xfrmi_xmit2(skb, dev, &fl);
              if (ret < 0)
                      goto tx_err;
      
              return NETDEV_TX_OK;
      
      tx_err:
              stats->tx_errors++;
              stats->tx_dropped++;
              kfree_skb(skb);
              return NETDEV_TX_OK;
      }
      
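/*
 * ICMP error handler: for frag-needed and redirect messages quoting
 * ESP/AH/IPCOMP traffic, update the PMTU or redirect information of the
 * affected state if it maps to an xfrm interface.
 */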
      static int xfrmi4_err(struct sk_buff *skb, u32 info)
      {
              const struct iphdr *iph = (const struct iphdr *)skb->data;
              struct net *net = dev_net(skb->dev);
              int protocol = iph->protocol;
              struct ip_comp_hdr *ipch;
              struct ip_esp_hdr *esph;
        struct ip_auth_hdr *ah;
              struct xfrm_state *x;
              struct xfrm_if *xi;
              __be32 spi;
      
              switch (protocol) {
              case IPPROTO_ESP:
                      esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
                      spi = esph->spi;
                      break;
              case IPPROTO_AH:
                      ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
                      spi = ah->spi;
                      break;
              case IPPROTO_COMP:
                      ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
                      spi = htonl(ntohs(ipch->cpi));
                      break;
              default:
                      return 0;
              }
      
              switch (icmp_hdr(skb)->type) {
              case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return 0;
                break;
        case ICMP_REDIRECT:
                      break;
              default:
                      return 0;
              }
      
              x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                                    spi, protocol, AF_INET);
              if (!x)
                      return 0;
      
              xi = xfrmi_lookup(net, x);
              if (!xi) {
                      xfrm_state_put(x);
                      return -1;
              }
      
              if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                      ipv4_update_pmtu(skb, net, info, 0, protocol);
              else
                      ipv4_redirect(skb, net, 0, protocol);
              xfrm_state_put(x);
      
              return 0;
      }
      
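/*
 * ICMPv6 counterpart of xfrmi4_err(): handle packet-too-big and NDISC
 * redirect errors for states reachable through an xfrm interface.
 */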
      static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                          u8 type, u8 code, int offset, __be32 info)
      {
              const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
              struct net *net = dev_net(skb->dev);
              int protocol = iph->nexthdr;
              struct ip_comp_hdr *ipch;
              struct ip_esp_hdr *esph;
              struct ip_auth_hdr *ah;
              struct xfrm_state *x;
              struct xfrm_if *xi;
              __be32 spi;
      
              switch (protocol) {
              case IPPROTO_ESP:
                      esph = (struct ip_esp_hdr *)(skb->data + offset);
                      spi = esph->spi;
                      break;
              case IPPROTO_AH:
                      ah = (struct ip_auth_hdr *)(skb->data + offset);
                      spi = ah->spi;
                      break;
              case IPPROTO_COMP:
                      ipch = (struct ip_comp_hdr *)(skb->data + offset);
                      spi = htonl(ntohs(ipch->cpi));
                      break;
              default:
                      return 0;
              }
      
              if (type != ICMPV6_PKT_TOOBIG &&
                  type != NDISC_REDIRECT)
                      return 0;
      
              x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                                    spi, protocol, AF_INET6);
              if (!x)
                      return 0;
      
              xi = xfrmi_lookup(net, x);
              if (!xi) {
                      xfrm_state_put(x);
                      return -1;
              }
      
              if (type == NDISC_REDIRECT)
                      ip6_redirect(skb, net, skb->dev->ifindex, 0,
                                   sock_net_uid(net, NULL));
              else
                      ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
              xfrm_state_put(x);
      
              return 0;
      }
      
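/* apply changeable parameters; the underlying link cannot be switched */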
      static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
      {
              if (xi->p.link != p->link)
                      return -EINVAL;
      
              xi->p.if_id = p->if_id;
      
              return 0;
      }
      
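/*
 * Unlink, wait out RCU readers, apply the new parameters and relink,
 * then notify userspace of the state change.
 */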
      static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
      {
              struct net *net = xi->net;
              struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
              int err;
      
              xfrmi_unlink(xfrmn, xi);
              synchronize_net();
              err = xfrmi_change(xi, p);
              xfrmi_link(xfrmn, xi);
              netdev_state_change(xi->dev);
              return err;
      }
      
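/* aggregate per-cpu counters consistently under the u64_stats seqcount */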
      static void xfrmi_get_stats64(struct net_device *dev,
                                     struct rtnl_link_stats64 *s)
      {
              int cpu;
      
              for_each_possible_cpu(cpu) {
                      struct pcpu_sw_netstats *stats;
                      struct pcpu_sw_netstats tmp;
                      int start;
      
                      stats = per_cpu_ptr(dev->tstats, cpu);
                      do {
                              start = u64_stats_fetch_begin_irq(&stats->syncp);
                              tmp.rx_packets = stats->rx_packets;
                              tmp.rx_bytes   = stats->rx_bytes;
                              tmp.tx_packets = stats->tx_packets;
                              tmp.tx_bytes   = stats->tx_bytes;
                      } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
      
                      s->rx_packets += tmp.rx_packets;
                      s->rx_bytes   += tmp.rx_bytes;
                      s->tx_packets += tmp.tx_packets;
                      s->tx_bytes   += tmp.tx_bytes;
              }
      
              s->rx_dropped = dev->stats.rx_dropped;
              s->tx_dropped = dev->stats.tx_dropped;
      }
      
      static int xfrmi_get_iflink(const struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
      
              return xi->p.link;
      }
      
static const struct net_device_ops xfrmi_netdev_ops = {
        .ndo_init        = xfrmi_dev_init,
        .ndo_uninit      = xfrmi_dev_uninit,
        .ndo_start_xmit  = xfrmi_xmit,
        .ndo_get_stats64 = xfrmi_get_stats64,
        .ndo_get_iflink  = xfrmi_get_iflink,
};
      
      static void xfrmi_dev_setup(struct net_device *dev)
      {
        dev->netdev_ops         = &xfrmi_netdev_ops;
        dev->type               = ARPHRD_NONE;
        dev->hard_header_len    = ETH_HLEN;
        dev->min_header_len     = ETH_HLEN;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
        dev->max_mtu            = ETH_DATA_LEN;
        dev->addr_len           = ETH_ALEN;
        dev->flags              = IFF_NOARP;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = xfrmi_dev_free;
              netif_keep_dst(dev);
      
              eth_broadcast_addr(dev->broadcast);
      }
      
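/*
 * ndo_init: allocate per-cpu stats and GRO cells, and inherit headroom
 * and link-layer addresses from the underlying link if one is set.
 */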
      static int xfrmi_dev_init(struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
              struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
              int err;
      
              dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
              if (!dev->tstats)
                      return -ENOMEM;
      
              err = gro_cells_init(&xi->gro_cells, dev);
              if (err) {
                      free_percpu(dev->tstats);
                      return err;
              }
      
              dev->features |= NETIF_F_LLTX;
      
              if (phydev) {
                      dev->needed_headroom = phydev->needed_headroom;
                      dev->needed_tailroom = phydev->needed_tailroom;
      
                      if (is_zero_ether_addr(dev->dev_addr))
                              eth_hw_addr_inherit(dev, phydev);
                      if (is_zero_ether_addr(dev->broadcast))
                              memcpy(dev->broadcast, phydev->broadcast,
                                     dev->addr_len);
              } else {
                      eth_hw_addr_random(dev);
                      eth_broadcast_addr(dev->broadcast);
              }
      
              return 0;
      }
      
      static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
                               struct netlink_ext_ack *extack)
      {
              return 0;
      }
      
      static void xfrmi_netlink_parms(struct nlattr *data[],
                                     struct xfrm_if_parms *parms)
      {
              memset(parms, 0, sizeof(*parms));
      
              if (!data)
                      return;
      
              if (data[IFLA_XFRM_LINK])
                      parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);
      
              if (data[IFLA_XFRM_IF_ID])
                      parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
      }
      
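/* rtnl newlink: refuse duplicate if_ids, then create the device */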
      static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
                              struct nlattr *tb[], struct nlattr *data[],
                              struct netlink_ext_ack *extack)
      {
              struct net *net = dev_net(dev);
              struct xfrm_if_parms p;
              struct xfrm_if *xi;
              int err;
      
              xfrmi_netlink_parms(data, &p);
              xi = xfrmi_locate(net, &p);
              if (xi)
                      return -EEXIST;
      
              xi = netdev_priv(dev);
              xi->p = p;
              xi->net = net;
              xi->dev = dev;
      
              err = xfrmi_create(dev);
              return err;
      }
      
      static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
      {
              unregister_netdevice_queue(dev, head);
      }
      
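/* rtnl changelink: apply new parameters unless the requested if_id is
 * already taken by another device
 */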
      static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
                                 struct nlattr *data[],
                                 struct netlink_ext_ack *extack)
      {
              struct xfrm_if *xi = netdev_priv(dev);
              struct net *net = xi->net;
              struct xfrm_if_parms p;
      
              xfrmi_netlink_parms(data, &p);
              xi = xfrmi_locate(net, &p);
              if (!xi) {
                      xi = netdev_priv(dev);
              } else {
                      if (xi->dev != dev)
                              return -EEXIST;
              }
      
              return xfrmi_update(xi, &p);
      }
      
      static size_t xfrmi_get_size(const struct net_device *dev)
      {
              return
                      /* IFLA_XFRM_LINK */
                      nla_total_size(4) +
                      /* IFLA_XFRM_IF_ID */
                      nla_total_size(4) +
                      0;
      }
      
      static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
              struct xfrm_if_parms *parm = &xi->p;
      
              if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
                  nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
                      goto nla_put_failure;
              return 0;
      
      nla_put_failure:
              return -EMSGSIZE;
      }
      
      static struct net *xfrmi_get_link_net(const struct net_device *dev)
      {
              struct xfrm_if *xi = netdev_priv(dev);
      
              return xi->net;
      }
      
static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
        [IFLA_XFRM_LINK]  = { .type = NLA_U32 },
        [IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
};
      
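/*
 * rtnl_link glue; userspace creates these devices over rtnetlink,
 * e.g. with iproute2 (illustrative device names and if_id):
 *
 *   ip link add ipsec0 type xfrm dev eth0 if_id 42
 */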
static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
        .kind           = "xfrm",
        .maxtype        = IFLA_XFRM_MAX,
        .policy         = xfrmi_policy,
        .priv_size      = sizeof(struct xfrm_if),
        .setup          = xfrmi_dev_setup,
        .validate       = xfrmi_validate,
        .newlink        = xfrmi_newlink,