From a4559a2b37ebbbd3c23041125624f55ef36905d1 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 2 Feb 2026 18:33:21 +0100 Subject: [PATCH 001/129] crypto: octeontx - Replace scnprintf with strscpy in print_ucode_info Replace scnprintf("%s", ...) with the faster and more direct strscpy(). Remove the parentheses while we're at it. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index 09e6a8474d1a..e0f38d32bc93 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -10,6 +10,7 @@ #include #include +#include #include #include "otx_cpt_common.h" #include "otx_cptpf_ucode.h" @@ -509,13 +510,12 @@ EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type); static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp, char *buf, int size) { - if (eng_grp->mirror.is_ena) { + if (eng_grp->mirror.is_ena) scnprintf(buf, size, "%s (shared with engine_group%d)", eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str, eng_grp->mirror.idx); - } else { - scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str); - } + else + strscpy(buf, eng_grp->ucode[0].ver_str, size); } static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp, From 476466e927ab5354dae90ea0bc3353cfcec0646a Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 3 Feb 2026 00:01:17 +0100 Subject: [PATCH 002/129] crypto: caam - Replace snprintf with strscpy in caam_hash_alloc Replace snprintf("%s", ...) with the faster and more direct strscpy(). 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 13 +++++-------- drivers/crypto/caam/caamhash.c | 12 ++++-------- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 167372936ca7..553994228a17 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -4644,16 +4645,12 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev, alg = &halg->halg.base; if (keyed) { - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", - template->hmac_name); - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - template->hmac_driver_name); + strscpy(alg->cra_name, template->hmac_name); + strscpy(alg->cra_driver_name, template->hmac_driver_name); t_alg->is_hmac = true; } else { - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", - template->name); - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - template->driver_name); + strscpy(alg->cra_name, template->name); + strscpy(alg->cra_driver_name, template->driver_name); t_alg->ahash_alg.setkey = NULL; t_alg->is_hmac = false; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 628c43a7efc4..e0a23c55c10e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1913,16 +1913,12 @@ caam_hash_alloc(struct caam_hash_template *template, alg = &halg->halg.base; if (keyed) { - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", - template->hmac_name); - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - template->hmac_driver_name); + strscpy(alg->cra_name, template->hmac_name); + strscpy(alg->cra_driver_name, template->hmac_driver_name); t_alg->is_hmac = true; } else { - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", - template->name); - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - 
template->driver_name); + strscpy(alg->cra_name, template->name); + strscpy(alg->cra_driver_name, template->driver_name); halg->setkey = NULL; t_alg->is_hmac = false; } From c75daa3730132c55dea7cc9c0f8818aea491fe0c Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Tue, 3 Feb 2026 19:21:51 +0100 Subject: [PATCH 003/129] crypto: safexcel - Group authenc ciphersuites Move authenc(sha1,des) and authenc(sha1,3des) ciphersuites to appropriate groups. No functional changes intended. Signed-off-by: Aleksander Jan Bajkowski Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 4 ++-- drivers/crypto/inside-secure/safexcel.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 660f45ab8647..072059154d1c 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1209,7 +1209,6 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_authenc_hmac_sha256_cbc_aes, &safexcel_alg_authenc_hmac_sha384_cbc_aes, &safexcel_alg_authenc_hmac_sha512_cbc_aes, - &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha1_ctr_aes, &safexcel_alg_authenc_hmac_sha224_ctr_aes, &safexcel_alg_authenc_hmac_sha256_ctr_aes, @@ -1241,11 +1240,12 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_hmac_sha3_256, &safexcel_alg_hmac_sha3_384, &safexcel_alg_hmac_sha3_512, - &safexcel_alg_authenc_hmac_sha1_cbc_des, + &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede, + &safexcel_alg_authenc_hmac_sha1_cbc_des, &safexcel_alg_authenc_hmac_sha256_cbc_des, &safexcel_alg_authenc_hmac_sha224_cbc_des, &safexcel_alg_authenc_hmac_sha512_cbc_des, diff --git 
a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 0f27367a85fa..ca012e2845f7 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -950,7 +950,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; -extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes; @@ -982,11 +981,12 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224; extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256; extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384; extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512; -extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des; From f050e4209ab0ba3f13bb6272a07ce87cbea922c9 Mon Sep 17 
00:00:00 2001 From: Aleksander Jan Bajkowski Date: Tue, 3 Feb 2026 19:21:52 +0100 Subject: [PATCH 004/129] crypto: safexcel - Add support for authenc(hmac(md5),*) suites This patch adds support for the following AEAD ciphersuites: - authenc(hmac(md5),cbc(aes)) - authenc(hmac(md5),cbc(des))) - authenc(hmac(md5),cbc(des3_ede)) - authenc(hmac(md5),rfc3686(ctr(aes))) The first three ciphersuites were tested using testmgr and the recently sent test vectors. They passed self-tests. This is enhanced version of the patch found in the mtk-openwrt-feeds repo. Signed-off-by: Aleksander Jan Bajkowski Reviewed-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 4 + drivers/crypto/inside-secure/safexcel.h | 4 + .../crypto/inside-secure/safexcel_cipher.c | 149 ++++++++++++++++++ 3 files changed, 157 insertions(+) diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 072059154d1c..fb4936e7afa2 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1204,11 +1204,13 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_hmac_sha256, &safexcel_alg_hmac_sha384, &safexcel_alg_hmac_sha512, + &safexcel_alg_authenc_hmac_md5_cbc_aes, &safexcel_alg_authenc_hmac_sha1_cbc_aes, &safexcel_alg_authenc_hmac_sha224_cbc_aes, &safexcel_alg_authenc_hmac_sha256_cbc_aes, &safexcel_alg_authenc_hmac_sha384_cbc_aes, &safexcel_alg_authenc_hmac_sha512_cbc_aes, + &safexcel_alg_authenc_hmac_md5_ctr_aes, &safexcel_alg_authenc_hmac_sha1_ctr_aes, &safexcel_alg_authenc_hmac_sha224_ctr_aes, &safexcel_alg_authenc_hmac_sha256_ctr_aes, @@ -1240,11 +1242,13 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_hmac_sha3_256, &safexcel_alg_hmac_sha3_384, &safexcel_alg_hmac_sha3_512, + &safexcel_alg_authenc_hmac_md5_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede, 
&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede, &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede, + &safexcel_alg_authenc_hmac_md5_cbc_des, &safexcel_alg_authenc_hmac_sha1_cbc_des, &safexcel_alg_authenc_hmac_sha256_cbc_des, &safexcel_alg_authenc_hmac_sha224_cbc_des, diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index ca012e2845f7..52fd460c0e9b 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -945,11 +945,13 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha224; extern struct safexcel_alg_template safexcel_alg_hmac_sha256; extern struct safexcel_alg_template safexcel_alg_hmac_sha384; extern struct safexcel_alg_template safexcel_alg_hmac_sha512; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes; @@ -981,11 +983,13 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224; extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256; extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384; extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede; extern struct safexcel_alg_template 
safexcel_alg_authenc_hmac_sha1_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 27b180057417..a8349b684693 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -462,6 +463,9 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, /* Auth key */ switch (ctx->hash_alg) { + case CONTEXT_CONTROL_CRYPTO_ALG_MD5: + alg = "safexcel-md5"; + break; case CONTEXT_CONTROL_CRYPTO_ALG_SHA1: alg = "safexcel-sha1"; break; @@ -1662,6 +1666,42 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm) return 0; } +static int safexcel_aead_md5_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; + ctx->state_sz = MD5_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { 
+ .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_md5_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1842,6 +1882,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { }, }; +static int safexcel_aead_md5_des3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_md5_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des3_ede", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_md5_des3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = 
crypto_tfm_ctx(tfm); @@ -2027,6 +2104,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = { }, }; +static int safexcel_aead_md5_des_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_md5_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_md5_des_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); @@ -2212,6 +2326,41 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = { }, }; +static int safexcel_aead_md5_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_md5_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, 
+ .decrypt = safexcel_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-md5-ctr-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_md5_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); From 0441ee8d35ad6998da5043c65c4124904e8daee2 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Fri, 6 Feb 2026 20:26:59 +0100 Subject: [PATCH 005/129] crypto: tesmgr - allow authenc(hmac(sha224/sha384),cbc(aes)) in fips mode The remaining combinations of AES-CBC and SHA* have already been marked as allowed. This commit does the same for SHA224 and SHA384. 
Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 49b607f65f63..9f41e7b50edc 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4134,6 +4134,7 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha224),cbc(aes))", .generic_driver = "authenc(hmac-sha224-lib,cbc(aes-generic))", .test = alg_test_aead, + .fips_allowed = 1, .suite = { .aead = __VECS(hmac_sha224_aes_cbc_tv_temp) } @@ -4196,6 +4197,7 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha384),cbc(aes))", .generic_driver = "authenc(hmac-sha384-lib,cbc(aes-generic))", .test = alg_test_aead, + .fips_allowed = 1, .suite = { .aead = __VECS(hmac_sha384_aes_cbc_tv_temp) } From 404ba6b46b6e234384b962210a98931f7423f139 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Sat, 7 Feb 2026 15:51:03 +0100 Subject: [PATCH 006/129] crypto: testmgr - Add test vectors for authenc(hmac(md5),cbc(des)) Test vector was generated using a software implementation and then double checked on Mediatek MT7981 (safexcel) and NXP P2020 (talitos). Both platforms pass self-tests. 
Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 7 ++++++ crypto/testmgr.h | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 9f41e7b50edc..8e92971b13ec 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4079,6 +4079,13 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .aead = __VECS(aegis128_tv_template) } + }, { + .alg = "authenc(hmac(md5),cbc(des))", + .generic_driver = "authenc(hmac-md5-lib,cbc(des-generic))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(hmac_md5_des_cbc_tv_temp) + } }, { .alg = "authenc(hmac(md5),cbc(des3_ede))", .generic_driver = "authenc(hmac-md5-lib,cbc(des3_ede-generic))", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1c69c11c0cdb..94727397988a 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -16137,6 +16137,63 @@ static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = { }, }; +static const struct aead_testvec hmac_md5_des_cbc_tv_temp[] = { + { /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x08" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", + .klen = 8 + 16 + 8, + .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", + .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" + "\x7D\x33\x88\x93\x0F\x93\xB2\x42", + .alen = 16, + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + "\x53\x20\x63\x65\x65\x72\x73\x74" + "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" + "\x20\x79\x65\x53\x72\x63\x74\x65" + "\x20\x73\x6f\x54\x20\x6f\x61\x4d" + "\x79\x6e\x53\x20\x63\x65\x65\x72" + "\x73\x74\x54\x20\x6f\x6f\x4d\x20" + "\x6e\x61\x20\x79\x65\x53\x72\x63" + "\x74\x65\x20\x73\x6f\x54\x20\x6f" + "\x61\x4d\x79\x6e\x53\x20\x63\x65" + 
"\x65\x72\x73\x74\x54\x20\x6f\x6f" + "\x4d\x20\x6e\x61\x20\x79\x65\x53" + "\x72\x63\x74\x65\x20\x73\x6f\x54" + "\x20\x6f\x61\x4d\x79\x6e\x53\x20" + "\x63\x65\x65\x72\x73\x74\x54\x20" + "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + "\x54\x31\x85\x37\xed\x6b\x01\x8d" + "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" + "\x41\xaa\x33\x91\xa7\x7d\x99\x88" + "\x4d\x85\x6e\x2f\xa3\x69\xf5\x82" + "\x3a\x6f\x25\xcb\x7d\x58\x1f\x9b" + "\xaa\x9c\x11\xd5\x76\x67\xce\xde" + "\x56\xd7\x5a\x80\x69\xea\x3a\x02" + "\xf0\xc7\x7c\xe3\xcb\x40\xe5\x52" + "\xd1\x10\x92\x78\x0b\x8e\x5b\xf1" + "\xe3\x26\x1f\xe1\x15\x41\xc7\xba" + "\x99\xdb\x08\x51\x1c\xd3\x01\xf4" + "\x87\x47\x39\xb8\xd2\xdd\xbd\xfb" + "\x66\x13\xdf\x1c\x01\x44\xf0\x7a" + "\x1a\x6b\x13\xf5\xd5\x0b\xb8\xba" + "\x53\xba\xe1\x76\xe3\x82\x07\x86" + "\x95\x9e\x7d\x37\x1e\x60\xaf\x7c" + "\x53\x12\x61\x68\xef\xb4\x47\xa6", + .clen = 128 + 16, + }, +}; + static const struct aead_testvec hmac_sha1_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN From 2127a1bf8940ea76b4fc604fa6b5e4d734ca22ed Mon Sep 17 00:00:00 2001 From: Pat Somaru Date: Sat, 7 Feb 2026 13:20:01 -0500 Subject: [PATCH 007/129] crypto: virtio - Convert from tasklet to BH workqueue The only generic interface to execute asynchronously in the BH context is tasklet; however, it's marked deprecated and has some design flaws such as the execution code accessing the tasklet item after the execution is complete which can lead to subtle use-after-free in certain usage scenarios and less-developed flush and cancel mechanisms. To replace tasklets, BH workqueue support was recently added. A BH workqueue behaves similarly to regular workqueues except that the queued work items are executed in the BH context. Convert virtio_crypto_core.c from tasklet to BH workqueue. Semantically, this is an equivalent conversion and there shouldn't be any user-visible behavior changes. 
The BH workqueue implementation uses the same softirq infrastructure, and performance-critical networking conversions have shown no measurable performance impact. Signed-off-by: Pat Somaru Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_common.h | 3 ++- drivers/crypto/virtio/virtio_crypto_core.c | 11 +++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h index e559bdadf4f9..0c2efdc83257 100644 --- a/drivers/crypto/virtio/virtio_crypto_common.h +++ b/drivers/crypto/virtio/virtio_crypto_common.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -29,7 +30,7 @@ struct data_queue { char name[32]; struct crypto_engine *engine; - struct tasklet_struct done_task; + struct work_struct done_work; }; struct virtio_crypto { diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 955bff8820da..ee83bf6568f0 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -70,9 +70,9 @@ int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterl return 0; } -static void virtcrypto_done_task(unsigned long data) +static void virtcrypto_done_work(struct work_struct *work) { - struct data_queue *data_vq = (struct data_queue *)data; + struct data_queue *data_vq = from_work(data_vq, work, done_work); struct virtqueue *vq = data_vq->vq; struct virtio_crypto_request *vc_req; unsigned long flags; @@ -96,7 +96,7 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq) struct virtio_crypto *vcrypto = vq->vdev->priv; struct data_queue *dq = &vcrypto->data_vq[vq->index]; - tasklet_schedule(&dq->done_task); + queue_work(system_bh_wq, &dq->done_work); } static int virtcrypto_find_vqs(struct virtio_crypto *vi) @@ -150,8 +150,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi) ret = -ENOMEM; goto 
err_engine; } - tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task, - (unsigned long)&vi->data_vq[i]); + INIT_WORK(&vi->data_vq[i].done_work, virtcrypto_done_work); } kfree(vqs_info); @@ -501,7 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev) if (virtcrypto_dev_started(vcrypto)) virtcrypto_dev_stop(vcrypto); for (i = 0; i < vcrypto->max_data_queues; i++) - tasklet_kill(&vcrypto->data_vq[i].done_task); + cancel_work_sync(&vcrypto->data_vq[i].done_work); virtio_reset_device(vdev); virtcrypto_free_unused_reqs(vcrypto); virtcrypto_clear_crypto_engines(vcrypto); From b7abbc8c7acaeb60c114b038f1fa91bbedb3d16a Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Sun, 8 Feb 2026 11:35:53 +0100 Subject: [PATCH 008/129] crypto: inside-secure/eip93 - fix register definition Checked the register definitions with the documentation[1]. Turns out that the PKTE_INBUF_CNT register has a bad offset. It's used in Direct Host Mode (DHM). The driver uses Autonomous Ring Mode (ARM), so it causes no harm. 1. 
ADSP-SC58x/ADSP-2158x SHARC+ Processor Hardware Reference Fixes: 9739f5f93b78 ("crypto: eip93 - Add Inside Secure SafeXcel EIP-93 crypto engine support") Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/eip93/eip93-regs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h index 0490b8d15131..116b3fbb6ad7 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-regs.h +++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h @@ -109,7 +109,7 @@ #define EIP93_REG_PE_BUF_THRESH 0x10c #define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16) #define EIP93_PE_INBUF_THRESH GENMASK(7, 0) -#define EIP93_REG_PE_INBUF_COUNT 0x100 +#define EIP93_REG_PE_INBUF_COUNT 0x110 #define EIP93_REG_PE_OUTBUF_COUNT 0x114 #define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */ From 094c276da6a0d4971c3faae09a36b51d096659b2 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sun, 15 Feb 2026 21:51:53 +0100 Subject: [PATCH 009/129] crypto: atmel-sha204a - Fix error codes in OTP reads Return -EINVAL from atmel_i2c_init_read_otp_cmd() on invalid addresses instead of -1. Since the OTP zone is accessed in 4-byte blocks, valid addresses range from 0 to OTP_ZONE_SIZE / 4 - 1. Fix the bounds check accordingly. In atmel_sha204a_otp_read(), propagate the actual error code from atmel_i2c_init_read_otp_cmd() instead of -1. Also, return -EIO instead of -EINVAL when the device is not ready. 
Cc: stable@vger.kernel.org Fixes: e05ce444e9e5 ("crypto: atmel-sha204a - add reading from otp zone") Signed-off-by: Thorsten Blum Reviewed-by: Lothar Rubusch Signed-off-by: Herbert Xu --- drivers/crypto/atmel-i2c.c | 4 ++-- drivers/crypto/atmel-sha204a.c | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index da3cd986b1eb..59d11fa5caeb 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -72,8 +72,8 @@ EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd); int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr) { - if (addr < 0 || addr > OTP_ZONE_SIZE) - return -1; + if (addr >= OTP_ZONE_SIZE / 4) + return -EINVAL; cmd->word_addr = COMMAND; cmd->opcode = OPCODE_READ; diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 98d1023007e3..8f422ed223d3 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -95,9 +95,10 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max, static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp) { struct atmel_i2c_cmd cmd; - int ret = -1; + int ret; - if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) { + ret = atmel_i2c_init_read_otp_cmd(&cmd, addr); + if (ret < 0) { dev_err(&client->dev, "failed, invalid otp address %04X\n", addr); return ret; @@ -107,7 +108,7 @@ static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp) if (cmd.data[0] == 0xff) { dev_err(&client->dev, "failed, device not ready\n"); - return -EINVAL; + return -EIO; } memcpy(otp, cmd.data+1, 4); From 635c3a757a567b2479639237f5f0d4d9439015f1 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 16 Feb 2026 08:45:51 +0100 Subject: [PATCH 010/129] crypto: atmel-sha204a - Fix OTP sysfs read and error handling Fix otp_show() to read and print all 64 bytes of the OTP zone. 
Previously, the loop only printed half of the OTP (32 bytes), and partial output was returned on read errors. Propagate the actual error from atmel_sha204a_otp_read() instead of producing partial output. Replace sprintf() with sysfs_emit_at(), which is preferred for formatting sysfs output because it provides safer bounds checking. Cc: stable@vger.kernel.org Fixes: 13909a0c8897 ("crypto: atmel-sha204a - provide the otp content") Signed-off-by: Thorsten Blum Reviewed-by: Lothar Rubusch Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha204a.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 8f422ed223d3..72c9d74d3062 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "atmel-i2c.h" @@ -121,21 +122,22 @@ static ssize_t otp_show(struct device *dev, { u16 addr; u8 otp[OTP_ZONE_SIZE]; - char *str = buf; struct i2c_client *client = to_i2c_client(dev); - int i; + ssize_t len = 0; + int i, ret; - for (addr = 0; addr < OTP_ZONE_SIZE/4; addr++) { - if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) { + for (addr = 0; addr < OTP_ZONE_SIZE / 4; addr++) { + ret = atmel_sha204a_otp_read(client, addr, otp + addr * 4); + if (ret < 0) { dev_err(dev, "failed to read otp zone\n"); - break; + return ret; } } - for (i = 0; i < addr*2; i++) - str += sprintf(str, "%02X", otp[i]); - str += sprintf(str, "\n"); - return str - buf; + for (i = 0; i < OTP_ZONE_SIZE; i++) + len += sysfs_emit_at(buf, len, "%02X", otp[i]); + len += sysfs_emit_at(buf, len, "\n"); + return len; } static DEVICE_ATTR_RO(otp); From 1eb6c478f1edc4384d8fea765cd13ac01199e8b5 Mon Sep 17 00:00:00 2001 From: Abhinaba Rakshit Date: Thu, 19 Feb 2026 15:09:13 +0530 Subject: [PATCH 011/129] dt-bindings: crypto: ice: add operating-points-v2 property for QCOM ICE Add support for specifying OPPs for the 
Qualcomm Inline Crypto Engine by allowing the use of the standard "operating-points-v2" property in the ICE device node. ICE clock management was handled by the storage drivers in legacy bindings, so the ICE driver itself had no mechanism for clock scaling. With the introduction of the new standalone ICE device node, clock control must now be performed directly by the ICE driver. Enabling operating-points-v2 allows the driver to describe and manage the frequency and voltage requirements for proper DVFS operation. Acked-by: Rob Herring (Arm) Signed-off-by: Abhinaba Rakshit Signed-off-by: Herbert Xu --- .../crypto/qcom,inline-crypto-engine.yaml | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml index 061ff718b23d..13ade0a633ec 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml @@ -31,6 +31,11 @@ properties: clocks: maxItems: 1 + operating-points-v2: true + + opp-table: + type: object + required: - compatible - reg @@ -47,5 +52,26 @@ examples: "qcom,inline-crypto-engine"; reg = <0x01d88000 0x8000>; clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>; + + operating-points-v2 = <&ice_opp_table>; + + ice_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-100000000 { + opp-hz = /bits/ 64 <100000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-201500000 { + opp-hz = /bits/ 64 <201500000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-403000000 { + opp-hz = /bits/ 64 <403000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; }; ... 
From de4e66b763d1e81188cb2803ec109466582fc9d1 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 20 Feb 2026 14:31:36 +0100 Subject: [PATCH 012/129] crypto: atmel-sha204a - Fix uninitialized data access on OTP read error Return early if atmel_i2c_send_receive() fails to avoid checking potentially uninitialized data in 'cmd.data'. Cc: stable@vger.kernel.org Fixes: e05ce444e9e5 ("crypto: atmel-sha204a - add reading from otp zone") Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha204a.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 72c9d74d3062..f9c7a4206774 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -106,6 +106,10 @@ static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp) } ret = atmel_i2c_send_receive(client, &cmd); + if (ret < 0) { + dev_err(&client->dev, "failed to read otp at %04X\n", addr); + return ret; + } if (cmd.data[0] == 0xff) { dev_err(&client->dev, "failed, device not ready\n"); From 095d50008d55d13f8fcf1bbeb7c6eba51779bc85 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 20 Feb 2026 15:03:13 +0100 Subject: [PATCH 013/129] crypto: atmel-ecc - Release client on allocation failure Call atmel_ecc_i2c_client_free() to release the I2C client reserved by atmel_ecc_i2c_client_alloc() when crypto_alloc_kpp() fails. Otherwise ->tfm_count will be out of sync. 
Fixes: 11105693fa05 ("crypto: atmel-ecc - introduce Microchip / Atmel ECC driver") Cc: stable@vger.kernel.org Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-ecc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index b6a77c8d439c..9c380351d2f9 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -261,6 +261,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm) if (IS_ERR(fallback)) { dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n", alg, PTR_ERR(fallback)); + atmel_ecc_i2c_client_free(ctx->client); return PTR_ERR(fallback); } From 1eccfd0e65f727258d50eddcbc9ede809475da92 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 23 Feb 2026 08:56:12 +0100 Subject: [PATCH 014/129] crypto: vmx - Remove disabled build directive CONFIG_CRYPTO_DEV_VMX has been moved to arch/powerpc - delete the disabled build directive. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 322ae8854e3e..283bbc650b5b 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -38,7 +38,6 @@ obj-y += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ -#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ obj-y += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ From 618195a7ac3dfb88ef7ff701439cabcba2096f31 Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Mon, 23 Feb 2026 10:44:02 +0200 Subject: [PATCH 015/129] dt-bindings: crypto: qcom,inline-crypto-engine: Document the Eliza ICE Document the Inline Crypto Engine (ICE) on the Eliza platform. 
Signed-off-by: Abel Vesa Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- .../devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml index 13ade0a633ec..876bf90ed96e 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml @@ -13,6 +13,7 @@ properties: compatible: items: - enum: + - qcom,eliza-inline-crypto-engine - qcom,kaanapali-inline-crypto-engine - qcom,milos-inline-crypto-engine - qcom,qcs8300-inline-crypto-engine From 39e2a75fffc78537504f25fac66530178ffb3ecd Mon Sep 17 00:00:00 2001 From: Daniele Alessandrelli Date: Mon, 23 Feb 2026 15:06:22 +0000 Subject: [PATCH 016/129] MAINTAINERS: Remove Daniele Alessandrelli as Keem Bay maintainer I'm leaving Intel soon. Remove myself as maintainer of Keem Bay architecture and related crypto drivers. The INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER has no replacement maintainer available, so mark it as Orphan. Signed-off-by: Daniele Alessandrelli Signed-off-by: Herbert Xu --- MAINTAINERS | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 55af015174a5..ee1f1b8eca79 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2913,7 +2913,6 @@ F: include/linux/soc/ixp4xx/qmgr.h ARM/INTEL KEEMBAY ARCHITECTURE M: Paul J. 
Murphy -M: Daniele Alessandrelli S: Maintained F: Documentation/devicetree/bindings/arm/intel,keembay.yaml F: arch/arm64/boot/dts/intel/keembay-evm.dts @@ -12985,8 +12984,7 @@ F: Documentation/devicetree/bindings/display/intel,keembay-display.yaml F: drivers/gpu/drm/kmb/ INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER -M: Daniele Alessandrelli -S: Maintained +S: Orphan F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-aes.yaml F: drivers/crypto/intel/keembay/Kconfig F: drivers/crypto/intel/keembay/Makefile @@ -12995,7 +12993,6 @@ F: drivers/crypto/intel/keembay/ocs-aes.c F: drivers/crypto/intel/keembay/ocs-aes.h INTEL KEEM BAY OCS ECC CRYPTO DRIVER -M: Daniele Alessandrelli M: Prabhjot Khurana M: Mark Gross S: Maintained @@ -13005,7 +13002,6 @@ F: drivers/crypto/intel/keembay/Makefile F: drivers/crypto/intel/keembay/keembay-ocs-ecc.c INTEL KEEM BAY OCS HCU CRYPTO DRIVER -M: Daniele Alessandrelli M: Declan Murphy S: Maintained F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml From 3193dd7e848b90cecca3b3f2ed4a04e6fca87bcb Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 23 Feb 2026 16:57:55 +0100 Subject: [PATCH 017/129] crypto: qce - Replace snprintf("%s") with strscpy Replace snprintf("%s", ...) with the faster and more direct strscpy(). 
Signed-off-by: Thorsten Blum Reviewed-by: Konrad Dybcio Signed-off-by: Herbert Xu --- drivers/crypto/qce/aead.c | 6 +++--- drivers/crypto/qce/sha.c | 6 +++--- drivers/crypto/qce/skcipher.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c index 846e1d42775d..1b487df45f36 100644 --- a/drivers/crypto/qce/aead.c +++ b/drivers/crypto/qce/aead.c @@ -5,6 +5,7 @@ */ #include #include +#include #include #include #include @@ -768,9 +769,8 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi alg = &tmpl->alg.aead; - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->drv_name); + strscpy(alg->base.cra_name, def->name); + strscpy(alg->base.cra_driver_name, def->drv_name); alg->base.cra_blocksize = def->blocksize; alg->chunksize = def->chunksize; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 402e4e64347d..1b37121cbcdc 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "common.h" @@ -489,9 +490,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, base->cra_module = THIS_MODULE; base->cra_init = qce_ahash_cra_init; - snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->drv_name); + strscpy(base->cra_name, def->name); + strscpy(base->cra_driver_name, def->drv_name); INIT_LIST_HEAD(&tmpl->entry); tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH; diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index 4ad3a1702010..db0b648a56eb 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -446,9 +447,8 @@ static int qce_skcipher_register_one(const struct 
qce_skcipher_def *def, alg = &tmpl->alg.skcipher; - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->drv_name); + strscpy(alg->base.cra_name, def->name); + strscpy(alg->base.cra_driver_name, def->drv_name); alg->base.cra_blocksize = def->blocksize; alg->chunksize = def->chunksize; From 68e96c97369e63414c2e02d1dcb92680ac60cd2f Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 23 Feb 2026 17:57:37 +0100 Subject: [PATCH 018/129] crypto: atmel-i2c - Replace hard-coded bus clock rate with constant Replace 1000000L with I2C_MAX_FAST_MODE_PLUS_FREQ. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index 59d11fa5caeb..0e275dbdc8c5 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -370,7 +370,7 @@ int atmel_i2c_probe(struct i2c_client *client) } } - if (bus_clk_rate > 1000000L) { + if (bus_clk_rate > I2C_MAX_FAST_MODE_PLUS_FREQ) { dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n", bus_clk_rate); return -EINVAL; From c9429bf56405a326845a8a35357b5bdf1dc4558c Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Tue, 24 Feb 2026 19:29:54 +0000 Subject: [PATCH 019/129] rhashtable: consolidate hash computation in rht_key_get_hash() The else-if and else branches in rht_key_get_hash() both compute a hash using either params.hashfn or jhash, differing only in the source of key_len (params.key_len vs ht->p.key_len). Merge the two branches into one by using the ternary `params.key_len ?: ht->p.key_len` to select the key length, removing the duplicated logic. This also improves the performance of the else branch which previously always used jhash and never fell through to jhash2. 
This branch is going to be used by BPF resizable hashmap, which wraps rhashtable: https://lore.kernel.org/bpf/20260205-rhash-v1-0-30dd6d63c462@meta.com/ Signed-off-by: Mykyta Yatsenko Signed-off-by: Herbert Xu --- include/linux/rhashtable.h | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 133ccb39137a..0480509a6339 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -129,10 +129,10 @@ static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht, unsigned int hash; /* params must be equal to ht->p if it isn't constant. */ - if (!__builtin_constant_p(params.key_len)) + if (!__builtin_constant_p(params.key_len)) { hash = ht->p.hashfn(key, ht->key_len, hash_rnd); - else if (params.key_len) { - unsigned int key_len = params.key_len; + } else { + unsigned int key_len = params.key_len ? : ht->p.key_len; if (params.hashfn) hash = params.hashfn(key, key_len, hash_rnd); @@ -140,13 +140,6 @@ static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht, hash = jhash(key, key_len, hash_rnd); else hash = jhash2(key, key_len / sizeof(u32), hash_rnd); - } else { - unsigned int key_len = ht->p.key_len; - - if (params.hashfn) - hash = params.hashfn(key, key_len, hash_rnd); - else - hash = jhash(key, key_len, hash_rnd); } return hash; From 2f5b5ce1e4b89c76a2b177ee689101a274d1a3c6 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 24 Feb 2026 17:45:00 -0800 Subject: [PATCH 020/129] crypto: acomp - repair kernel-doc warnings Correct kernel-doc: - add the @extra function parameter - add "_extra" to the mismatched function name - spell the "cmpl" parameter correctly to avoid these warnings: Warning: include/crypto/acompress.h:251 function parameter 'extra' not described in 'acomp_request_alloc_extra' Warning: include/crypto/acompress.h:251 expecting prototype for acomp_request_alloc(). 
Prototype was for acomp_request_alloc_extra() instead Warning: include/crypto/acompress.h:327 function parameter 'cmpl' not described in 'acomp_request_set_callback' Signed-off-by: Randy Dunlap Signed-off-by: Herbert Xu --- include/crypto/acompress.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 9eacb9fa375d..5d5358dfab73 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -240,9 +240,10 @@ static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm) } /** - * acomp_request_alloc() -- allocates asynchronous (de)compression request + * acomp_request_alloc_extra() -- allocates asynchronous (de)compression request * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @extra: amount of extra memory * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL) * * Return: allocated handle in case of success or NULL in case of an error @@ -318,7 +319,7 @@ static inline void acomp_request_free(struct acomp_req *req) * * @req: request that the callback will be set for * @flgs: specify for instance if the operation may backlog - * @cmlp: callback which will be called + * @cmpl: callback which will be called * @data: private data used by the caller */ static inline void acomp_request_set_callback(struct acomp_req *req, From 8fe0cdfd9cb073d4090e2f20f16dd4b44de7526e Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 24 Feb 2026 17:45:18 -0800 Subject: [PATCH 021/129] crypto: des - fix all kernel-doc warnings Use correct function parameter names and add Returns: sections to eliminate all kernel-doc warnings in des.h: Warning: include/crypto/des.h:41 function parameter 'keylen' not described in 'des_expand_key' Warning: include/crypto/des.h:41 No description found for return value of 'des_expand_key' Warning: include/crypto/des.h:54 function parameter 'keylen' not described in 'des3_ede_expand_key' Warning: include/crypto/des.h:54 No 
description found for return value of 'des3_ede_expand_key' Signed-off-by: Randy Dunlap Signed-off-by: Herbert Xu --- include/crypto/des.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/crypto/des.h b/include/crypto/des.h index 7812b4331ae4..73eec617f480 100644 --- a/include/crypto/des.h +++ b/include/crypto/des.h @@ -34,9 +34,9 @@ void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); * des_expand_key - Expand a DES input key into a key schedule * @ctx: the key schedule * @key: buffer containing the input key - * @len: size of the buffer contents + * @keylen: size of the buffer contents * - * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * Returns: 0 on success, -EINVAL if the input key is rejected and -ENOKEY if * the key is accepted but has been found to be weak. */ int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen); @@ -45,9 +45,9 @@ int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen); * des3_ede_expand_key - Expand a triple DES input key into a key schedule * @ctx: the key schedule * @key: buffer containing the input key - * @len: size of the buffer contents + * @keylen: size of the buffer contents * - * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * Returns: 0 on success, -EINVAL if the input key is rejected and -ENOKEY if * the key is accepted but has been found to be weak. Note that weak keys will * be rejected (and -EINVAL will be returned) when running in FIPS mode. 
*/ From d2ad1cf29a98adafaf85ddd5ccad6e40c14bcff9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 24 Feb 2026 17:45:28 -0800 Subject: [PATCH 022/129] crypto: ecc - correct kernel-doc format Fix all kernel-doc warnings in ecc.h: - use correct kernel-doc format - add some Returns: sections - fix spelling and parameter names Fixes these warnings: Warning: include/crypto/internal/ecc.h:82 function parameter 'nbytes' not described in 'ecc_digits_from_bytes' Warning: include/crypto/internal/ecc.h:82 function parameter 'out' not described in 'ecc_digits_from_bytes' Warning: include/crypto/internal/ecc.h:95 No description found for return value of 'ecc_is_key_valid' Warning: include/crypto/internal/ecc.h:110 No description found for return value of 'ecc_gen_privkey' Warning: include/crypto/internal/ecc.h:124 No description found for return value of 'ecc_make_pub_key' Warning: include/crypto/internal/ecc.h:143 No description found for return value of 'crypto_ecdh_shared_secret' Warning: include/crypto/internal/ecc.h:182 No description found for return value of 'vli_is_zero' Warning: include/crypto/internal/ecc.h:194 No description found for return value of 'vli_cmp' Warning: include/crypto/internal/ecc.h:209 function parameter 'right' not described in 'vli_sub' Warning: include/crypto/internal/ecc.h:271 expecting prototype for ecc_aloc_point(). 
Prototype was for ecc_alloc_point() instead Warning: include/crypto/internal/ecc.h:287 function parameter 'point' not described in 'ecc_point_is_zero' Signed-off-by: Randy Dunlap Signed-off-by: Herbert Xu --- include/crypto/internal/ecc.h | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h index 57cd75242141..a4b48d76f53a 100644 --- a/include/crypto/internal/ecc.h +++ b/include/crypto/internal/ecc.h @@ -72,8 +72,8 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit /** * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array * @in: Input byte array - * @nbytes Size of input byte array - * @out Output digits array + * @nbytes: Size of input byte array + * @out: Output digits array * @ndigits: Number of digits to create from byte array * * The first byte in the input byte array is expected to hold the most @@ -90,7 +90,7 @@ void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, * @private_key: private key to be used for the given curve * @private_key_len: private key length * - * Returns 0 if the key is acceptable, a negative value otherwise + * Returns: 0 if the key is acceptable, a negative value otherwise */ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, const u64 *private_key, unsigned int private_key_len); @@ -104,7 +104,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, * @ndigits: curve number of digits * @private_key: buffer for storing the generated private key * - * Returns 0 if the private key was generated successfully, a negative value + * Returns: 0 if the private key was generated successfully, a negative value * if an error occurred. 
*/ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, @@ -118,7 +118,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, * @private_key: pregenerated private key for the given curve * @public_key: buffer for storing the generated public key * - * Returns 0 if the public key was generated successfully, a negative value + * Returns: 0 if the public key was generated successfully, a negative value * if an error occurred. */ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits, @@ -136,7 +136,7 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits, * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret * before using it for symmetric encryption or HMAC. * - * Returns 0 if the shared secret was generated successfully, a negative value + * Returns: 0 if the shared secret was generated successfully, a negative value * if an error occurred. */ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, @@ -179,6 +179,8 @@ int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, * * @vli: vli to check. * @ndigits: length of the @vli + * + * Returns: %true if vli == 0, %false otherwise. */ bool vli_is_zero(const u64 *vli, unsigned int ndigits); @@ -189,7 +191,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits); * @right: vli * @ndigits: length of both vlis * - * Returns sign of @left - @right, i.e. -1 if @left < @right, + * Returns: sign of @left - @right, i.e. -1 if @left < @right, * 0 if @left == @right, 1 if @left > @right. */ int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits); @@ -199,7 +201,7 @@ int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits); * * @result: where to write result * @left: vli - * @right vli + * @right: vli * @ndigits: length of all vlis * * Note: can modify in-place. 
@@ -263,7 +265,7 @@ void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right, unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits); /** - * ecc_aloc_point() - Allocate ECC point. + * ecc_alloc_point() - Allocate ECC point. * * @ndigits: Length of vlis in u64 qwords. * @@ -281,7 +283,7 @@ void ecc_free_point(struct ecc_point *p); /** * ecc_point_is_zero() - Check if point is zero. * - * @p: Point to check for zero. + * @point: Point to check for zero. * * Return: true if point is the point at infinity, false otherwise. */ From 73117ea6470dca787f70f33c001f9faf437a1c0b Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Thu, 26 Feb 2026 16:07:03 +0800 Subject: [PATCH 023/129] padata: Remove cpu online check from cpu add and removal During the CPU offline process, the dying CPU is cleared from the cpu_online_mask in takedown_cpu(). After this step, various CPUHP_*_DEAD callbacks are executed to perform cleanup jobs for the dead CPU, so this cpu online check in padata_cpu_dead() is unnecessary. Similarly, when executing padata_cpu_online() during the CPUHP_AP_ONLINE_DYN phase, the CPU has already been set in the cpu_online_mask, the action even occurs earlier than the CPUHP_AP_ONLINE_IDLE stage. Remove this unnecessary cpu online check in __padata_add_cpu() and __padata_remove_cpu(). 
Signed-off-by: Chuyi Zhou Acked-by: Daniel Jordan Signed-off-by: Herbert Xu --- kernel/padata.c | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/kernel/padata.c b/kernel/padata.c index 8657e6e0c224..9e7cfa5ed55b 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -732,32 +732,22 @@ EXPORT_SYMBOL(padata_set_cpumask); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { - int err = 0; + int err = padata_replace(pinst); - if (cpumask_test_cpu(cpu, cpu_online_mask)) { - err = padata_replace(pinst); - - if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && - padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) - __padata_start(pinst); - } + if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && + padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) + __padata_start(pinst); return err; } static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) { - int err = 0; + if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || + !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) + __padata_stop(pinst); - if (!cpumask_test_cpu(cpu, cpu_online_mask)) { - if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || - !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) - __padata_stop(pinst); - - err = padata_replace(pinst); - } - - return err; + return padata_replace(pinst); } static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) From 35645ca63caa158e4df3853ada22630d861c821d Mon Sep 17 00:00:00 2001 From: T Pratham Date: Thu, 26 Feb 2026 18:24:39 +0530 Subject: [PATCH 024/129] crypto: ti - Add support for AES-CTR in DTHEv2 driver Add support for CTR mode of operation for AES algorithm in the AES Engine of the DTHEv2 hardware cryptographic engine. 
Signed-off-by: T Pratham Signed-off-by: Herbert Xu --- drivers/crypto/ti/Kconfig | 1 + drivers/crypto/ti/dthev2-aes.c | 173 ++++++++++++++++++++++++------ drivers/crypto/ti/dthev2-common.c | 19 ++++ drivers/crypto/ti/dthev2-common.h | 17 +++ 4 files changed, 180 insertions(+), 30 deletions(-) diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig index a3692ceec49b..6027e12de279 100644 --- a/drivers/crypto/ti/Kconfig +++ b/drivers/crypto/ti/Kconfig @@ -6,6 +6,7 @@ config CRYPTO_DEV_TI_DTHEV2 select CRYPTO_SKCIPHER select CRYPTO_ECB select CRYPTO_CBC + select CRYPTO_CTR select CRYPTO_XTS help This enables support for the TI DTHE V2 hw cryptography engine diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c index 156729ccc50e..bf7d4dcb4cd7 100644 --- a/drivers/crypto/ti/dthev2-aes.c +++ b/drivers/crypto/ti/dthev2-aes.c @@ -63,6 +63,7 @@ enum aes_ctrl_mode_masks { AES_CTRL_ECB_MASK = 0x00, AES_CTRL_CBC_MASK = BIT(5), + AES_CTRL_CTR_MASK = BIT(6), AES_CTRL_XTS_MASK = BIT(12) | BIT(11), }; @@ -74,6 +75,8 @@ enum aes_ctrl_mode_masks { #define DTHE_AES_CTRL_KEYSIZE_24B BIT(4) #define DTHE_AES_CTRL_KEYSIZE_32B (BIT(3) | BIT(4)) +#define DTHE_AES_CTRL_CTR_WIDTH_128B (BIT(7) | BIT(8)) + #define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29) #define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0) @@ -100,25 +103,27 @@ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm) return 0; } -static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm) +static int dthe_cipher_init_tfm_fallback(struct crypto_skcipher *tfm) { struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct dthe_data *dev_data = dthe_get_dev(ctx); + const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); ctx->dev_data = dev_data; ctx->keylen = 0; - ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0, + ctx->skcipher_fb = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->skcipher_fb)) { - dev_err(dev_data->dev, "fallback driver 
xts(aes) couldn't be loaded\n"); + dev_err(dev_data->dev, "fallback driver %s couldn't be loaded\n", + alg_name); return PTR_ERR(ctx->skcipher_fb); } return 0; } -static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm) +static void dthe_cipher_exit_tfm(struct crypto_skcipher *tfm) { struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -156,6 +161,24 @@ static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig return dthe_aes_setkey(tfm, key, keylen); } +static int dthe_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) +{ + struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret = dthe_aes_setkey(tfm, key, keylen); + + if (ret) + return ret; + + ctx->aes_mode = DTHE_AES_CTR; + + crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(ctx->skcipher_fb, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + + return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen); +} + static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -171,8 +194,8 @@ static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(ctx->skcipher_fb, - crypto_skcipher_get_flags(tfm) & - CRYPTO_TFM_REQ_MASK); + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen); } @@ -236,6 +259,10 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx, case DTHE_AES_CBC: ctrl_val |= AES_CTRL_CBC_MASK; break; + case DTHE_AES_CTR: + ctrl_val |= AES_CTRL_CTR_MASK; + ctrl_val |= DTHE_AES_CTRL_CTR_WIDTH_128B; + break; case DTHE_AES_XTS: ctrl_val |= AES_CTRL_XTS_MASK; break; @@ -251,6 +278,22 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx, writel_relaxed(ctrl_val, 
aes_base_reg + DTHE_P_AES_CTRL); } +static int dthe_aes_do_fallback(struct skcipher_request *req) +{ + struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req); + + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb); + + skcipher_request_set_callback(subreq, skcipher_request_flags(req), + req->base.complete, req->base.data); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + + return rctx->enc ? crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); +} + static void dthe_aes_dma_in_callback(void *data) { struct skcipher_request *req = (struct skcipher_request *)data; @@ -271,7 +314,7 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq) struct scatterlist *dst = req->dst; int src_nents = sg_nents_for_len(src, len); - int dst_nents; + int dst_nents = sg_nents_for_len(dst, len); int src_mapped_nents; int dst_mapped_nents; @@ -305,25 +348,62 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq) dst_dir = DMA_FROM_DEVICE; } + /* + * CTR mode can operate on any input length, but the hardware + * requires input length to be a multiple of the block size. + * We need to handle the padding in the driver. 
+ */ + if (ctx->aes_mode == DTHE_AES_CTR && req->cryptlen % AES_BLOCK_SIZE) { + unsigned int pad_size = AES_BLOCK_SIZE - (req->cryptlen % AES_BLOCK_SIZE); + u8 *pad_buf = rctx->padding; + struct scatterlist *sg; + + len += pad_size; + src_nents++; + dst_nents++; + + src = kmalloc_array(src_nents, sizeof(*src), GFP_ATOMIC); + if (!src) { + ret = -ENOMEM; + goto aes_ctr_src_alloc_err; + } + + sg_init_table(src, src_nents); + sg = dthe_copy_sg(src, req->src, req->cryptlen); + memzero_explicit(pad_buf, AES_BLOCK_SIZE); + sg_set_buf(sg, pad_buf, pad_size); + + if (diff_dst) { + dst = kmalloc_array(dst_nents, sizeof(*dst), GFP_ATOMIC); + if (!dst) { + ret = -ENOMEM; + goto aes_ctr_dst_alloc_err; + } + + sg_init_table(dst, dst_nents); + sg = dthe_copy_sg(dst, req->dst, req->cryptlen); + sg_set_buf(sg, pad_buf, pad_size); + } else { + dst = src; + } + } + tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx); rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx); src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir); if (src_mapped_nents == 0) { ret = -EINVAL; - goto aes_err; + goto aes_map_src_err; } if (!diff_dst) { - dst_nents = src_nents; dst_mapped_nents = src_mapped_nents; } else { - dst_nents = sg_nents_for_len(dst, len); dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir); if (dst_mapped_nents == 0) { - dma_unmap_sg(tx_dev, src, src_nents, src_dir); ret = -EINVAL; - goto aes_err; + goto aes_map_dst_err; } } @@ -353,8 +433,8 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq) else dthe_aes_set_ctrl_key(ctx, rctx, (u32 *)req->iv); - writel_relaxed(lower_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0); - writel_relaxed(upper_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1); + writel_relaxed(lower_32_bits(len), aes_base_reg + DTHE_P_AES_C_LENGTH_0); + writel_relaxed(upper_32_bits(len), aes_base_reg + DTHE_P_AES_C_LENGTH_1); dmaengine_submit(desc_in); dmaengine_submit(desc_out); @@ -386,11 +466,26 @@ 
static int dthe_aes_run(struct crypto_engine *engine, void *areq) } aes_prep_err: - dma_unmap_sg(tx_dev, src, src_nents, src_dir); if (dst_dir != DMA_BIDIRECTIONAL) dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir); +aes_map_dst_err: + dma_unmap_sg(tx_dev, src, src_nents, src_dir); + +aes_map_src_err: + if (ctx->aes_mode == DTHE_AES_CTR && req->cryptlen % AES_BLOCK_SIZE) { + memzero_explicit(rctx->padding, AES_BLOCK_SIZE); + if (diff_dst) + kfree(dst); +aes_ctr_dst_alloc_err: + kfree(src); +aes_ctr_src_alloc_err: + /* + * Fallback to software if ENOMEM + */ + if (ret == -ENOMEM) + ret = dthe_aes_do_fallback(req); + } -aes_err: local_bh_disable(); crypto_finalize_skcipher_request(dev_data->engine, req, ret); local_bh_enable(); @@ -400,7 +495,6 @@ aes_err: static int dthe_aes_crypt(struct skcipher_request *req) { struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req); struct dthe_data *dev_data = dthe_get_dev(ctx); struct crypto_engine *engine; @@ -408,20 +502,14 @@ static int dthe_aes_crypt(struct skcipher_request *req) * If data is not a multiple of AES_BLOCK_SIZE: * - need to return -EINVAL for ECB, CBC as they are block ciphers * - need to fallback to software as H/W doesn't support Ciphertext Stealing for XTS + * - do nothing for CTR */ if (req->cryptlen % AES_BLOCK_SIZE) { - if (ctx->aes_mode == DTHE_AES_XTS) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb); + if (ctx->aes_mode == DTHE_AES_XTS) + return dthe_aes_do_fallback(req); - skcipher_request_set_callback(subreq, skcipher_request_flags(req), - req->base.complete, req->base.data); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - - return rctx->enc ? 
crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - } - return -EINVAL; + if (ctx->aes_mode != DTHE_AES_CTR) + return -EINVAL; } /* @@ -501,8 +589,33 @@ static struct skcipher_engine_alg cipher_algs[] = { .op.do_one_request = dthe_aes_run, }, /* CBC AES */ { - .base.init = dthe_cipher_xts_init_tfm, - .base.exit = dthe_cipher_xts_exit_tfm, + .base.init = dthe_cipher_init_tfm_fallback, + .base.exit = dthe_cipher_exit_tfm, + .base.setkey = dthe_aes_ctr_setkey, + .base.encrypt = dthe_aes_encrypt, + .base.decrypt = dthe_aes_decrypt, + .base.min_keysize = AES_MIN_KEY_SIZE, + .base.max_keysize = AES_MAX_KEY_SIZE, + .base.ivsize = AES_IV_SIZE, + .base.chunksize = AES_BLOCK_SIZE, + .base.base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-dthev2", + .cra_priority = 299, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct dthe_tfm_ctx), + .cra_reqsize = sizeof(struct dthe_aes_req_ctx), + .cra_module = THIS_MODULE, + }, + .op.do_one_request = dthe_aes_run, + }, /* CTR AES */ + { + .base.init = dthe_cipher_init_tfm_fallback, + .base.exit = dthe_cipher_exit_tfm, .base.setkey = dthe_aes_xts_setkey, .base.encrypt = dthe_aes_encrypt, .base.decrypt = dthe_aes_decrypt, diff --git a/drivers/crypto/ti/dthev2-common.c b/drivers/crypto/ti/dthev2-common.c index c39d37933b9e..a2ad79bec105 100644 --- a/drivers/crypto/ti/dthev2-common.c +++ b/drivers/crypto/ti/dthev2-common.c @@ -48,6 +48,25 @@ struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx) return dev_data; } +struct scatterlist *dthe_copy_sg(struct scatterlist *dst, + struct scatterlist *src, + int buflen) +{ + struct scatterlist *from_sg, *to_sg; + int sglen; + + for (to_sg = dst, from_sg = src; buflen && from_sg; buflen -= sglen) { + sglen = from_sg->length; + if (sglen > buflen) + sglen = buflen; + sg_set_buf(to_sg, sg_virt(from_sg), sglen); + from_sg = sg_next(from_sg); + 
to_sg = sg_next(to_sg); + } + + return to_sg; +} + static int dthe_dma_init(struct dthe_data *dev_data) { int ret; diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h index c7a06a4c353f..5239ee93c944 100644 --- a/drivers/crypto/ti/dthev2-common.h +++ b/drivers/crypto/ti/dthev2-common.h @@ -36,6 +36,7 @@ enum dthe_aes_mode { DTHE_AES_ECB = 0, DTHE_AES_CBC, + DTHE_AES_CTR, DTHE_AES_XTS, }; @@ -92,10 +93,12 @@ struct dthe_tfm_ctx { /** * struct dthe_aes_req_ctx - AES engine req ctx struct * @enc: flag indicating encryption or decryption operation + * @padding: padding buffer for handling unaligned data * @aes_compl: Completion variable for use in manual completion in case of DMA callback failure */ struct dthe_aes_req_ctx { int enc; + u8 padding[AES_BLOCK_SIZE]; struct completion aes_compl; }; @@ -103,6 +106,20 @@ struct dthe_aes_req_ctx { struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx); +/** + * dthe_copy_sg - Copy sg entries from src to dst + * @dst: Destination sg to be filled + * @src: Source sg to be copied from + * @buflen: Number of bytes to be copied + * + * Description: + * Copy buflen bytes of data from src to dst. + * + **/ +struct scatterlist *dthe_copy_sg(struct scatterlist *dst, + struct scatterlist *src, + int buflen); + int dthe_register_aes_algs(void); void dthe_unregister_aes_algs(void); From 8456e55162aa38d65d3dcccf9a0eb3e33c01d1f0 Mon Sep 17 00:00:00 2001 From: Alexander Dahl Date: Thu, 26 Feb 2026 16:56:27 +0100 Subject: [PATCH 025/129] crypto: docs/userspace-if - Fix outdated links According to archive.org the site threw HTTP errors 404 since early 2024. The last snapshot in the archive having actual content was from late 2023. The page behind the new URL has more or less the same content as the archived page from 2023, so it probably was just moved without setting up a redirect. 
Signed-off-by: Alexander Dahl Signed-off-by: Herbert Xu --- Documentation/crypto/userspace-if.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst index 8158b363cd98..021759198fe7 100644 --- a/Documentation/crypto/userspace-if.rst +++ b/Documentation/crypto/userspace-if.rst @@ -23,7 +23,7 @@ user space, however. This includes the difference between synchronous and asynchronous invocations. The user space API call is fully synchronous. -[1] https://www.chronox.de/libkcapi.html +[1] https://www.chronox.de/libkcapi/index.html User Space API General Remarks ------------------------------ @@ -406,4 +406,4 @@ Please see [1] for libkcapi which provides an easy-to-use wrapper around the aforementioned Netlink kernel interface. [1] also contains a test application that invokes all libkcapi API calls. -[1] https://www.chronox.de/libkcapi.html +[1] https://www.chronox.de/libkcapi/index.html From 76755a576a7676ea75dcf2c1b06907876dea6622 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 27 Feb 2026 12:53:56 +0100 Subject: [PATCH 026/129] crypto: qat - Drop redundant local variables Return sysfs_emit() directly and drop 'ret' in cap_rem_show(). In cap_rem_store(), use 'ret' when calling set_param_u() instead of assigning it to 'val' first, and remove 'val'. 
Signed-off-by: Thorsten Blum Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c index f31556beed8b..89bfd8761d75 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c @@ -321,7 +321,7 @@ static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr, { struct adf_rl_interface_data *data; struct adf_accel_dev *accel_dev; - int ret, rem_cap; + int rem_cap; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) @@ -336,23 +336,19 @@ static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr, if (rem_cap < 0) return rem_cap; - ret = sysfs_emit(buf, "%u\n", rem_cap); - - return ret; + return sysfs_emit(buf, "%u\n", rem_cap); } static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - unsigned int val; int ret; ret = sysfs_match_string(rl_services, buf); if (ret < 0) return ret; - val = ret; - ret = set_param_u(dev, CAP_REM_SRV, val); + ret = set_param_u(dev, CAP_REM_SRV, ret); if (ret) return ret; From 25ab621f7de76fbcd6bfeb8915f8882785323674 Mon Sep 17 00:00:00 2001 From: Robert Marko Date: Mon, 2 Mar 2026 12:20:10 +0100 Subject: [PATCH 027/129] dt-bindings: rng: atmel,at91-trng: add microchip,lan9691-trng Document Microchip LAN969X TRNG compatible. 
Signed-off-by: Robert Marko Acked-by: Conor Dooley Reviewed-by: Claudiu Beznea Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml index f78614100ea8..3628251b8c51 100644 --- a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml +++ b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml @@ -19,6 +19,7 @@ properties: - microchip,sam9x60-trng - items: - enum: + - microchip,lan9691-trng - microchip,sama7g5-trng - const: atmel,at91sam9g45-trng - items: From d23cf35880114a1ad24efc262577e494ebfa0c23 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 2 Mar 2026 12:34:53 +0100 Subject: [PATCH 028/129] crypto: qce - Remove return variable and unused assignments In qce_aead_done(), the return variable 'ret' is no longer used - remove it. And qce_aead_prepare_dst_buf() jumps directly to 'dst_tbl_free:' on error and returns 'sg' - drop the useless 'ret' assignments. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/qce/aead.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c index 1b487df45f36..79e92bc3f7d3 100644 --- a/drivers/crypto/qce/aead.c +++ b/drivers/crypto/qce/aead.c @@ -36,7 +36,6 @@ static void qce_aead_done(void *data) u32 status; unsigned int totallen; unsigned char tag[SHA256_DIGEST_SIZE] = {0}; - int ret = 0; diff_dst = (req->src != req->dst) ? true : false; dir_src = diff_dst ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; @@ -80,8 +79,7 @@ static void qce_aead_done(void *data) } else if (!IS_CCM(rctx->flags)) { totallen = req->cryptlen + req->assoclen - ctx->authsize; scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0); - ret = memcmp(result_buf->auth_iv, tag, ctx->authsize); - if (ret) { + if (memcmp(result_buf->auth_iv, tag, ctx->authsize)) { pr_err("Bad message error\n"); error = -EBADMSG; } @@ -145,16 +143,12 @@ qce_aead_prepare_dst_buf(struct aead_request *req) sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg, rctx->assoclen); - if (IS_ERR(sg)) { - ret = PTR_ERR(sg); + if (IS_ERR(sg)) goto dst_tbl_free; - } /* dst buffer */ sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen); - if (IS_ERR(sg)) { - ret = PTR_ERR(sg); + if (IS_ERR(sg)) goto dst_tbl_free; - } totallen = rctx->cryptlen + rctx->assoclen; } else { if (totallen) { From 35a89319f60a48fb8cd07617f8e2c4649edbe361 Mon Sep 17 00:00:00 2001 From: "Tycho Andersen (AMD)" Date: Mon, 2 Mar 2026 08:02:23 -0700 Subject: [PATCH 029/129] crypto: ccp - simplify sev_update_firmware() sev_do_cmd() has its own command buffer (sev->cmd_buf) with the correct alignment, perms, etc. that it copies the command into, so prepending it to the firmware data is unnecessary. Switch sev_update_firmware() to using a stack allocated command in light of this copy, and drop all of the resulting pointer math. Signed-off-by: Tycho Andersen (AMD) Reviewed-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 8b2dfc11289b..89a11a741db7 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1965,11 +1965,11 @@ static int sev_get_firmware(struct device *dev, /* Don't fail if SEV FW couldn't be updated. 
Continue with existing SEV FW */ static int sev_update_firmware(struct device *dev) { - struct sev_data_download_firmware *data; + struct sev_data_download_firmware data; const struct firmware *firmware; int ret, error, order; struct page *p; - u64 data_size; + void *fw_blob; if (!sev_version_greater_or_equal(0, 15)) { dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); @@ -1981,16 +1981,7 @@ static int sev_update_firmware(struct device *dev) return -1; } - /* - * SEV FW expects the physical address given to it to be 32 - * byte aligned. Memory allocated has structure placed at the - * beginning followed by the firmware being passed to the SEV - * FW. Allocate enough memory for data structure + alignment - * padding + SEV FW. - */ - data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); - - order = get_order(firmware->size + data_size); + order = get_order(firmware->size); p = alloc_pages(GFP_KERNEL, order); if (!p) { ret = -1; @@ -2001,20 +1992,20 @@ static int sev_update_firmware(struct device *dev) * Copy firmware data to a kernel allocated contiguous * memory region. */ - data = page_address(p); - memcpy(page_address(p) + data_size, firmware->data, firmware->size); + fw_blob = page_address(p); + memcpy(fw_blob, firmware->data, firmware->size); - data->address = __psp_pa(page_address(p) + data_size); - data->len = firmware->size; + data.address = __psp_pa(fw_blob); + data.len = firmware->size; - ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error); /* * A quirk for fixing the committed TCB version, when upgrading from * earlier firmware version than 1.50. 
*/ if (!ret && !sev_version_greater_or_equal(1, 50)) - ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error); if (ret) dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); From 3ac949881396361b6462a717f6cbbd97f368af02 Mon Sep 17 00:00:00 2001 From: "Tycho Andersen (AMD)" Date: Mon, 2 Mar 2026 08:02:24 -0700 Subject: [PATCH 030/129] include/psp-sev.h: fix structure member in comment The member is 'data', not 'opaque'. Signed-off-by: Tycho Andersen (AMD) Reviewed-by: Tom Lendacky Signed-off-by: Herbert Xu --- include/uapi/linux/psp-sev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 2b5b042eb73b..52dae70b058b 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -277,7 +277,7 @@ struct sev_user_data_snp_wrapped_vlek_hashstick { * struct sev_issue_cmd - SEV ioctl parameters * * @cmd: SEV commands to execute - * @opaque: pointer to the command structure + * @data: pointer to the command structure * @error: SEV FW return code on failure */ struct sev_issue_cmd { From 32e76e3757e89f370bf2ac8dba8aeb133071834e Mon Sep 17 00:00:00 2001 From: Saeed Mirzamohammadi Date: Mon, 2 Mar 2026 15:59:14 -0800 Subject: [PATCH 031/129] crypto: tcrypt - clamp num_mb to avoid divide-by-zero Passing num_mb=0 to the multibuffer speed tests leaves test_mb_aead_cycles() and test_mb_acipher_cycles() dividing by (8 * num_mb). With sec=0 (the default), the module prints "1 operation in ..." and hits a divide-by-zero fault. Force num_mb to at least 1 during module init and warn the caller so the warm-up loop and the final report stay well-defined. 
To reproduce: sudo modprobe tcrypt mode=600 num_mb=0 Signed-off-by: Saeed Mirzamohammadi Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index aded37546137..61c8cf55c4f1 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2808,6 +2808,11 @@ static int __init tcrypt_mod_init(void) goto err_free_tv; } + if (!num_mb) { + pr_warn("num_mb must be at least 1; forcing to 1\n"); + num_mb = 1; + } + err = do_test(alg, type, mask, mode, num_mb); if (err) { From 03170b8f84354f1649a757e57c2130e1de237f5d Mon Sep 17 00:00:00 2001 From: Saeed Mirzamohammadi Date: Mon, 2 Mar 2026 16:06:40 -0800 Subject: [PATCH 032/129] crypto: tcrypt - stop ahash speed tests when setkey fails The async hash speed path ignores the return code from crypto_ahash_setkey(). If the caller picks an unsupported key length, the transform keeps whatever key state it already has and the speed test still runs, producing misleading numbers, hence bail out of the loop when setkey fails. 
Signed-off-by: Saeed Mirzamohammadi Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 61c8cf55c4f1..db860f45765f 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -911,8 +911,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, break; } - if (klen) - crypto_ahash_setkey(tfm, tvmem[0], klen); + if (klen) { + ret = crypto_ahash_setkey(tfm, tvmem[0], klen); + if (ret) { + pr_err("setkey() failed flags=%x: %d\n", + crypto_ahash_get_flags(tfm), ret); + break; + } + } pr_info("test%3u " "(%5u byte blocks,%5u bytes per update,%4u updates): ", From 1e84df6ccfcb342262b02dfdb723eaad50a0b6c9 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Tue, 3 Mar 2026 19:53:49 +0100 Subject: [PATCH 033/129] dt-bindings: crypto: inside-secure,safexcel: add compatible for MT7981 The MT7981 as well as the MT7986 have a built-in EIP-97 crypto accelerator. This commit adds a compatible string for MT7981. 
Signed-off-by: Aleksander Jan Bajkowski Reviewed-by: AngeloGioacchino Del Regno Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- .../devicetree/bindings/crypto/inside-secure,safexcel.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml index 3dc6c5f89d32..a34d13e92c59 100644 --- a/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml +++ b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml @@ -18,6 +18,7 @@ properties: - items: - enum: - marvell,armada-3700-crypto + - mediatek,mt7981-crypto - mediatek,mt7986-crypto - const: inside-secure,safexcel-eip97ies - const: inside-secure,safexcel-eip197b @@ -80,7 +81,9 @@ allOf: compatible: not: contains: - const: mediatek,mt7986-crypto + enum: + - mediatek,mt7981-crypto + - mediatek,mt7986-crypto then: properties: interrupts: From f06b4ee3351dee90d422305d164a7aa353c5fdd1 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Wed, 4 Mar 2026 09:24:02 +0100 Subject: [PATCH 034/129] crypto: atmel-sha204a - Drop redundant I2C_FUNC_I2C check atmel_i2c_probe() already verifies I2C_FUNC_I2C - remove the redundant check in atmel_sha204a_probe(). 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha204a.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index f9c7a4206774..691531647fd6 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -181,10 +181,6 @@ static int atmel_sha204a_probe(struct i2c_client *client) if (ret) dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); - /* otp read out */ - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) - return -ENODEV; - ret = sysfs_create_group(&client->dev.kobj, &atmel_sha204a_groups); if (ret) { dev_err(&client->dev, "failed to register sysfs entry\n"); From 4963b39e3a3feed07fbf4d5cc2b5df8498888285 Mon Sep 17 00:00:00 2001 From: Suman Kumar Chakraborty Date: Thu, 5 Mar 2026 08:58:58 +0000 Subject: [PATCH 035/129] crypto: qat - fix indentation of macros in qat_hal.c The macros in qat_hal.c were using a mixture of tabs and spaces. Update all macro indentation to use tabs consistently, matching the predominant style. This does not introduce any functional change. 
Signed-off-by: Suman Kumar Chakraborty Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_hal.c | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index 7a6ba6f22e3e..0f5a2690690a 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -9,17 +9,17 @@ #include "icp_qat_hal.h" #include "icp_qat_uclo.h" -#define BAD_REGADDR 0xffff -#define MAX_RETRY_TIMES 10000 -#define INIT_CTX_ARB_VALUE 0x0 -#define INIT_CTX_ENABLE_VALUE 0x0 -#define INIT_PC_VALUE 0x0 -#define INIT_WAKEUP_EVENTS_VALUE 0x1 -#define INIT_SIG_EVENTS_VALUE 0x1 -#define INIT_CCENABLE_VALUE 0x2000 -#define RST_CSR_QAT_LSB 20 -#define RST_CSR_AE_LSB 0 -#define MC_TIMESTAMP_ENABLE (0x1 << 7) +#define BAD_REGADDR 0xffff +#define MAX_RETRY_TIMES 10000 +#define INIT_CTX_ARB_VALUE 0x0 +#define INIT_CTX_ENABLE_VALUE 0x0 +#define INIT_PC_VALUE 0x0 +#define INIT_WAKEUP_EVENTS_VALUE 0x1 +#define INIT_SIG_EVENTS_VALUE 0x1 +#define INIT_CCENABLE_VALUE 0x2000 +#define RST_CSR_QAT_LSB 20 +#define RST_CSR_AE_LSB 0 +#define MC_TIMESTAMP_ENABLE (0x1 << 7) #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \ (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ From e7dcb722bb75bb3f3992f580a8728a794732fd7a Mon Sep 17 00:00:00 2001 From: Suman Kumar Chakraborty Date: Thu, 5 Mar 2026 08:58:59 +0000 Subject: [PATCH 036/129] crypto: qat - fix firmware loading failure for GEN6 devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit QAT GEN6 hardware requires a minimum 3 us delay during the acceleration engine reset sequence to ensure the hardware fully settles. Without this delay, the firmware load may fail intermittently. 
Add a delay after placing the AE into reset and before clearing the reset, matching the hardware requirements and ensuring stable firmware loading. Earlier generations remain unaffected. Fixes: 17fd7514ae68 ("crypto: qat - add qat_6xxx driver") Signed-off-by: Suman Kumar Chakraborty Cc: stable@vger.kernel.org Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 7 +++++++ .../crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h | 1 + drivers/crypto/intel/qat/qat_common/qat_hal.c | 5 ++++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c index f9f1018a2823..09d4f547e082 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ +#include #include #include #include "adf_cfg.h" @@ -162,8 +163,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev) static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + unsigned long reset_delay; qat_hal_reset(loader_data->fw_loader); + + reset_delay = loader_data->fw_loader->chip_info->reset_delay_us; + if (reset_delay) + fsleep(reset_delay); + if (qat_hal_clr_reset(loader_data->fw_loader)) return -EFAULT; diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h index 6887930c7995..e74cafa95f1c 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h @@ -27,6 +27,7 @@ struct icp_qat_fw_loader_chip_info { int mmp_sram_size; bool nn; bool lm2lm3; + u16 reset_delay_us; u32 lm_size; u32 
icp_rst_csr; u32 icp_rst_mask; diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index 0f5a2690690a..1c3d1311f1c7 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -20,6 +20,7 @@ #define RST_CSR_QAT_LSB 20 #define RST_CSR_AE_LSB 0 #define MC_TIMESTAMP_ENABLE (0x1 << 7) +#define MIN_RESET_DELAY_US 3 #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \ (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ @@ -713,8 +714,10 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, handle->chip_info->wakeup_event_val = 0x80000000; handle->chip_info->fw_auth = true; handle->chip_info->css_3k = true; - if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) + if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) { handle->chip_info->dual_sign = true; + handle->chip_info->reset_delay_us = MIN_RESET_DELAY_US; + } handle->chip_info->tgroup_share_ustore = true; handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX; handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX; From 68095ad9de9361844235c1e4e3bd5632f6b21929 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 5 Mar 2026 09:21:33 -0800 Subject: [PATCH 037/129] MAINTAINERS: Remove bouncing maintaner for IAA driver This maintainer's email is now bouncing. Remove them. Cc: Kristen Accardi Cc: Vinicius Costa Gomes Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Dave Hansen Signed-off-by: Herbert Xu --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index ee1f1b8eca79..a84e1e2af273 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12875,7 +12875,6 @@ F: drivers/dma/ioat* INTEL IAA CRYPTO DRIVER M: Kristen Accardi M: Vinicius Costa Gomes -M: Kanchana P Sridhar L: linux-crypto@vger.kernel.org S: Supported F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst From b45b4314d3e55be70b597baa1f0ab9283e68003b Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Thu, 5 Mar 2026 21:08:20 +0100 Subject: [PATCH 038/129] crypto: testmgr - Add test vectors for authenc(hmac(sha1),rfc3686(ctr(aes))) Test vectors were generated starting from existing RFC3686(CTR(AES)) test vectors and adding HMAC(SHA1) computed with software implementation. Then, the results were double-checked on Mediatek MT7986 (safexcel). Platform pass self-tests. Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 6 +- crypto/testmgr.h | 221 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 226 insertions(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 8e92971b13ec..9a7986efb87a 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4135,8 +4135,12 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))", - .test = alg_test_null, + .generic_driver = "authenc(hmac-sha1-lib,rfc3686(ctr(aes-lib)))", + .test = alg_test_aead, .fips_allowed = 1, + .suite = { + .aead = __VECS(hmac_sha1_aes_ctr_rfc3686_tv_temp) + } }, { .alg = "authenc(hmac(sha224),cbc(aes))", .generic_driver = "authenc(hmac-sha224-lib,cbc(aes-generic))", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 94727397988a..223b5a3f24df 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -14873,6 +14873,227 @@ static const struct aead_testvec hmac_sha1_aes_cbc_tv_temp[] = { 
}, }; +static const struct aead_testvec hmac_sha1_aes_ctr_rfc3686_tv_temp[] = { + { /* RFC 3686 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00" + "\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" + "\x00\x00\x00\x30", + .klen = 8 + 20 + 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79" + "\x2d\x61\x75\xa3\x26\x13\x11\xb8" + "\x70\xdc\x6b\x62\x43\xa1\x2f\x08" + "\xf1\xec\x93\x7d\x69\xb2\x8e\x1f" + "\x0a\x97\x39\x86", + .clen = 16 + 20, + }, { /* RFC 3686 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33" + "\x7e\x24\x06\x78\x17\xfa\xe0\xd7" + "\x43\xd6\xce\x1f\x32\x53\x91\x63" + "\x00\x6c\xb6\xdb", + .klen = 8 + 20 + 20, + .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9" + "\x79\x0d\x41\xee\x8e\xda\xd3\x88" + "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8" + "\xfc\xe6\x30\xdf\x91\x41\xbe\x28" + "\x6b\x7b\x4d\x39\x36\x1c\x12\x5f" + "\x72\xd2\x88\xb2\x26\xa6\xa6\xb5" + "\x1d\x3a\x49\xa6", + .clen = 32 + 20, + }, { /* RFC 3686 Case 3 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ 
+#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55" + "\x76\x91\xbe\x03\x5e\x50\x20\xa8" + "\xac\x6e\x61\x85\x29\xf9\xa0\xdc" + "\x00\xe0\x01\x7b", + .klen = 8 + 20 + 20, + .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23", + .plen = 36, + .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9" + "\xcf\x46\x52\xe9\xef\xdb\x72\xd7" + "\x45\x40\xa4\x2b\xde\x6d\x78\x36" + "\xd5\x9a\x5c\xea\xae\xf3\x10\x53" + "\x25\xb2\x07\x2f" + "\x2c\x86\xa0\x90\x8e\xc1\x02\x1d" + "\x51\xdc\xd6\x21\xc7\x30\xcc\x32" + "\x38\x55\x47\x64", + .clen = 36 + 20, + }, { /* RFC 3686 Case 4 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00" + "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79" + "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed" + "\x86\x3d\x06\xcc\xfd\xb7\x85\x15" + "\x00\x00\x00\x48", + .klen = 8 + 20 + 28, + .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8" + "\x4e\x79\x35\xa0\x03\xcb\xe9\x28" + "\xe9\x4e\x49\xf0\x6b\x8d\x58\x2b" + "\x26\x7f\xf3\xab\xeb\x2f\x74\x2f" + "\x45\x43\x64\xc1", + .clen = 16 + 20, + }, { /* RFC 3686 Case 5 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key 
length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33" + "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c" + "\x19\xe7\x34\x08\x19\xe0\xf6\x9c" + "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a" + "\x00\x96\xb0\x3b", + .klen = 8 + 20 + 28, + .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32" + "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f" + "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c" + "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00" + "\xab\xc4\xfa\x6d\x20\xe1\xce\x72" + "\x0e\x92\x4e\x97\xaa\x4d\x30\x84" + "\xb6\xd8\x4d\x3b", + .clen = 32 + 20, + }, { /* RFC 3686 Case 7 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00" + "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f" + "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c" + "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3" + "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04" + "\x00\x00\x00\x60", + .klen = 8 + 20 + 36, + .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7" + "\x56\x08\x63\xdc\x71\xe3\xe0\xc0" + "\x3d\x6c\x23\x27\xda\x0e\x7f\x29" + "\xfd\x8d\x3c\x1b\xf7\x7a\x63\xd9" + "\x7e\x0f\xe9\xf6", + .clen = 16 + 20, + }, { /* RFC 3686 Case 8 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + 
"\x30\x31\x32\x33" + "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb" + "\x07\x96\x36\x58\x79\xef\xf8\x86" + "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74" + "\x4b\x50\x59\x0c\x87\xa2\x38\x84" + "\x00\xfa\xac\x24", + .klen = 8 + 20 + 36, + .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c" + "\x49\xee\x00\x0b\x80\x4e\xb2\xa9" + "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a" + "\x55\x30\x83\x1d\x93\x44\xaf\x1c" + "\xe7\xee\x22\xa4\xdd\xbf\x5d\x44" + "\x3b\x43\x1c\x69\x55\x11\xd5\xad" + "\x14\x5f\x44\xa6", + .clen = 32 + 20, + }, +}; + static const struct aead_testvec hmac_sha1_ecb_cipher_null_tv_temp[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN From d46c27c01f132082095342b5abf4e83e250b70b8 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Thu, 5 Mar 2026 21:08:21 +0100 Subject: [PATCH 039/129] crypto: testmgr - Add test vectors for authenc(hmac(sha224),rfc3686(ctr(aes))) Test vectors were generated starting from existing RFC3686(CTR(AES)) test vectors and adding HMAC(SHA224) computed with software implementation. Then, the results were double-checked on Mediatek MT7986 (safexcel). Platform pass self-tests. 
Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 6 +- crypto/testmgr.h | 235 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 240 insertions(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 9a7986efb87a..f7d020a64c6a 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4165,8 +4165,12 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "authenc(hmac(sha224),rfc3686(ctr(aes)))", - .test = alg_test_null, + .generic_driver = "authenc(hmac-sha224-lib,rfc3686(ctr(aes-lib)))", + .test = alg_test_aead, .fips_allowed = 1, + .suite = { + .aead = __VECS(hmac_sha224_aes_ctr_rfc3686_tv_temp) + } }, { .alg = "authenc(hmac(sha256),cbc(aes))", .generic_driver = "authenc(hmac-sha256-lib,cbc(aes-lib))", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 223b5a3f24df..33fa867a019d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -15425,6 +15425,241 @@ static const struct aead_testvec hmac_sha224_aes_cbc_tv_temp[] = { }, }; +static const struct aead_testvec hmac_sha224_aes_ctr_rfc3686_tv_temp[] = { + { /* RFC 3686 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00" + "\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" + "\x00\x00\x00\x30", + .klen = 8 + 28 + 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79" + "\x2d\x61\x75\xa3\x26\x13\x11\xb8" + "\x36\xb4\x3b\x9c\x62\xed\xcf\x77" + "\xdc\x19\x27\x3f\x92\x80\x52\xce" + "\x8f\xad\x01\x0b\x79\xda\x04\x83" + "\xcb\x45\x1a\x52", + .clen = 16 + 28, + }, { /* RFC 
3686 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b" + "\x7e\x24\x06\x78\x17\xfa\xe0\xd7" + "\x43\xd6\xce\x1f\x32\x53\x91\x63" + "\x00\x6c\xb6\xdb", + .klen = 8 + 28 + 20, + .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9" + "\x79\x0d\x41\xee\x8e\xda\xd3\x88" + "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8" + "\xfc\xe6\x30\xdf\x91\x41\xbe\x28" + "\x7f\xe4\x8f\xa7\x06\x71\xe9\xe5" + "\x16\x79\xef\xf9\x7e\x5c\x93\x4d" + "\xa0\xf8\x3b\x3a\xaa\x1c\xc0\xd9" + "\x6b\x48\x49\x01", + .clen = 32 + 28, + }, { /* RFC 3686 Case 3 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd" + "\x76\x91\xbe\x03\x5e\x50\x20\xa8" + "\xac\x6e\x61\x85\x29\xf9\xa0\xdc" + "\x00\xe0\x01\x7b", + .klen = 8 + 28 + 20, + .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23", + .plen = 36, + .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9" + "\xcf\x46\x52\xe9\xef\xdb\x72\xd7" + "\x45\x40\xa4\x2b\xde\x6d\x78\x36" + 
"\xd5\x9a\x5c\xea\xae\xf3\x10\x53" + "\x25\xb2\x07\x2f" + "\xb0\x19\x45\xee\xa7\x31\xd9\xd0" + "\x74\x6b\xb8\xb1\x67\x61\x2f\x8c" + "\x68\xde\xe3\xc9\x3b\x0c\x72\xda" + "\x48\xba\x1b\x51", + .clen = 36 + 28, + }, { /* RFC 3686 Case 4 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00" + "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79" + "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed" + "\x86\x3d\x06\xcc\xfd\xb7\x85\x15" + "\x00\x00\x00\x48", + .klen = 8 + 28 + 28, + .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8" + "\x4e\x79\x35\xa0\x03\xcb\xe9\x28" + "\xfd\xf5\x35\x26\x50\x3d\xdf\x80" + "\x6e\xbe\xba\x8d\x56\xf3\x03\xb7" + "\x27\xb8\x13\xe8\x72\x8f\xc9\x52" + "\x4a\xb7\xc3\x3a", + .clen = 16 + 28, + }, { /* RFC 3686 Case 5 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b" + "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c" + "\x19\xe7\x34\x08\x19\xe0\xf6\x9c" + "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a" + "\x00\x96\xb0\x3b", + .klen = 8 + 28 + 28, + .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32" + 
"\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f" + "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c" + "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00" + "\x72\x89\xa8\x04\xa5\xac\x8f\x29" + "\xe6\xb8\x58\xe8\xcf\x6a\x91\x89" + "\xd3\x66\x3b\xdc\xce\x43\x23\xb7" + "\x6a\xdd\x9d\xbd", + .clen = 32 + 28, + }, { /* RFC 3686 Case 7 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00" + "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f" + "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c" + "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3" + "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04" + "\x00\x00\x00\x60", + .klen = 8 + 28 + 36, + .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7" + "\x56\x08\x63\xdc\x71\xe3\xe0\xc0" + "\xfe\xdf\x6f\x62\x8a\x79\xb5\x34" + "\xd0\x6f\x32\xaf\x31\x50\x5b\x1f" + "\xe0\x6d\x0b\xbc\x02\x25\xee\x74" + "\x7a\xdf\x97\x3c", + .clen = 16 + 28, + }, { /* RFC 3686 Case 8 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b" + "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb" + "\x07\x96\x36\x58\x79\xef\xf8\x86" + "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74" + "\x4b\x50\x59\x0c\x87\xa2\x38\x84" + "\x00\xfa\xac\x24", + .klen = 8 + 28 + 36, + .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" 
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c" + "\x49\xee\x00\x0b\x80\x4e\xb2\xa9" + "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a" + "\x55\x30\x83\x1d\x93\x44\xaf\x1c" + "\x19\x1e\x9c\x2c\x6d\x4e\x21\xda" + "\x6c\x4d\x88\x90\xf8\x5f\xa5\x9d" + "\xb4\xd4\x40\xad\xfa\x67\x3f\x0e" + "\x11\x12\xd6\x10", + .clen = 32 + 28, + }, +}; + static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN From 5ac6b904c70ff163fd2f9e152056300ce5ed6c26 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Thu, 5 Mar 2026 21:08:22 +0100 Subject: [PATCH 040/129] crypto: testmgr - Add test vectors for authenc(hmac(sha256),rfc3686(ctr(aes))) Test vectors were generated starting from existing RFC3686(CTR(AES)) test vectors and adding HMAC(SHA256) computed with software implementation. Then, the results were double-checked on Mediatek MT7986 (safexcel). Platform pass self-tests. Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 6 +- crypto/testmgr.h | 235 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 240 insertions(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f7d020a64c6a..ceb61327743f 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4206,8 +4206,12 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))", - .test = alg_test_null, + .generic_driver = "authenc(hmac-sha256-lib,rfc3686(ctr(aes-lib)))", + .test = alg_test_aead, .fips_allowed = 1, + .suite = { + .aead = __VECS(hmac_sha256_aes_ctr_rfc3686_tv_temp) + } }, { .alg = "authenc(hmac(sha384),cbc(aes))", .generic_driver = "authenc(hmac-sha384-lib,cbc(aes-generic))", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 33fa867a019d..1710809c951a 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -15943,6 +15943,241 @@ static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = 
{ }, }; +static const struct aead_testvec hmac_sha256_aes_ctr_rfc3686_tv_temp[] = { + { /* RFC 3686 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" + "\x00\x00\x00\x30", + .klen = 8 + 32 + 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79" + "\x2d\x61\x75\xa3\x26\x13\x11\xb8" + "\x9b\xa2\x34\x62\xe5\xb3\xe8\x2d" + "\x6d\xdb\x93\x64\xa5\x08\x2e\x77" + "\x72\x1f\x21\x94\xc7\xbe\x14\xa6" + "\xcd\xea\x96\xa1\x29\x8f\x30\xc3", + .clen = 16 + 32, + }, { /* RFC 3686 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x7e\x24\x06\x78\x17\xfa\xe0\xd7" + "\x43\xd6\xce\x1f\x32\x53\x91\x63" + "\x00\x6c\xb6\xdb", + .klen = 8 + 32 + 20, + .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9" + "\x79\x0d\x41\xee\x8e\xda\xd3\x88" + "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8" + "\xfc\xe6\x30\xdf\x91\x41\xbe\x28" + "\x22\xf7\x95\xa8\xbb\xcd\x19\xf4" + 
"\x58\x16\x54\x28\x2b\xf4\x52\xe7" + "\x5c\x6c\xe1\x44\x0b\xd5\x10\x6e" + "\xe1\xf7\x04\xc4\x2c\xab\x93\xdd", + .clen = 32 + 32, + }, { /* RFC 3686 Case 3 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + "\x76\x91\xbe\x03\x5e\x50\x20\xa8" + "\xac\x6e\x61\x85\x29\xf9\xa0\xdc" + "\x00\xe0\x01\x7b", + .klen = 8 + 32 + 20, + .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23", + .plen = 36, + .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9" + "\xcf\x46\x52\xe9\xef\xdb\x72\xd7" + "\x45\x40\xa4\x2b\xde\x6d\x78\x36" + "\xd5\x9a\x5c\xea\xae\xf3\x10\x53" + "\x25\xb2\x07\x2f" + "\x1d\x05\x5f\x77\x3b\x4f\x5c\x21" + "\x29\xea\xf1\xa8\x71\x49\x7b\x0b" + "\x66\x0d\xff\x18\x81\x63\xfc\xc3" + "\x91\xb6\x38\xc8\xcd\x2d\x39\x83", + .clen = 36 + 32, + }, { /* RFC 3686 Case 4 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79" + "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed" + "\x86\x3d\x06\xcc\xfd\xb7\x85\x15" + "\x00\x00\x00\x48", + .klen = 8 + 32 + 28, + .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = 
"\x4b\x55\x38\x4f\xe2\x59\xc9\xc8" + "\x4e\x79\x35\xa0\x03\xcb\xe9\x28" + "\x8d\x03\x77\xb2\x1c\xc9\xe0\xac" + "\xde\x69\xbe\x8a\xef\x5b\x13\x74" + "\x1d\x39\xbc\xdc\x95\xa4\xbf\xc3" + "\xd5\xc6\xd1\xda\xda\x3b\xca\x78", + .clen = 16 + 32, + }, { /* RFC 3686 Case 5 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c" + "\x19\xe7\x34\x08\x19\xe0\xf6\x9c" + "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a" + "\x00\x96\xb0\x3b", + .klen = 8 + 32 + 28, + .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32" + "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f" + "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c" + "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00" + "\x34\x06\x2b\x3d\xf1\xa8\x3d\xf1" + "\xa6\x5e\x5c\x1a\xdb\x0c\xb5\x1e" + "\x8f\xdb\xf4\xca\x7d\x09\x5e\x81" + "\xdb\x32\x07\x4a\x1d\x1c\x6d\x83", + .clen = 32 + 32, + }, { /* RFC 3686 Case 7 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f" + "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c" + "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3" + "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04" + "\x00\x00\x00\x60", + .klen = 8 + 32 + 36, + .iv = 
"\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7" + "\x56\x08\x63\xdc\x71\xe3\xe0\xc0" + "\xc3\xb4\x5f\xb0\xbf\xf5\x1b\xff" + "\x7c\xf1\x79\x00\x63\x50\xdd\x77" + "\xc0\x4a\xba\xcd\xdc\x47\x05\x2a" + "\x5d\x85\x2d\x83\x44\xca\x79\x2c", + .clen = 16 + 32, + }, { /* RFC 3686 Case 8 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb" + "\x07\x96\x36\x58\x79\xef\xf8\x86" + "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74" + "\x4b\x50\x59\x0c\x87\xa2\x38\x84" + "\x00\xfa\xac\x24", + .klen = 8 + 32 + 36, + .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c" + "\x49\xee\x00\x0b\x80\x4e\xb2\xa9" + "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a" + "\x55\x30\x83\x1d\x93\x44\xaf\x1c" + "\xc8\x59\x5d\xe1\xba\xac\x13\x82" + "\xfd\x21\x7c\x8c\x23\x31\x04\x02" + "\x9e\x69\x5b\x57\xa8\x13\xe7\x21" + "\x60\x0c\x24\xc2\x80\x4a\x93\x6e", + .clen = 32 + 32, + }, +}; + static const struct aead_testvec hmac_sha384_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN From 2f0814271715f974ae1fc6247c9918906c83e24b Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Thu, 5 Mar 2026 21:08:23 +0100 Subject: [PATCH 041/129] crypto: testmgr - Add test vectors for authenc(hmac(sha384),rfc3686(ctr(aes))) Test vectors were generated starting from existing 
RFC3686(CTR(AES)) test vectors and adding HMAC(SHA384) computed with software implementation. Then, the results were double-checked on Mediatek MT7986 (safexcel). Platform pass self-tests. Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 6 +- crypto/testmgr.h | 263 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 268 insertions(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index ceb61327743f..18fa0f3d5701 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4247,8 +4247,12 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))", - .test = alg_test_null, + .generic_driver = "authenc(hmac-sha384-lib,rfc3686(ctr(aes-lib)))", + .test = alg_test_aead, .fips_allowed = 1, + .suite = { + .aead = __VECS(hmac_sha384_aes_ctr_rfc3686_tv_temp) + } }, { .alg = "authenc(hmac(sha512),cbc(aes))", .generic_driver = "authenc(hmac-sha512-lib,cbc(aes-lib))", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1710809c951a..faecddec755c 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -16489,6 +16489,269 @@ static const struct aead_testvec hmac_sha384_aes_cbc_tv_temp[] = { }, }; +static const struct aead_testvec hmac_sha384_aes_ctr_rfc3686_tv_temp[] = { + { /* RFC 3686 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" + "\x00\x00\x00\x30", + .klen = 8 + 48 + 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 8, + .ptext = "Single 
block msg", + .plen = 16, + .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79" + "\x2d\x61\x75\xa3\x26\x13\x11\xb8" + "\x45\x51\x59\x72\x16\xd3\xc6\x15" + "\x25\x1e\xe8\x92\x2e\x47\x52\xcc" + "\x91\x9c\x24\xef\x11\xb2\x53\x00" + "\x10\x20\x43\x06\xe2\x35\x88\x9e" + "\x18\x32\x5a\x79\x7d\x73\x7e\x89" + "\xfe\xa1\xda\xa4\x86\xc4\x2a\x04", + .clen = 16 + 48, + }, { /* RFC 3686 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x7e\x24\x06\x78\x17\xfa\xe0\xd7" + "\x43\xd6\xce\x1f\x32\x53\x91\x63" + "\x00\x6c\xb6\xdb", + .klen = 8 + 48 + 20, + .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9" + "\x79\x0d\x41\xee\x8e\xda\xd3\x88" + "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8" + "\xfc\xe6\x30\xdf\x91\x41\xbe\x28" + "\x83\x65\x32\x1e\x6b\x60\xe6\x4a" + "\xe2\xab\x52\x2b\xa6\x70\x3a\xfa" + "\xd2\xec\x83\xe4\x31\x0c\x28\x40" + "\x9b\x5e\x18\xa4\xdc\x48\xb8\x56" + "\x33\xab\x7f\x2b\xaf\xe4\x3a\xe3" + "\x8a\x61\xf6\x22\xb4\x6b\xfe\x7d", + .clen = 32 + 48, + }, { /* RFC 3686 Case 3 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + 
"\x33\x44\x55\x66\x77\x88\x99\xaa" + "\xbb\xcc\xdd\xee\xff\x11\x22\x33" + "\x76\x91\xbe\x03\x5e\x50\x20\xa8" + "\xac\x6e\x61\x85\x29\xf9\xa0\xdc" + "\x00\xe0\x01\x7b", + .klen = 8 + 48 + 20, + .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23", + .plen = 36, + .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9" + "\xcf\x46\x52\xe9\xef\xdb\x72\xd7" + "\x45\x40\xa4\x2b\xde\x6d\x78\x36" + "\xd5\x9a\x5c\xea\xae\xf3\x10\x53" + "\x25\xb2\x07\x2f" + "\x4a\xaa\xad\x3b\x3b\xb6\x9a\xba" + "\xa1\x7b\xc6\xce\x96\xc3\xff\x67" + "\xf3\x0c\x33\x57\xf0\x51\x24\x08" + "\xed\x4f\x6a\x9c\x22\x42\xbd\x18" + "\x97\x74\x68\x36\x00\xf1\x69\x3a" + "\x18\x77\x40\xf0\x56\xba\xba\xe0", + .clen = 36 + 48, + }, { /* RFC 3686 Case 4 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79" + "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed" + "\x86\x3d\x06\xcc\xfd\xb7\x85\x15" + "\x00\x00\x00\x48", + .klen = 8 + 48 + 28, + .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8" + "\x4e\x79\x35\xa0\x03\xcb\xe9\x28" + "\x36\xd6\xc7\x55\xac\xb6\x0b\x14" + "\x95\x71\xf9\x86\x30\xe3\x96\xc3" + "\x76\x85\x6d\xa5\x06\xed\x6f\x34" + "\xcc\x1f\xcc\x2d\x88\x06\xb0\x1d" + "\xbe\xd9\xa2\xd3\x64\xf1\x33\x03" + "\x13\x50\x8f\xae\x61\x2d\x82\xb8", + .clen = 16 + 48, 
+ }, { /* RFC 3686 Case 5 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c" + "\x19\xe7\x34\x08\x19\xe0\xf6\x9c" + "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a" + "\x00\x96\xb0\x3b", + .klen = 8 + 48 + 28, + .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32" + "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f" + "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c" + "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00" + "\x80\x12\x67\x22\xf2\x4d\x9b\xbf" + "\xdc\x38\xd3\xaa\x12\xc0\x58\x1a" + "\x9a\x62\x6e\x42\x3d\x44\x63\xdd" + "\xee\x7e\xe3\xa3\xdf\x2a\x65\x05" + "\xd0\xc1\xd2\x54\x55\x35\x5c\xc7" + "\xb0\xb5\xb1\x36\xe0\x0b\xaf\x72", + .clen = 32 + 48, + }, { /* RFC 3686 Case 7 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f" + "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c" + "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3" + "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04" + "\x00\x00\x00\x60", + .klen = 8 + 48 + 36, + .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + 
.assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7" + "\x56\x08\x63\xdc\x71\xe3\xe0\xc0" + "\xb1\x7b\xb1\xec\xca\x94\x55\xc4" + "\x3f\x2b\xb1\x70\x04\x91\xf5\x9d" + "\x1a\xc0\xe1\x2a\x93\x5f\x96\x2a" + "\x12\x85\x38\x36\xe1\xb2\xe9\xf0" + "\xf2\x6e\x5d\x81\xcc\x49\x07\x9c" + "\x5b\x88\xc8\xcc\xc4\x21\x4f\x32", + .clen = 16 + 48, + }, { /* RFC 3686 Case 8 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb" + "\x07\x96\x36\x58\x79\xef\xf8\x86" + "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74" + "\x4b\x50\x59\x0c\x87\xa2\x38\x84" + "\x00\xfa\xac\x24", + .klen = 8 + 48 + 36, + .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c" + "\x49\xee\x00\x0b\x80\x4e\xb2\xa9" + "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a" + "\x55\x30\x83\x1d\x93\x44\xaf\x1c" + "\xd6\x96\xbb\x12\x39\xc4\x4d\xe2" + "\x4c\x02\xe7\x1f\xdc\xb2\xb1\x57" + "\x38\x0d\xdd\x13\xb3\x89\x57\x9e" + "\x1f\xb5\x48\x32\xc4\xd3\x9d\x1f" + "\x68\xab\x8d\xc6\xa8\x05\x3a\xc2" + "\x87\xaf\x23\xb3\xe4\x1b\xde\xb3", + .clen = 32 + 48, + }, +}; + static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN From 82fc2b17fa5b9b12d34770afcc8e3c4288735429 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Thu, 5 
Mar 2026 21:08:24 +0100 Subject: [PATCH 042/129] crypto: testmgr - Add test vectors for authenc(hmac(sha512),rfc3686(ctr(aes))) Test vectors were generated starting from existing RFC3686(CTR(AES)) test vectors and adding HMAC(SHA512) computed with software implementation. Then, the results were double-checked on Mediatek MT7986 (safexcel). Platform pass self-tests. Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 6 +- crypto/testmgr.h | 291 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 296 insertions(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 18fa0f3d5701..84beaa69cd59 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4281,8 +4281,12 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, }, { .alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))", - .test = alg_test_null, + .generic_driver = "authenc(hmac-sha512-lib,rfc3686(ctr(aes-lib)))", + .test = alg_test_aead, .fips_allowed = 1, + .suite = { + .aead = __VECS(hmac_sha512_aes_ctr_rfc3686_tv_temp) + } }, { .alg = "blake2b-160", .generic_driver = "blake2b-160-lib", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index faecddec755c..458fc8e87673 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -17148,6 +17148,297 @@ static const struct aead_testvec hmac_md5_des_cbc_tv_temp[] = { }, }; +static const struct aead_testvec hmac_sha512_aes_ctr_rfc3686_tv_temp[] = { + { /* RFC 3686 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + 
"\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" + "\x00\x00\x00\x30", + .klen = 8 + 64 + 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79" + "\x2d\x61\x75\xa3\x26\x13\x11\xb8" + "\xa4\x45\x3a\x44\x9c\xe5\x1c\xd9" + "\x10\x43\x51\x2e\x76\x5e\xf8\x9d" + "\x03\x12\x1a\x31\x00\x33\x10\xb4" + "\x94\x4b\x70\x84\x6c\xda\xb1\x46" + "\x24\xb6\x3b\x2a\xec\xd5\x67\xb8" + "\x65\xa2\xbd\xac\x18\xe2\xf8\x55" + "\xc6\x91\xb0\x92\x84\x2d\x74\x44" + "\xa7\xee\xc3\x44\xa0\x07\x0e\x62", + .clen = 16 + 64, + }, { /* RFC 3686 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x7e\x24\x06\x78\x17\xfa\xe0\xd7" + "\x43\xd6\xce\x1f\x32\x53\x91\x63" + "\x00\x6c\xb6\xdb", + .klen = 8 + 64 + 20, + .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9" + "\x79\x0d\x41\xee\x8e\xda\xd3\x88" + "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8" + "\xfc\xe6\x30\xdf\x91\x41\xbe\x28" + "\xec\x67\x0d\xb3\xbd\x98\x13\x01" + "\x2b\x04\x9b\xe6\x06\x67\x3c\x76" + "\xcd\x41\xb7\xcc\x70\x6c\x7f\xc8" + "\x67\xbd\x22\x39\xb2\xaa\xe8\x88" + "\xe0\x4f\x81\x52\xdf\xc9\xc3\xd6" + "\x44\xf4\x66\x33\x87\x64\x61\x02" + 
"\x02\xa2\x64\x15\x2b\xe9\x0b\x3d" + "\x4c\xea\xa1\xa5\xa7\xc9\xd3\x1b", + .clen = 32 + 64, + }, { /* RFC 3686 Case 3 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + "\x33\x44\x55\x66\x77\x88\x99\xaa" + "\xbb\xcc\xdd\xee\xff\x11\x22\x33" + "\x44\x55\x66\x77\x88\x99\xaa\xbb" + "\xcc\xdd\xee\xff\x11\x22\x33\x44" + "\x76\x91\xbe\x03\x5e\x50\x20\xa8" + "\xac\x6e\x61\x85\x29\xf9\xa0\xdc" + "\x00\xe0\x01\x7b", + .klen = 8 + 64 + 20, + .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23", + .plen = 36, + .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9" + "\xcf\x46\x52\xe9\xef\xdb\x72\xd7" + "\x45\x40\xa4\x2b\xde\x6d\x78\x36" + "\xd5\x9a\x5c\xea\xae\xf3\x10\x53" + "\x25\xb2\x07\x2f" + "\x6f\x90\xb6\xa3\x35\x43\x59\xff" + "\x1e\x32\xd6\xfe\xfa\x33\xf9\xf0" + "\x31\x2f\x03\x2d\x88\x1d\xab\xbf" + "\x0e\x19\x16\xd9\xf3\x98\x3e\xdd" + "\x0c\xec\xfe\xe8\x89\x13\x91\x15" + "\xf6\x61\x65\x5c\x1b\x7d\xde\xc0" + "\xe4\xba\x6d\x27\xe2\x89\x23\x24" + "\x15\x82\x37\x3d\x48\xd3\xc9\x32", + .clen = 36 + 64, + }, { /* RFC 3686 Case 4 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" 
+ "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79" + "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed" + "\x86\x3d\x06\xcc\xfd\xb7\x85\x15" + "\x00\x00\x00\x48", + .klen = 8 + 64 + 28, + .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8" + "\x4e\x79\x35\xa0\x03\xcb\xe9\x28" + "\x25\xea\xdc\xad\x52\xb8\x0f\x70" + "\xe7\x39\x83\x80\x10\x3f\x18\xc4" + "\xf8\x59\x14\x25\x5f\xba\x20\x87" + "\x0b\x04\x5e\xf7\xde\x41\x39\xff" + "\xa2\xee\x84\x3f\x9d\x38\xfd\x17" + "\xc0\x66\x5e\x74\x39\xe3\xd3\xd7" + "\x3d\xbc\xe3\x99\x2f\xe7\xef\x37" + "\x61\x03\xf3\x9e\x01\xaf\xba\x9d", + .clen = 16 + 64, + }, { /* RFC 3686 Case 5 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c" + "\x19\xe7\x34\x08\x19\xe0\xf6\x9c" + "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a" + "\x00\x96\xb0\x3b", + .klen = 8 + 64 + 28, + .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32" + "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f" + "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c" + "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00" + "\x51\xa3\xe6\x1d\x23\x7d\xd1\x18" + "\x55\x9c\x1c\x92\x2b\xc2\xcd\xfe" + 
"\x8a\xa8\xa5\x96\x65\x2e\x9d\xdb" + "\x06\xd2\x1c\x57\x2b\x76\xb5\x9c" + "\xd4\x3e\x8b\x61\x54\x2d\x08\xe5" + "\xb2\xf8\x88\x20\x0c\xad\xe8\x85" + "\x61\x8e\x5c\xa4\x96\x2c\xe2\x7d" + "\x4f\xb6\x1d\xb2\x8c\xd7\xe3\x38", + .clen = 32 + 64, + }, { /* RFC 3686 Case 7 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f" + "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c" + "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3" + "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04" + "\x00\x00\x00\x60", + .klen = 8 + 64 + 36, + .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7" + "\x56\x08\x63\xdc\x71\xe3\xe0\xc0" + "\x6b\x68\x0b\x99\x9a\x4d\xc8\xb9" + "\x35\xea\xcd\x56\x3f\x40\xa2\xb6" + "\x68\xda\x59\xd8\xa0\x89\xcd\x52" + "\xb1\x6e\xed\xc1\x42\x10\xa5\x0f" + "\x88\x0b\x80\xce\xc4\x67\xf0\x45" + "\x5d\xb2\x9e\xde\x1c\x79\x52\x0d" + "\xff\x75\x36\xd5\x0f\x52\x8e\xe5" + "\x31\x85\xcf\x1d\x31\xf8\x62\x67", + .clen = 16 + 64, + }, { /* RFC 3686 Case 8 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + 
"\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb" + "\x07\x96\x36\x58\x79\xef\xf8\x86" + "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74" + "\x4b\x50\x59\x0c\x87\xa2\x38\x84" + "\x00\xfa\xac\x24", + .klen = 8 + 64 + 36, + .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c" + "\x49\xee\x00\x0b\x80\x4e\xb2\xa9" + "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a" + "\x55\x30\x83\x1d\x93\x44\xaf\x1c" + "\x9a\xac\x38\xbd\xf3\xcf\xd5\xd0" + "\x09\x07\xa6\xe1\x7f\xd6\x79\x98" + "\x4e\x90\x0e\xc0\x3d\xa0\xf2\x12" + "\x52\x79\x9c\x17\xff\xb9\xb8\xe3" + "\x2f\x31\xcb\xbd\x63\x70\x72\x7b" + "\x4e\x1e\xd1\xde\xb5\x6b\x7d\x54" + "\x68\x56\xdd\xe5\x53\xee\x29\xd2" + "\x85\xa1\x73\x61\x00\xa9\x26\x8f", + .clen = 32 + 64, + }, +}; + static const struct aead_testvec hmac_sha1_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN From f4abb1af1bedafada8d7f814b85a7cf83d58f0b7 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 5 Mar 2026 20:19:15 -0800 Subject: [PATCH 043/129] MAINTAINERS: remove outdated entry for crypto/rng.c Lore shows no emails from Neil on linux-crypto since 2020. Without the listed person being active, this MAINTAINERS entry provides no value, and actually is a bit confusing because while it is called the "CRYPTOGRAPHIC RANDOM NUMBER GENERATOR", it is not the CRNG that is normally used (drivers/char/random.c) which has a separate entry. Remove this entry, so crypto/rng.c will just be covered by "CRYPTO API". 
Cc: Neil Horman Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- MAINTAINERS | 6 ------ 1 file changed, 6 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index a84e1e2af273..106f8264ea8d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6829,12 +6829,6 @@ L: linux-crypto@vger.kernel.org S: Maintained F: tools/crypto/tcrypt/tcrypt_speed_compare.py -CRYPTOGRAPHIC RANDOM NUMBER GENERATOR -M: Neil Horman -L: linux-crypto@vger.kernel.org -S: Maintained -F: crypto/rng.c - CS3308 MEDIA DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org From 5377032914b29b4643adece0ff1dfc67e36700f4 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Fri, 6 Mar 2026 23:17:40 +0100 Subject: [PATCH 044/129] crypto: inside-secure/eip93 - register hash before authenc algorithms Register hash before hmac and authenc algorithms. This will ensure selftests pass at startup. Previously, selftests failed on the crypto_alloc_ahash() function since the associated algorithm was not yet registered. Fixes following error: ... [ 18.375811] alg: self-tests for authenc(hmac(sha1),cbc(aes)) using authenc(hmac(sha1-eip93),cbc(aes-eip93)) failed (rc=-2) [ 18.382140] alg: self-tests for authenc(hmac(sha224),rfc3686(ctr(aes))) using authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93))) failed (rc=-2) [ 18.395029] alg: aead: authenc(hmac(sha256-eip93),cbc(des-eip93)) setkey failed on test vector 0; expected_error=0, actual_error=-2, flags=0x1 [ 18.409734] alg: aead: authenc(hmac(md5-eip93),cbc(des3_ede-eip93)) setkey failed on test vector 0; expected_error=0, actual_error=-2, flags=0x1 ... 
Fixes: 9739f5f93b78 ("crypto: eip93 - Add Inside Secure SafeXcel EIP-93 crypto engine support") Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/eip93/eip93-main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c index b7fd9795062d..76858bb4fcc2 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-main.c +++ b/drivers/crypto/inside-secure/eip93/eip93-main.c @@ -36,6 +36,14 @@ static struct eip93_alg_template *eip93_algs[] = { &eip93_alg_cbc_aes, &eip93_alg_ctr_aes, &eip93_alg_rfc3686_aes, + &eip93_alg_md5, + &eip93_alg_sha1, + &eip93_alg_sha224, + &eip93_alg_sha256, + &eip93_alg_hmac_md5, + &eip93_alg_hmac_sha1, + &eip93_alg_hmac_sha224, + &eip93_alg_hmac_sha256, &eip93_alg_authenc_hmac_md5_cbc_des, &eip93_alg_authenc_hmac_sha1_cbc_des, &eip93_alg_authenc_hmac_sha224_cbc_des, @@ -52,14 +60,6 @@ static struct eip93_alg_template *eip93_algs[] = { &eip93_alg_authenc_hmac_sha1_rfc3686_aes, &eip93_alg_authenc_hmac_sha224_rfc3686_aes, &eip93_alg_authenc_hmac_sha256_rfc3686_aes, - &eip93_alg_md5, - &eip93_alg_sha1, - &eip93_alg_sha224, - &eip93_alg_sha256, - &eip93_alg_hmac_md5, - &eip93_alg_hmac_sha1, - &eip93_alg_hmac_sha224, - &eip93_alg_hmac_sha256, }; inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask) From c8a9a647532f5c2a04180352693215e24e9dba03 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 7 Mar 2026 16:31:10 +0100 Subject: [PATCH 045/129] crypto: atmel-tdes - fix DMA sync direction Before DMA output is consumed by the CPU, ->dma_addr_out must be synced with dma_sync_single_for_cpu() instead of dma_sync_single_for_device(). Using the wrong direction can return stale cache data on non-coherent platforms. 
Fixes: 13802005d8f2 ("crypto: atmel - add Atmel DES/TDES driver") Fixes: 1f858040c2f7 ("crypto: atmel-tdes - add support for latest release of the IP (0x700)") Cc: stable@vger.kernel.org Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-tdes.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 278c0df3c92f..643e507f9c02 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -294,8 +294,8 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd) dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); } else { - dma_sync_single_for_device(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); /* copy data */ count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, @@ -619,8 +619,8 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); } else { - dma_sync_single_for_device(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); /* copy data */ count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, From c708d3fad4217f23421b8496e231b0c5cee617a0 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 9 Mar 2026 00:22:32 +0100 Subject: [PATCH 046/129] crypto: atmel - use list_first_entry_or_null to simplify find_dev Use list_first_entry_or_null() to simplify atmel_sha_find_dev() and remove the now-unused local variable 'struct atmel_sha_dev *tmp'. 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 1f1341a16c42..002b62902553 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -404,20 +404,13 @@ static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx) { - struct atmel_sha_dev *dd = NULL; - struct atmel_sha_dev *tmp; + struct atmel_sha_dev *dd; spin_lock_bh(&atmel_sha.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &atmel_sha.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - + if (!tctx->dd) + tctx->dd = list_first_entry_or_null(&atmel_sha.dev_list, + struct atmel_sha_dev, list); + dd = tctx->dd; spin_unlock_bh(&atmel_sha.lock); return dd; From e0ce97f781c78b717b00493630a9e34caf04f79b Mon Sep 17 00:00:00 2001 From: Wesley Atwell Date: Sun, 8 Mar 2026 22:31:43 -0600 Subject: [PATCH 047/129] crypto: simd - reject compat registrations without __ prefixes simd_register_skciphers_compat() and simd_register_aeads_compat() derive the wrapper algorithm names by stripping the __ prefix from the internal algorithm names. Currently they only WARN if cra_name or cra_driver_name lacks that prefix, but they still continue and unconditionally add 2 to both strings. That registers wrapper algorithms with incorrectly truncated names after a violated precondition. Reject such inputs with -EINVAL before registering anything, while keeping the warning so invalid internal API usage is still visible. 
Fixes: d14f0a1fc488 ("crypto: simd - allow registering multiple algorithms at once") Fixes: 1661131a0479 ("crypto: simd - support wrapping AEAD algorithms") Assisted-by: Codex:GPT-5 Signed-off-by: Wesley Atwell Signed-off-by: Herbert Xu --- crypto/simd.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/crypto/simd.c b/crypto/simd.c index f71c4a334c7d..4e6f437e9e77 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -214,13 +214,17 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, const char *basename; struct simd_skcipher_alg *simd; + for (i = 0; i < count; i++) { + if (WARN_ON(strncmp(algs[i].base.cra_name, "__", 2) || + strncmp(algs[i].base.cra_driver_name, "__", 2))) + return -EINVAL; + } + err = crypto_register_skciphers(algs, count); if (err) return err; for (i = 0; i < count; i++) { - WARN_ON(strncmp(algs[i].base.cra_name, "__", 2)); - WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2)); algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; @@ -437,13 +441,17 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count, const char *basename; struct simd_aead_alg *simd; + for (i = 0; i < count; i++) { + if (WARN_ON(strncmp(algs[i].base.cra_name, "__", 2) || + strncmp(algs[i].base.cra_driver_name, "__", 2))) + return -EINVAL; + } + err = crypto_register_aeads(algs, count); if (err) return err; for (i = 0; i < count; i++) { - WARN_ON(strncmp(algs[i].base.cra_name, "__", 2)); - WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2)); algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; From 2ef3bac16fb5e9eee4fb1d722578a79b751ea58a Mon Sep 17 00:00:00 2001 From: Wesley Atwell Date: Mon, 9 Mar 2026 00:26:24 -0600 Subject: [PATCH 048/129] crypto: krb5enc - fix sleepable flag handling in encrypt dispatch krb5enc_encrypt_ahash_done() continues encryption from an 
ahash completion callback by calling krb5enc_dispatch_encrypt(). That helper takes a flags argument for this continuation path, but it ignored that argument and reused aead_request_flags(req) when setting up the skcipher subrequest callback. This can incorrectly preserve CRYPTO_TFM_REQ_MAY_SLEEP when the encrypt step is started from callback context. Preserve the original request flags but clear CRYPTO_TFM_REQ_MAY_SLEEP for the callback continuation path, and use the caller-supplied flags when setting up the skcipher subrequest. Fixes: d1775a177f7f ("crypto: Add 'krb5enc' hash and cipher AEAD algorithm") Assisted-by: Codex:GPT-5 Signed-off-by: Wesley Atwell Signed-off-by: Herbert Xu --- crypto/krb5enc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c index a1de55994d92..1bfe8370cf94 100644 --- a/crypto/krb5enc.c +++ b/crypto/krb5enc.c @@ -154,7 +154,7 @@ static int krb5enc_dispatch_encrypt(struct aead_request *req, dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); skcipher_request_set_tfm(skreq, enc); - skcipher_request_set_callback(skreq, aead_request_flags(req), + skcipher_request_set_callback(skreq, flags, krb5enc_encrypt_done, req); skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv); @@ -192,7 +192,8 @@ static void krb5enc_encrypt_ahash_done(void *data, int err) krb5enc_insert_checksum(req, ahreq->result); - err = krb5enc_dispatch_encrypt(req, 0); + err = krb5enc_dispatch_encrypt(req, + aead_request_flags(req) & ~CRYPTO_TFM_REQ_MAY_SLEEP); if (err != -EINPROGRESS) aead_request_complete(req, err); } From 326118443ec3de85708a2678dcc2c14eea417228 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 9 Mar 2026 22:11:21 +0100 Subject: [PATCH 049/129] crypto: artpec6 - use memcpy_and_pad to simplify prepare_hash Use memcpy_and_pad() instead of memcpy() followed by memset() to simplify artpec6_crypto_prepare_hash(). 
Also fix a duplicate word in a comment and remove a now-redundant one. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index b04d6379244a..a4793b76300c 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -1323,7 +1323,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) artpec6_crypto_init_dma_operation(common); - /* Upload HMAC key, must be first the first packet */ + /* Upload HMAC key, it must be the first packet */ if (req_ctx->hash_flags & HASH_FLAG_HMAC) { if (variant == ARTPEC6_CRYPTO) { req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, @@ -1333,11 +1333,8 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) a7_regk_crypto_dlkey); } - /* Copy and pad up the key */ - memcpy(req_ctx->key_buffer, ctx->hmac_key, - ctx->hmac_key_length); - memset(req_ctx->key_buffer + ctx->hmac_key_length, 0, - blocksize - ctx->hmac_key_length); + memcpy_and_pad(req_ctx->key_buffer, blocksize, ctx->hmac_key, + ctx->hmac_key_length, 0); error = artpec6_crypto_setup_out_descr(common, (void *)&req_ctx->key_md, From 2aeec9af775fb53aa086419b953302c6f4ad4984 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 10 Mar 2026 18:28:29 +0900 Subject: [PATCH 050/129] crypto: tegra - Disable softirqs before finalizing request Softirqs must be disabled when calling the finalization function on a request. 
Reported-by: Guangwu Zhang Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 9 +++++++++ drivers/crypto/tegra/tegra-se-hash.c | 3 +++ 2 files changed, 12 insertions(+) diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index 0e07d0523291..8b91f00b9c31 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -4,6 +4,7 @@ * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine. */ +#include #include #include #include @@ -333,7 +334,9 @@ out: tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg); out_finalize: + local_bh_disable(); crypto_finalize_skcipher_request(se->engine, req, ret); + local_bh_enable(); return 0; } @@ -1261,7 +1264,9 @@ out_free_inbuf: tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); out_finalize: + local_bh_disable(); crypto_finalize_aead_request(ctx->se->engine, req, ret); + local_bh_enable(); return 0; } @@ -1347,7 +1352,9 @@ out_free_inbuf: tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); out_finalize: + local_bh_disable(); crypto_finalize_aead_request(ctx->se->engine, req, ret); + local_bh_enable(); return 0; } @@ -1745,7 +1752,9 @@ out: if (tegra_key_is_reserved(rctx->key_id)) tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + local_bh_disable(); crypto_finalize_hash_request(se->engine, req, ret); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 4a298ace6e9f..79f1e5c9b729 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -4,6 +4,7 @@ * Crypto driver to handle HASH algorithms using NVIDIA Security Engine. 
*/ +#include #include #include #include @@ -546,7 +547,9 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) } out: + local_bh_disable(); crypto_finalize_hash_request(se->engine, req, ret); + local_bh_enable(); return 0; } From 3fcfff4ed35f963380a68741bcd52742baff7f76 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Wed, 11 Mar 2026 03:07:35 +0100 Subject: [PATCH 051/129] crypto: atmel-aes - Fix 3-page memory leak in atmel_aes_buff_cleanup atmel_aes_buff_init() allocates 4 pages using __get_free_pages() with ATMEL_AES_BUFFER_ORDER, but atmel_aes_buff_cleanup() frees only the first page using free_page(), leaking the remaining 3 pages. Use free_pages() with ATMEL_AES_BUFFER_ORDER to fix the memory leak. Fixes: bbe628ed897d ("crypto: atmel-aes - improve performances of data transfer") Cc: stable@vger.kernel.org Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-aes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index bc0c40f10944..9b0cb97055dc 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2131,7 +2131,7 @@ static int atmel_aes_buff_init(struct atmel_aes_dev *dd) static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) { - free_page((unsigned long)dd->buf); + free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER); } static int atmel_aes_dma_init(struct atmel_aes_dev *dd) From 464da0bf19fd0fdf4a6594ce2d3352bc5c3e676d Mon Sep 17 00:00:00 2001 From: George Abraham P Date: Wed, 11 Mar 2026 13:52:45 +0530 Subject: [PATCH 052/129] crypto: qat - add wireless mode support for QAT GEN6 Add wireless mode support for QAT GEN6 devices. When the WCP_WAT fuse bit is clear, the device operates in wireless cipher mode (wcy_mode). In this mode all accelerator engines load the wireless firmware and service configuration via 'cfg_services' sysfs attribute is restricted to 'sym' only. 
The get_accel_cap() function is extended to report wireless-specific capabilities (ZUC, ZUC-256, 5G, extended algorithm chaining) gated by their respective slice-disable fuse bits. The set_ssm_wdtimer() function is updated to configure WCP (wireless cipher) and WAT (wireless authentication) watchdog timers. The adf_gen6_cfg_dev_init() function is updated to use adf_6xxx_is_wcy() to enforce sym-only service selection for WCY devices during initialization. Co-developed-by: Aviraj Cj Signed-off-by: Aviraj Cj Signed-off-by: George Abraham P Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../intel/qat/qat_6xxx/adf_6xxx_hw_data.c | 97 +++++++++++++++++-- .../intel/qat/qat_6xxx/adf_6xxx_hw_data.h | 14 +++ drivers/crypto/intel/qat/qat_6xxx/adf_drv.c | 33 ++++++- .../intel/qat/qat_common/adf_fw_config.h | 1 + .../intel/qat/qat_common/adf_gen6_shared.c | 6 -- .../intel/qat/qat_common/adf_gen6_shared.h | 1 - .../crypto/intel/qat/qat_common/icp_qat_hw.h | 3 +- 7 files changed, 137 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c index bed88d3ce8ca..f4c61978b048 100644 --- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c @@ -82,10 +82,15 @@ static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = { 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00 }; +static const unsigned long thrd_mask_wcy[ADF_6XXX_MAX_ACCELENGINES] = { + 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x00 +}; + static const char *const adf_6xxx_fw_objs[] = { [ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ, [ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ, [ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ, + [ADF_FW_WCY_OBJ] = ADF_6XXX_WCY_OBJ, }; static const struct adf_fw_config adf_default_fw_config[] = { @@ -94,6 +99,12 @@ static const struct adf_fw_config adf_default_fw_config[] = { { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ }, }; +static const struct 
adf_fw_config adf_wcy_fw_config[] = { + { ADF_AE_GROUP_1, ADF_FW_WCY_OBJ }, + { ADF_AE_GROUP_0, ADF_FW_WCY_OBJ }, + { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ }, +}; + static struct adf_hw_device_class adf_6xxx_class = { .name = ADF_6XXX_DEVICE_NAME, .type = DEV_6XXX, @@ -118,6 +129,12 @@ static bool services_supported(unsigned long mask) } } +static bool wcy_services_supported(unsigned long mask) +{ + /* The wireless SKU supports only the symmetric crypto service */ + return mask == BIT(SVC_SYM); +} + static int get_service(unsigned long *mask) { if (test_and_clear_bit(SVC_ASYM, mask)) @@ -155,8 +172,12 @@ static enum adf_cfg_service_type get_ring_type(unsigned int service) } } -static const unsigned long *get_thrd_mask(unsigned int service) +static const unsigned long *get_thrd_mask(struct adf_accel_dev *accel_dev, + unsigned int service) { + if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev))) + return (service == SVC_SYM) ? thrd_mask_wcy : NULL; + switch (service) { case SVC_SYM: return thrd_mask_sym; @@ -194,7 +215,7 @@ static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config return service; rp_config[i].ring_type = get_ring_type(service); - rp_config[i].thrd_mask = get_thrd_mask(service); + rp_config[i].thrd_mask = get_thrd_mask(accel_dev, service); /* * If there is only one service enabled, use all ring pairs for @@ -386,6 +407,8 @@ static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev) ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val); ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val); ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWCPL_OFFSET, ADF_SSMWDTWCPH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWATL_OFFSET, ADF_SSMWDTWATH_OFFSET, val); /* Enable watchdog timer for pke */ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke); @@ -631,6 +654,12 @@ static int adf_gen6_set_vc(struct 
adf_accel_dev *accel_dev) return set_vc_config(accel_dev); } +static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) +{ + return adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ? adf_wcy_fw_config : + adf_default_fw_config; +} + static u32 get_ae_mask(struct adf_hw_device_data *self) { unsigned long fuses = self->fuses[ADF_FUSECTL4]; @@ -653,6 +682,38 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return mask; } +static u32 get_accel_cap_wcy(struct adf_accel_dev *accel_dev) +{ + u32 capabilities_sym; + u32 fuse; + + fuse = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1]; + + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT | + ICP_ACCEL_CAPABILITIES_5G | + ICP_ACCEL_CAPABILITIES_ZUC | + ICP_ACCEL_CAPABILITIES_ZUC_256 | + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; + + if (fuse & ICP_ACCEL_GEN6_MASK_EIA3_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + } + if (fuse & ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE) + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + + if (fuse & ICP_ACCEL_GEN6_MASK_5G_SLICE) + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_5G; + + if (adf_get_service_enabled(accel_dev) == SVC_SYM) + return capabilities_sym; + + return 0; +} + static u32 get_accel_cap(struct adf_accel_dev *accel_dev) { u32 capabilities_sym, capabilities_asym; @@ -661,6 +722,9 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) u32 caps = 0; u32 fusectl1; + if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev))) + return get_accel_cap_wcy(accel_dev); + fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1]; /* Read accelerator capabilities mask */ @@ -733,15 +797,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { - return ARRAY_SIZE(adf_default_fw_config); + return 
adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ? + ARRAY_SIZE(adf_wcy_fw_config) : + ARRAY_SIZE(adf_default_fw_config); } static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num) { int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs); + const struct adf_fw_config *fw_config; int id; - id = adf_default_fw_config[obj_num].obj; + fw_config = get_fw_config(accel_dev); + id = fw_config[obj_num].obj; if (id >= num_fw_objs) return NULL; @@ -755,15 +823,22 @@ static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_nu static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) { + const struct adf_fw_config *fw_config; + if (obj_num >= uof_get_num_objs(accel_dev)) return -EINVAL; - return adf_default_fw_config[obj_num].obj; + fw_config = get_fw_config(accel_dev); + + return fw_config[obj_num].obj; } static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { - return adf_default_fw_config[obj_num].ae_mask; + const struct adf_fw_config *fw_config; + + fw_config = get_fw_config(accel_dev); + return fw_config[obj_num].ae_mask; } static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) @@ -873,6 +948,14 @@ static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data) init_num_svc_aes(rl_data); } +static void adf_gen6_init_services_supported(struct adf_hw_device_data *hw_data) +{ + if (adf_6xxx_is_wcy(hw_data)) + hw_data->services_supported = wcy_services_supported; + else + hw_data->services_supported = services_supported; +} + void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &adf_6xxx_class; @@ -929,11 +1012,11 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data) hw_data->stop_timer = adf_timer_stop; hw_data->init_device = adf_init_device; hw_data->enable_pm = enable_pm; - hw_data->services_supported = services_supported; hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS; hw_data->clock_frequency = ADF_6XXX_AE_FREQ; hw_data->get_svc_slice_cnt = 
adf_gen6_get_svc_slice_cnt; + adf_gen6_init_services_supported(hw_data); adf_gen6_init_hw_csr_ops(&hw_data->csr_ops); adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen6_init_dc_ops(&hw_data->dc_ops); diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h index d822911fe68c..fa31d6d584e6 100644 --- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h @@ -64,10 +64,14 @@ #define ADF_SSMWDTATHH_OFFSET 0x520C #define ADF_SSMWDTCNVL_OFFSET 0x5408 #define ADF_SSMWDTCNVH_OFFSET 0x540C +#define ADF_SSMWDTWCPL_OFFSET 0x5608 +#define ADF_SSMWDTWCPH_OFFSET 0x560C #define ADF_SSMWDTUCSL_OFFSET 0x5808 #define ADF_SSMWDTUCSH_OFFSET 0x580C #define ADF_SSMWDTDCPRL_OFFSET 0x5A08 #define ADF_SSMWDTDCPRH_OFFSET 0x5A0C +#define ADF_SSMWDTWATL_OFFSET 0x5C08 +#define ADF_SSMWDTWATH_OFFSET 0x5C0C #define ADF_SSMWDTPKEL_OFFSET 0x5E08 #define ADF_SSMWDTPKEH_OFFSET 0x5E0C @@ -139,6 +143,7 @@ #define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin" #define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin" #define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin" +#define ADF_6XXX_WCY_OBJ "qat_6xxx_wcy.bin" /* RL constants */ #define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100 @@ -159,9 +164,18 @@ enum icp_qat_gen6_slice_mask { ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2), ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3), ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4), + ICP_ACCEL_GEN6_MASK_EIA3_SLICE = BIT(5), ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6), + ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE = BIT(7), + ICP_ACCEL_GEN6_MASK_5G_SLICE = BIT(8), }; +/* Return true if the device is a wireless crypto (WCY) SKU */ +static inline bool adf_6xxx_is_wcy(struct adf_hw_device_data *hw_data) +{ + return !(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE); +} + void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data); void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data); diff --git 
a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c index c1dc9c56fdf5..0684ea9be2ac 100644 --- a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c @@ -16,6 +16,7 @@ #include "adf_gen6_shared.h" #include "adf_6xxx_hw_data.h" +#include "adf_heartbeat.h" static int bar_map[] = { 0, /* SRAM */ @@ -53,6 +54,35 @@ static void adf_devmgr_remove(void *accel_dev) adf_devmgr_rm_dev(accel_dev, NULL); } +static int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev) +{ + const char *config; + int ret; + + /* + * Wireless SKU - symmetric crypto service only + * Non-wireless SKU - crypto service for even devices and compression for odd devices + */ + if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev))) + config = ADF_CFG_SYM; + else + config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY; + + ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); + if (ret) + return ret; + + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, config, + ADF_STR); + if (ret) + return ret; + + adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); + + return 0; +} + static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_pci *accel_pci_dev; @@ -91,9 +121,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]); pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]); - if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE)) - return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n"); - /* Enable PCI device */ ret = pcim_enable_device(pdev); if (ret) diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h index 78957fa900b7..d5c578e3fd8d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h +++ 
b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h @@ -9,6 +9,7 @@ enum adf_fw_objs { ADF_FW_DC_OBJ, ADF_FW_ADMIN_OBJ, ADF_FW_CY_OBJ, + ADF_FW_WCY_OBJ, }; struct adf_fw_config { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c index c9b151006dca..ffe4525a1e69 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c @@ -31,12 +31,6 @@ void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) } EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops); -int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev) -{ - return adf_gen4_cfg_dev_init(accel_dev); -} -EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init); - int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev) { return adf_comp_dev_config(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h index fc6fad029a70..072115a531e4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h @@ -10,7 +10,6 @@ struct adf_pfvf_ops; void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops); void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); -int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev); int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev); int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev); void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index b8f1c4ffb8b5..0223bd541f1f 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -94,7 +94,8 @@ enum icp_qat_capabilities_mask { ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3), ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4), ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5), - 
/* Bits 6-7 are currently reserved */ + /* Bit 6 is currently reserved */ + ICP_ACCEL_CAPABILITIES_5G = BIT(7), ICP_ACCEL_CAPABILITIES_ZUC = BIT(8), ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9), /* Bits 10-11 are currently reserved */ From 57a13941c0bb06ae24e3b34672d7b6f2172b253f Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Wed, 11 Mar 2026 12:39:28 +0100 Subject: [PATCH 053/129] crypto: atmel-aes - guard unregister on error in atmel_aes_register_algs Ensure the device supports XTS and GCM with 'has_xts' and 'has_gcm' before unregistering algorithms when XTS or authenc registration fails, which would trigger a WARN in crypto_unregister_alg(). Currently, with the capabilities defined in atmel_aes_get_cap(), this bug cannot happen because all devices that support XTS and authenc also support GCM, but the error handling should still be correct regardless of hardware capabilities. Fixes: d52db5188a87 ("crypto: atmel-aes - add support to the XTS mode") Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-aes.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 9b0cb97055dc..b393689400b4 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2270,10 +2270,12 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) /* i = ARRAY_SIZE(aes_authenc_algs); */ err_aes_authenc_alg: crypto_unregister_aeads(aes_authenc_algs, i); - crypto_unregister_skcipher(&aes_xts_alg); + if (dd->caps.has_xts) + crypto_unregister_skcipher(&aes_xts_alg); #endif err_aes_xts_alg: - crypto_unregister_aead(&aes_gcm_alg); + if (dd->caps.has_gcm) + crypto_unregister_aead(&aes_gcm_alg); err_aes_gcm_alg: i = ARRAY_SIZE(aes_algs); err_aes_algs: From adb3faf2db1a66d0f015b44ac909a32dfc7f2f9c Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Wed, 11 Mar 2026 16:56:47 +0100 Subject: [PATCH 054/129] crypto: nx - fix bounce buffer leaks in nx842_crypto_{alloc,free}_ctx The 
bounce buffers are allocated with __get_free_pages() using BOUNCE_BUFFER_ORDER (order 2 = 4 pages), but both the allocation error path and nx842_crypto_free_ctx() release the buffers with free_page(). Use free_pages() with the matching order instead. Fixes: ed70b479c2c0 ("crypto: nx - add hardware 842 crypto comp alg") Cc: stable@vger.kernel.org Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index b61f2545e165..661568ce47f0 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -116,8 +116,8 @@ void *nx842_crypto_alloc_ctx(struct nx842_driver *driver) ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { kfree(ctx->wmem); - free_page((unsigned long)ctx->sbounce); - free_page((unsigned long)ctx->dbounce); + free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); + free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); kfree(ctx); return ERR_PTR(-ENOMEM); } @@ -131,8 +131,8 @@ void nx842_crypto_free_ctx(void *p) struct nx842_crypto_ctx *ctx = p; kfree(ctx->wmem); - free_page((unsigned long)ctx->sbounce); - free_page((unsigned long)ctx->dbounce); + free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); + free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); } EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); From 344e6a4f7ff4756b9b3f75e0eb7eaec297e35540 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Wed, 11 Mar 2026 16:56:49 +0100 Subject: [PATCH 055/129] crypto: nx - fix context leak in nx842_crypto_free_ctx Since the scomp conversion, nx842_crypto_alloc_ctx() allocates the context separately, but nx842_crypto_free_ctx() never releases it. Add the missing kfree(ctx) to nx842_crypto_free_ctx(), and reuse nx842_crypto_free_ctx() in the allocation error path. 
Fixes: 980b5705f4e7 ("crypto: nx - Migrate to scomp API") Cc: stable@vger.kernel.org Signed-off-by: Thorsten Blum Reviewed-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 661568ce47f0..a61208cbcd27 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -115,10 +115,7 @@ void *nx842_crypto_alloc_ctx(struct nx842_driver *driver) ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { - kfree(ctx->wmem); - free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); - free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); - kfree(ctx); + nx842_crypto_free_ctx(ctx); return ERR_PTR(-ENOMEM); } @@ -133,6 +130,7 @@ void nx842_crypto_free_ctx(void *p) kfree(ctx->wmem); free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); + kfree(ctx); } EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); From 3414c809777e37855063347f5fbd23ff03e1c9fb Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 11 Mar 2026 22:13:23 -0700 Subject: [PATCH 056/129] hwrng: core - avoid kernel-doc warnings Mark internal fields as "private:" so that kernel-doc comments are not needed for them, eliminating kernel-doc warnings: Warning: include/linux/hw_random.h:54 struct member 'list' not described in 'hwrng' Warning: include/linux/hw_random.h:54 struct member 'ref' not described in 'hwrng' Warning: include/linux/hw_random.h:54 struct member 'cleanup_work' not described in 'hwrng' Warning: include/linux/hw_random.h:54 struct member 'cleanup_done' not described in 'hwrng' Warning: include/linux/hw_random.h:54 struct member 'dying' not described in 'hwrng' Signed-off-by: Randy Dunlap Signed-off-by: Herbert Xu --- 
include/linux/hw_random.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index b77bc55a4cf3..1d3c1927986e 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -46,7 +46,7 @@ struct hwrng { unsigned long priv; unsigned short quality; - /* internal. */ + /* private: internal. */ struct list_head list; struct kref ref; struct work_struct cleanup_work; From b44c7129f1e3cd0e6233c7cb2d88f917d92f213d Mon Sep 17 00:00:00 2001 From: Zongyu Wu Date: Fri, 13 Mar 2026 17:40:39 +0800 Subject: [PATCH 057/129] crypto: hisilicon - add device load query functionality to debugfs The accelerator device supports usage statistics. This patch enables obtaining the accelerator's usage through the "dev_usage" file. The returned number is expressed as a percentage. Signed-off-by: Zongyu Wu Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-hpre | 7 +++ Documentation/ABI/testing/debugfs-hisi-sec | 7 +++ Documentation/ABI/testing/debugfs-hisi-zip | 7 +++ drivers/crypto/hisilicon/debugfs.c | 54 +++++++++++++++++++++ drivers/crypto/hisilicon/hpre/hpre_main.c | 18 +++++++ drivers/crypto/hisilicon/sec2/sec_main.c | 11 +++++ drivers/crypto/hisilicon/zip/zip_main.c | 19 ++++++++ include/linux/hisi_acc_qm.h | 12 +++++ 8 files changed, 135 insertions(+) diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre index 29fb7d5ffc69..5a137f701eea 100644 --- a/Documentation/ABI/testing/debugfs-hisi-hpre +++ b/Documentation/ABI/testing/debugfs-hisi-hpre @@ -50,6 +50,13 @@ Description: Dump debug registers from the QM. Available for PF and VF in host. VF in guest currently only has one debug register. +What: /sys/kernel/debug/hisi_hpre//dev_usage +Date: Mar 2026 +Contact: linux-crypto@vger.kernel.org +Description: Query the real-time bandwidth usage of device. + Returns the bandwidth usage of each channel on the device. 
+ The returned number is in percentage. + What: /sys/kernel/debug/hisi_hpre//qm/current_q Date: Sep 2019 Contact: linux-crypto@vger.kernel.org diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec index 82bf4a0dc7f7..676e2dc2de8d 100644 --- a/Documentation/ABI/testing/debugfs-hisi-sec +++ b/Documentation/ABI/testing/debugfs-hisi-sec @@ -24,6 +24,13 @@ Description: The is related the function for PF and VF. 1/1000~1000/1000 of total QoS. The driver reading alg_qos to get related QoS in the host and VM, Such as "cat alg_qos". +What: /sys/kernel/debug/hisi_sec2//dev_usage +Date: Mar 2026 +Contact: linux-crypto@vger.kernel.org +Description: Query the real-time bandwidth usage of device. + Returns the bandwidth usage of each channel on the device. + The returned number is in percentage. + What: /sys/kernel/debug/hisi_sec2//qm/qm_regs Date: Oct 2019 Contact: linux-crypto@vger.kernel.org diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip index 0abd65d27e9b..46bf47bf6b42 100644 --- a/Documentation/ABI/testing/debugfs-hisi-zip +++ b/Documentation/ABI/testing/debugfs-hisi-zip @@ -36,6 +36,13 @@ Description: The is related the function for PF and VF. 1/1000~1000/1000 of total QoS. The driver reading alg_qos to get related QoS in the host and VM, Such as "cat alg_qos". +What: /sys/kernel/debug/hisi_zip//dev_usage +Date: Mar 2026 +Contact: linux-crypto@vger.kernel.org +Description: Query the real-time bandwidth usage of device. + Returns the bandwidth usage of each channel on the device. + The returned number is in percentage. 
+ What: /sys/kernel/debug/hisi_zip//qm/regs Date: Nov 2018 Contact: linux-crypto@vger.kernel.org diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 32e9f8350289..5d8b4112c543 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -1040,6 +1040,57 @@ void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm) } } +static int qm_usage_percent(struct hisi_qm *qm, int chan_num) +{ + u32 val, used_bw, total_bw; + + val = readl(qm->io_base + QM_CHANNEL_USAGE_OFFSET + + chan_num * QM_CHANNEL_ADDR_INTRVL); + used_bw = lower_16_bits(val); + total_bw = upper_16_bits(val); + if (!total_bw) + return -EIO; + + if (total_bw <= used_bw) + return QM_MAX_DEV_USAGE; + + return (used_bw * QM_DEV_USAGE_RATE) / total_bw; +} + +static int qm_usage_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + bool dev_is_active = true; + int i, ret; + + /* If device is in suspended, usage is 0. */ + ret = hisi_qm_get_dfx_access(qm); + if (ret == -EAGAIN) { + dev_is_active = false; + } else if (ret) { + dev_err(&qm->pdev->dev, "failed to get dfx access for usage_show!\n"); + return ret; + } + + ret = 0; + for (i = 0; i < qm->channel_data.channel_num; i++) { + if (dev_is_active) { + ret = qm_usage_percent(qm, i); + if (ret < 0) { + hisi_qm_put_dfx_access(qm); + return ret; + } + } + seq_printf(s, "%s: %d\n", qm->channel_data.channel_name[i], ret); + } + + if (dev_is_active) + hisi_qm_put_dfx_access(qm); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(qm_usage); + static int qm_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; @@ -1159,6 +1210,9 @@ void hisi_qm_debug_init(struct hisi_qm *qm) debugfs_create_file("diff_regs", 0444, qm->debug.qm_d, qm, &qm_diff_regs_fops); + if (qm->ver >= QM_HW_V5) + debugfs_create_file("dev_usage", 0444, qm->debug.debug_root, qm, &qm_usage_fops); + debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); debugfs_create_file("cmd", 
0600, qm->debug.qm_d, qm, &qm_cmd_fops); diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 884d5d0afaf4..357ab5e5887e 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -121,6 +121,8 @@ #define HPRE_DFX_COMMON2_LEN 0xE #define HPRE_DFX_CORE_LEN 0x43 +#define HPRE_MAX_CHANNEL_NUM 2 + static const char hpre_name[] = "hisi_hpre"; static struct dentry *hpre_debugfs_root; static const struct pci_device_id hpre_dev_ids[] = { @@ -370,6 +372,11 @@ static struct dfx_diff_registers hpre_diff_regs[] = { }, }; +static const char *hpre_channel_name[HPRE_MAX_CHANNEL_NUM] = { + "RSA", + "ECC", +}; + static const struct hisi_qm_err_ini hpre_err_ini; bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) @@ -1234,6 +1241,16 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm) return 0; } +static void hpre_set_channels(struct hisi_qm *qm) +{ + struct qm_channel *channel_data = &qm->channel_data; + int i; + + channel_data->channel_num = HPRE_MAX_CHANNEL_NUM; + for (i = 0; i < HPRE_MAX_CHANNEL_NUM; i++) + channel_data->channel_name[i] = hpre_channel_name[i]; +} + static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { u64 alg_msk; @@ -1267,6 +1284,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return ret; } + hpre_set_channels(qm); /* Fetch and save the value of capability registers */ ret = hpre_pre_store_cap_reg(qm); if (ret) { diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index efda8646fc60..6647b7340827 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -133,6 +133,8 @@ #define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \ GENMASK_ULL(45, 43)) +#define SEC_MAX_CHANNEL_NUM 1 + struct sec_hw_error { u32 int_msk; const char *msg; @@ -1288,6 +1290,14 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm) return 0; } 
+static void sec_set_channels(struct hisi_qm *qm) +{ + struct qm_channel *channel_data = &qm->channel_data; + + channel_data->channel_num = SEC_MAX_CHANNEL_NUM; + channel_data->channel_name[0] = "SEC"; +} + static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { u64 alg_msk; @@ -1325,6 +1335,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return ret; } + sec_set_channels(qm); /* Fetch and save the value of capability registers */ ret = sec_pre_store_cap_reg(qm); if (ret) { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 85b26ef17548..44df9c859bd8 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -122,6 +122,8 @@ #define HZIP_LIT_LEN_EN_OFFSET 0x301204 #define HZIP_LIT_LEN_EN_EN BIT(4) +#define HZIP_MAX_CHANNEL_NUM 3 + enum { HZIP_HIGH_COMP_RATE, HZIP_HIGH_COMP_PERF, @@ -359,6 +361,12 @@ static struct dfx_diff_registers hzip_diff_regs[] = { }, }; +static const char *zip_channel_name[HZIP_MAX_CHANNEL_NUM] = { + "COMPRESS", + "DECOMPRESS", + "DAE" +}; + static int hzip_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; @@ -1400,6 +1408,16 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm) return 0; } +static void zip_set_channels(struct hisi_qm *qm) +{ + struct qm_channel *channel_data = &qm->channel_data; + int i; + + channel_data->channel_num = HZIP_MAX_CHANNEL_NUM; + for (i = 0; i < HZIP_MAX_CHANNEL_NUM; i++) + channel_data->channel_name[i] = zip_channel_name[i]; +} + static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { u64 alg_msk; @@ -1438,6 +1456,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return ret; } + zip_set_channels(qm); /* Fetch and save the value of capability registers */ ret = zip_pre_store_cap_reg(qm); if (ret) { diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 51a6dc2b97e9..8a581b5bbbcd 100644 --- 
a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -102,6 +102,12 @@ #define QM_MIG_REGION_SEL 0x100198 #define QM_MIG_REGION_EN BIT(0) +#define QM_MAX_CHANNEL_NUM 8 +#define QM_CHANNEL_USAGE_OFFSET 0x1100 +#define QM_MAX_DEV_USAGE 100 +#define QM_DEV_USAGE_RATE 100 +#define QM_CHANNEL_ADDR_INTRVL 0x4 + /* uacce mode of the driver */ #define UACCE_MODE_NOUACCE 0 /* don't use uacce */ #define UACCE_MODE_SVA 1 /* use uacce sva mode */ @@ -359,6 +365,11 @@ struct qm_rsv_buf { struct qm_dma qcdma; }; +struct qm_channel { + int channel_num; + const char *channel_name[QM_MAX_CHANNEL_NUM]; +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -433,6 +444,7 @@ struct hisi_qm { struct qm_err_isolate isolate_data; struct hisi_qm_cap_tables cap_tables; + struct qm_channel channel_data; }; struct hisi_qp_status { From 7fc31dd86415fc04c71b8fd4743ad63183e8565f Mon Sep 17 00:00:00 2001 From: Sun Chaobo Date: Fri, 13 Mar 2026 22:52:57 +0800 Subject: [PATCH 058/129] crypto: Fix several spelling mistakes in comments Fix several typos in comments and messages. No functional change. 
Signed-off-by: Sun Chaobo Signed-off-by: Herbert Xu --- crypto/drbg.c | 2 +- crypto/lrw.c | 2 +- crypto/tcrypt.c | 2 +- crypto/tea.c | 2 +- crypto/xts.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index 1ed209e5d5dd..9204e6edb426 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1780,7 +1780,7 @@ static inline int __init drbg_healthcheck_sanity(void) max_addtllen = drbg_max_addtl(drbg); max_request_bytes = drbg_max_request_bytes(drbg); drbg_string_fill(&addtl, buf, max_addtllen + 1); - /* overflow addtllen with additonal info string */ + /* overflow addtllen with additional info string */ len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl); BUG_ON(0 < len); /* overflow max_bits */ diff --git a/crypto/lrw.c b/crypto/lrw.c index dd403b800513..aa31ab03a597 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -134,7 +134,7 @@ static int lrw_next_index(u32 *counter) /* * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make - * mutliple calls to the 'ecb(..)' instance, which usually would be slower than + * multiple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the lrw_next_index() calls again. */ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index db860f45765f..52e3b584c524 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2828,7 +2828,7 @@ static int __init tcrypt_mod_init(void) pr_debug("all tests passed\n"); } - /* We intentionaly return -EAGAIN to prevent keeping the module, + /* We intentionally return -EAGAIN to prevent keeping the module, * unless we're running in fips mode. It does all its work from * init() and doesn't offer any runtime functionality, but in * the fips case, checking for a successful load is helpful. 
diff --git a/crypto/tea.c b/crypto/tea.c index cb05140e3470..7c66efcb5caa 100644 --- a/crypto/tea.c +++ b/crypto/tea.c @@ -2,7 +2,7 @@ /* * Cryptographic API. * - * TEA, XTEA, and XETA crypto alogrithms + * TEA, XTEA, and XETA crypto algorithms * * The TEA and Xtended TEA algorithms were developed by David Wheeler * and Roger Needham at the Computer Laboratory of Cambridge University. diff --git a/crypto/xts.c b/crypto/xts.c index 3da8f5e053d6..ad97c8091582 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -76,7 +76,7 @@ static int xts_setkey(struct crypto_skcipher *parent, const u8 *key, /* * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make - * mutliple calls to the 'ecb(..)' instance, which usually would be slower than + * multiple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the gf128mul_x_ble() calls again. */ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, From c8c4a2972f83c8b68ff03b43cecdb898939ff851 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Fri, 13 Mar 2026 11:24:33 -0400 Subject: [PATCH 059/129] padata: Put CPU offline callback in ONLINE section to allow failure syzbot reported the following warning: DEAD callback error for CPU1 WARNING: kernel/cpu.c:1463 at _cpu_down+0x759/0x1020 kernel/cpu.c:1463, CPU#0: syz.0.1960/14614 at commit 4ae12d8bd9a8 ("Merge tag 'kbuild-fixes-7.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux") which tglx traced to padata_cpu_dead() given it's the only sub-CPUHP_TEARDOWN_CPU callback that returns an error. Failure isn't allowed in hotplug states before CPUHP_TEARDOWN_CPU so move the CPU offline callback to the ONLINE section where failure is possible. 
Fixes: 894c9ef9780c ("padata: validate cpumask without removed CPU during offline") Reported-by: syzbot+123e1b70473ce213f3af@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/69af0a05.050a0220.310d8.002f.GAE@google.com/ Debugged-by: Thomas Gleixner Signed-off-by: Daniel Jordan Signed-off-by: Herbert Xu --- include/linux/cpuhotplug.h | 1 - include/linux/padata.h | 8 +-- kernel/padata.c | 120 +++++++++++++++++++------------------ 3 files changed, 65 insertions(+), 64 deletions(-) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 62cd7b35a29c..22ba327ec227 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -92,7 +92,6 @@ enum cpuhp_state { CPUHP_NET_DEV_DEAD, CPUHP_IOMMU_IOVA_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, - CPUHP_PADATA_DEAD, CPUHP_AP_DTPM_CPU_DEAD, CPUHP_RANDOM_PREPARE, CPUHP_WORKQUEUE_PREP, diff --git a/include/linux/padata.h b/include/linux/padata.h index 765f2778e264..b6232bea6edf 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -149,23 +149,23 @@ struct padata_mt_job { /** * struct padata_instance - The overall control structure. * - * @cpu_online_node: Linkage for CPU online callback. - * @cpu_dead_node: Linkage for CPU offline callback. + * @cpuhp_node: Linkage for CPU hotplug callbacks. * @parallel_wq: The workqueue used for parallel work. * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. * @cpumask: User supplied cpumasks for parallel and serial works. + * @validate_cpumask: Internal cpumask used to validate @cpumask during hotplug. * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. 
*/ struct padata_instance { - struct hlist_node cpu_online_node; - struct hlist_node cpu_dead_node; + struct hlist_node cpuhp_node; struct workqueue_struct *parallel_wq; struct workqueue_struct *serial_wq; struct list_head pslist; struct padata_cpumask cpumask; + cpumask_var_t validate_cpumask; struct kobject kobj; struct mutex lock; u8 flags; diff --git a/kernel/padata.c b/kernel/padata.c index 9e7cfa5ed55b..0d3ea1b68b1f 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -535,7 +535,8 @@ static void padata_init_reorder_list(struct parallel_data *pd) } /* Allocate and initialize the internal cpumask dependend resources. */ -static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) +static struct parallel_data *padata_alloc_pd(struct padata_shell *ps, + int offlining_cpu) { struct padata_instance *pinst = ps->pinst; struct parallel_data *pd; @@ -561,6 +562,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); + if (offlining_cpu >= 0) { + __cpumask_clear_cpu(offlining_cpu, pd->cpumask.pcpu); + __cpumask_clear_cpu(offlining_cpu, pd->cpumask.cbcpu); + } padata_init_reorder_list(pd); padata_init_squeues(pd); @@ -607,11 +612,11 @@ static void __padata_stop(struct padata_instance *pinst) } /* Replace the internal control structure with a new one. 
*/ -static int padata_replace_one(struct padata_shell *ps) +static int padata_replace_one(struct padata_shell *ps, int offlining_cpu) { struct parallel_data *pd_new; - pd_new = padata_alloc_pd(ps); + pd_new = padata_alloc_pd(ps, offlining_cpu); if (!pd_new) return -ENOMEM; @@ -621,7 +626,7 @@ static int padata_replace_one(struct padata_shell *ps) return 0; } -static int padata_replace(struct padata_instance *pinst) +static int padata_replace(struct padata_instance *pinst, int offlining_cpu) { struct padata_shell *ps; int err = 0; @@ -629,7 +634,7 @@ static int padata_replace(struct padata_instance *pinst) pinst->flags |= PADATA_RESET; list_for_each_entry(ps, &pinst->pslist, list) { - err = padata_replace_one(ps); + err = padata_replace_one(ps, offlining_cpu); if (err) break; } @@ -646,9 +651,21 @@ static int padata_replace(struct padata_instance *pinst) /* If cpumask contains no active cpu, we mark the instance as invalid. */ static bool padata_validate_cpumask(struct padata_instance *pinst, - const struct cpumask *cpumask) + const struct cpumask *cpumask, + int offlining_cpu) { - if (!cpumask_intersects(cpumask, cpu_online_mask)) { + cpumask_copy(pinst->validate_cpumask, cpu_online_mask); + + /* + * @offlining_cpu is still in cpu_online_mask, so remove it here for + * validation. Using a sub-CPUHP_TEARDOWN_CPU hotplug state where + * @offlining_cpu wouldn't be in the online mask doesn't work because + * padata_cpu_offline() can fail but such a state doesn't allow failure. 
+ */ + if (offlining_cpu >= 0) + __cpumask_clear_cpu(offlining_cpu, pinst->validate_cpumask); + + if (!cpumask_intersects(cpumask, pinst->validate_cpumask)) { pinst->flags |= PADATA_INVALID; return false; } @@ -664,13 +681,13 @@ static int __padata_set_cpumasks(struct padata_instance *pinst, int valid; int err; - valid = padata_validate_cpumask(pinst, pcpumask); + valid = padata_validate_cpumask(pinst, pcpumask, -1); if (!valid) { __padata_stop(pinst); goto out_replace; } - valid = padata_validate_cpumask(pinst, cbcpumask); + valid = padata_validate_cpumask(pinst, cbcpumask, -1); if (!valid) __padata_stop(pinst); @@ -678,7 +695,7 @@ out_replace: cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst); + err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst, -1); if (valid) __padata_start(pinst); @@ -730,26 +747,6 @@ EXPORT_SYMBOL(padata_set_cpumask); #ifdef CONFIG_HOTPLUG_CPU -static int __padata_add_cpu(struct padata_instance *pinst, int cpu) -{ - int err = padata_replace(pinst); - - if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && - padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) - __padata_start(pinst); - - return err; -} - -static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) -{ - if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || - !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) - __padata_stop(pinst); - - return padata_replace(pinst); -} - static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) { return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || @@ -761,27 +758,39 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) struct padata_instance *pinst; int ret; - pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node); + pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node); if (!pinst_has_cpu(pinst, cpu)) return 0; mutex_lock(&pinst->lock); - 
ret = __padata_add_cpu(pinst, cpu); + + ret = padata_replace(pinst, -1); + + if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu, -1) && + padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, -1)) + __padata_start(pinst); + mutex_unlock(&pinst->lock); return ret; } -static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node) +static int padata_cpu_offline(unsigned int cpu, struct hlist_node *node) { struct padata_instance *pinst; int ret; - pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node); + pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node); if (!pinst_has_cpu(pinst, cpu)) return 0; mutex_lock(&pinst->lock); - ret = __padata_remove_cpu(pinst, cpu); + + if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu, cpu) || + !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, cpu)) + __padata_stop(pinst); + + ret = padata_replace(pinst, cpu); + mutex_unlock(&pinst->lock); return ret; } @@ -792,15 +801,14 @@ static enum cpuhp_state hp_online; static void __padata_free(struct padata_instance *pinst) { #ifdef CONFIG_HOTPLUG_CPU - cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, - &pinst->cpu_dead_node); - cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node); + cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpuhp_node); #endif WARN_ON(!list_empty(&pinst->pslist)); free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); + free_cpumask_var(pinst->validate_cpumask); destroy_workqueue(pinst->serial_wq); destroy_workqueue(pinst->parallel_wq); kfree(pinst); @@ -961,10 +969,10 @@ struct padata_instance *padata_alloc(const char *name) if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) goto err_free_serial_wq; - if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { - free_cpumask_var(pinst->cpumask.pcpu); - goto err_free_serial_wq; - } + if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) + goto err_free_p_mask; + if 
(!alloc_cpumask_var(&pinst->validate_cpumask, GFP_KERNEL)) + goto err_free_cb_mask; INIT_LIST_HEAD(&pinst->pslist); @@ -972,7 +980,7 @@ struct padata_instance *padata_alloc(const char *name) cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask); if (padata_setup_cpumasks(pinst)) - goto err_free_masks; + goto err_free_v_mask; __padata_start(pinst); @@ -981,18 +989,19 @@ struct padata_instance *padata_alloc(const char *name) #ifdef CONFIG_HOTPLUG_CPU cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, - &pinst->cpu_online_node); - cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD, - &pinst->cpu_dead_node); + &pinst->cpuhp_node); #endif cpus_read_unlock(); return pinst; -err_free_masks: - free_cpumask_var(pinst->cpumask.pcpu); +err_free_v_mask: + free_cpumask_var(pinst->validate_cpumask); +err_free_cb_mask: free_cpumask_var(pinst->cpumask.cbcpu); +err_free_p_mask: + free_cpumask_var(pinst->cpumask.pcpu); err_free_serial_wq: destroy_workqueue(pinst->serial_wq); err_put_cpus: @@ -1035,7 +1044,7 @@ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst) ps->pinst = pinst; cpus_read_lock(); - pd = padata_alloc_pd(ps); + pd = padata_alloc_pd(ps, -1); cpus_read_unlock(); if (!pd) @@ -1084,31 +1093,24 @@ void __init padata_init(void) int ret; ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online", - padata_cpu_online, NULL); + padata_cpu_online, padata_cpu_offline); if (ret < 0) goto err; hp_online = ret; - - ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead", - NULL, padata_cpu_dead); - if (ret < 0) - goto remove_online_state; #endif possible_cpus = num_possible_cpus(); padata_works = kmalloc_objs(struct padata_work, possible_cpus); if (!padata_works) - goto remove_dead_state; + goto remove_online_state; for (i = 0; i < possible_cpus; ++i) list_add(&padata_works[i].pw_list, &padata_free_works); return; -remove_dead_state: -#ifdef CONFIG_HOTPLUG_CPU - cpuhp_remove_multi_state(CPUHP_PADATA_DEAD); remove_online_state: 
+#ifdef CONFIG_HOTPLUG_CPU cpuhp_remove_multi_state(hp_online); err: #endif From bab1adf3b87e4bfac92c4f5963c63db434d561c1 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 14 Mar 2026 20:36:29 +0100 Subject: [PATCH 060/129] crypto: atmel-sha204a - Fix potential UAF and memory leak in remove path Unregister the hwrng to prevent new ->read() calls and flush the Atmel I2C workqueue before teardown to prevent a potential UAF if a queued callback runs while the device is being removed. Drop the early return to ensure sysfs entries are removed and ->hwrng.priv is freed, preventing a memory leak. Fixes: da001fb651b0 ("crypto: atmel-i2c - add support for SHA204A random number generator") Cc: stable@vger.kernel.org Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha204a.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 691531647fd6..dbb39ed0cea1 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -194,10 +194,8 @@ static void atmel_sha204a_remove(struct i2c_client *client) { struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); - if (atomic_read(&i2c_priv->tfm_count)) { - dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n"); - return; - } + devm_hwrng_unregister(&client->dev, &i2c_priv->hwrng); + atmel_i2c_flush_queue(); sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups); From 7c622c4fa8b475df1977bfe3ac5d28d9da0c57fc Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 14 Mar 2026 14:37:20 -0700 Subject: [PATCH 061/129] crypto: simd - Remove unused skcipher support Remove the skcipher algorithm support from crypto/simd.c. It is no longer used, and it is unlikely to gain any new user in the future, given the performance issues with this code. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/simd.c | 235 +-------------------------------- include/crypto/internal/simd.h | 19 --- 2 files changed, 5 insertions(+), 249 deletions(-) diff --git a/crypto/simd.c b/crypto/simd.c index 4e6f437e9e77..4e29f797709b 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -13,11 +13,11 @@ /* * Shared crypto SIMD helpers. These functions dynamically create and register - * an skcipher or AEAD algorithm that wraps another, internal algorithm. The - * wrapper ensures that the internal algorithm is only executed in a context - * where SIMD instructions are usable, i.e. where may_use_simd() returns true. - * If SIMD is already usable, the wrapper directly calls the internal algorithm. - * Otherwise it defers execution to a workqueue via cryptd. + * an AEAD algorithm that wraps another, internal algorithm. The wrapper + * ensures that the internal algorithm is only executed in a context where SIMD + * instructions are usable, i.e. where may_use_simd() returns true. If SIMD is + * already usable, the wrapper directly calls the internal algorithm. Otherwise + * it defers execution to a workqueue via cryptd. * * This is an alternative to the internal algorithm implementing a fallback for * the !may_use_simd() case itself. 
@@ -30,236 +30,11 @@ #include #include #include -#include #include #include #include #include -/* skcipher support */ - -struct simd_skcipher_alg { - const char *ialg_name; - struct skcipher_alg alg; -}; - -struct simd_skcipher_ctx { - struct cryptd_skcipher *cryptd_tfm; -}; - -static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int key_len) -{ - struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_skcipher *child = &ctx->cryptd_tfm->base; - - crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) & - CRYPTO_TFM_REQ_MASK); - return crypto_skcipher_setkey(child, key, key_len); -} - -static int simd_skcipher_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_request *subreq; - struct crypto_skcipher *child; - - subreq = skcipher_request_ctx(req); - *subreq = *req; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) - child = &ctx->cryptd_tfm->base; - else - child = cryptd_skcipher_child(ctx->cryptd_tfm); - - skcipher_request_set_tfm(subreq, child); - - return crypto_skcipher_encrypt(subreq); -} - -static int simd_skcipher_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_request *subreq; - struct crypto_skcipher *child; - - subreq = skcipher_request_ctx(req); - *subreq = *req; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) - child = &ctx->cryptd_tfm->base; - else - child = cryptd_skcipher_child(ctx->cryptd_tfm); - - skcipher_request_set_tfm(subreq, child); - - return crypto_skcipher_decrypt(subreq); -} - -static void simd_skcipher_exit(struct crypto_skcipher *tfm) -{ - struct simd_skcipher_ctx *ctx = 
crypto_skcipher_ctx(tfm); - - cryptd_free_skcipher(ctx->cryptd_tfm); -} - -static int simd_skcipher_init(struct crypto_skcipher *tfm) -{ - struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct cryptd_skcipher *cryptd_tfm; - struct simd_skcipher_alg *salg; - struct skcipher_alg *alg; - unsigned reqsize; - - alg = crypto_skcipher_alg(tfm); - salg = container_of(alg, struct simd_skcipher_alg, alg); - - cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name, - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - - ctx->cryptd_tfm = cryptd_tfm; - - reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm)); - reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base)); - reqsize += sizeof(struct skcipher_request); - - crypto_skcipher_set_reqsize(tfm, reqsize); - - return 0; -} - -struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, - const char *algname, - const char *drvname, - const char *basename) -{ - struct simd_skcipher_alg *salg; - struct skcipher_alg *alg; - int err; - - salg = kzalloc_obj(*salg); - if (!salg) { - salg = ERR_PTR(-ENOMEM); - goto out; - } - - salg->ialg_name = basename; - alg = &salg->alg; - - err = -ENAMETOOLONG; - if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >= - CRYPTO_MAX_ALG_NAME) - goto out_free_salg; - - if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - drvname) >= CRYPTO_MAX_ALG_NAME) - goto out_free_salg; - - alg->base.cra_flags = CRYPTO_ALG_ASYNC | - (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); - alg->base.cra_priority = ialg->base.cra_priority; - alg->base.cra_blocksize = ialg->base.cra_blocksize; - alg->base.cra_alignmask = ialg->base.cra_alignmask; - alg->base.cra_module = ialg->base.cra_module; - alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx); - - alg->ivsize = ialg->ivsize; - alg->chunksize = ialg->chunksize; - alg->min_keysize = ialg->min_keysize; - alg->max_keysize = 
ialg->max_keysize; - - alg->init = simd_skcipher_init; - alg->exit = simd_skcipher_exit; - - alg->setkey = simd_skcipher_setkey; - alg->encrypt = simd_skcipher_encrypt; - alg->decrypt = simd_skcipher_decrypt; - - err = crypto_register_skcipher(alg); - if (err) - goto out_free_salg; - -out: - return salg; - -out_free_salg: - kfree(salg); - salg = ERR_PTR(err); - goto out; -} -EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); - -void simd_skcipher_free(struct simd_skcipher_alg *salg) -{ - crypto_unregister_skcipher(&salg->alg); - kfree(salg); -} -EXPORT_SYMBOL_GPL(simd_skcipher_free); - -int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, - struct simd_skcipher_alg **simd_algs) -{ - int err; - int i; - const char *algname; - const char *drvname; - const char *basename; - struct simd_skcipher_alg *simd; - - for (i = 0; i < count; i++) { - if (WARN_ON(strncmp(algs[i].base.cra_name, "__", 2) || - strncmp(algs[i].base.cra_driver_name, "__", 2))) - return -EINVAL; - } - - err = crypto_register_skciphers(algs, count); - if (err) - return err; - - for (i = 0; i < count; i++) { - algname = algs[i].base.cra_name + 2; - drvname = algs[i].base.cra_driver_name + 2; - basename = algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename); - err = PTR_ERR(simd); - if (IS_ERR(simd)) - goto err_unregister; - simd_algs[i] = simd; - } - return 0; - -err_unregister: - simd_unregister_skciphers(algs, count, simd_algs); - return err; -} -EXPORT_SYMBOL_GPL(simd_register_skciphers_compat); - -void simd_unregister_skciphers(struct skcipher_alg *algs, int count, - struct simd_skcipher_alg **simd_algs) -{ - int i; - - crypto_unregister_skciphers(algs, count); - - for (i = 0; i < count; i++) { - if (simd_algs[i]) { - simd_skcipher_free(simd_algs[i]); - simd_algs[i] = NULL; - } - } -} -EXPORT_SYMBOL_GPL(simd_unregister_skciphers); - -/* AEAD support */ - struct simd_aead_alg { const char *ialg_name; struct aead_alg alg; diff --git 
a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h index 9e338e7aafbd..f5e5d7b63951 100644 --- a/include/crypto/internal/simd.h +++ b/include/crypto/internal/simd.h @@ -10,25 +10,6 @@ #include #include -/* skcipher support */ - -struct simd_skcipher_alg; -struct skcipher_alg; - -struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, - const char *algname, - const char *drvname, - const char *basename); -void simd_skcipher_free(struct simd_skcipher_alg *alg); - -int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, - struct simd_skcipher_alg **simd_algs); - -void simd_unregister_skciphers(struct skcipher_alg *algs, int count, - struct simd_skcipher_alg **simd_algs); - -/* AEAD support */ - struct simd_aead_alg; struct aead_alg; From f9bbd547cfb98b1c5e535aab9b0671a2ff22453a Mon Sep 17 00:00:00 2001 From: Kit Dallege Date: Sun, 15 Mar 2026 15:57:22 +0100 Subject: [PATCH 062/129] crypto: add missing kernel-doc for anonymous union members Document the anonymous SKCIPHER_ALG_COMMON and COMP_ALG_COMMON struct members in skcipher_alg, scomp_alg, and acomp_alg, following the existing pattern used by HASH_ALG_COMMON in shash_alg. 
This fixes the following kernel-doc warnings: include/crypto/skcipher.h:166: struct member 'SKCIPHER_ALG_COMMON' not described in 'skcipher_alg' include/crypto/internal/scompress.h:39: struct member 'COMP_ALG_COMMON' not described in 'scomp_alg' include/crypto/internal/acompress.h:55: struct member 'COMP_ALG_COMMON' not described in 'acomp_alg' Signed-off-by: Kit Dallege Signed-off-by: Herbert Xu --- include/crypto/internal/acompress.h | 1 + include/crypto/internal/scompress.h | 1 + include/crypto/skcipher.h | 1 + 3 files changed, 3 insertions(+) diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 9a3f28baa804..9cd37df32dc4 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -42,6 +42,7 @@ * * @base: Common crypto API algorithm data structure * @calg: Cmonn algorithm data structure shared with scomp + * @COMP_ALG_COMMON: see struct comp_alg_common */ struct acomp_alg { int (*compress)(struct acomp_req *req); diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 6a2c5f2e90f9..13a0851a995b 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -22,6 +22,7 @@ struct crypto_scomp { * @decompress: Function performs a de-compress operation * @streams: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with acomp + * @COMP_ALG_COMMON: see struct comp_alg_common */ struct scomp_alg { int (*compress)(struct crypto_scomp *tfm, const u8 *src, diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 9e5853464345..4efe2ca8c4d1 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -145,6 +145,7 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON; * considerably more efficient if it can operate on multiple chunks * in parallel. Should be a multiple of chunksize. 
* @co: see struct skcipher_alg_common + * @SKCIPHER_ALG_COMMON: see struct skcipher_alg_common * * All fields except @ivsize are mandatory and must be filled. */ From f30579bbae86c860a642621322d90d3d4c60c9b5 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 17 Mar 2026 09:04:52 +0100 Subject: [PATCH 063/129] crypto: s5p-sss - use unregister_{ahashes,skciphers} in probe/remove Replace multiple for loops with calls to crypto_unregister_ahashes() and crypto_unregister_skciphers(). If crypto_register_skcipher() fails in s5p_aes_probe(), log the error directly instead of checking 'i < ARRAY_SIZE(algs)' later. Also drop now-unused local index variables. No functional changes. Signed-off-by: Thorsten Blum Reviewed-by: Krzysztof Kozlowski Reviewed-by: Vladimir Zapolskiy Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index eece1ff6c62f..bdda7b39af85 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -2131,7 +2131,7 @@ static struct skcipher_alg algs[] = { static int s5p_aes_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - int i, j, err; + int i, err; const struct samsung_aes_variant *variant; struct s5p_aes_dev *pdata; struct resource *res; @@ -2237,8 +2237,11 @@ static int s5p_aes_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(algs); i++) { err = crypto_register_skcipher(&algs[i]); - if (err) + if (err) { + dev_err(dev, "can't register '%s': %d\n", + algs[i].base.cra_name, err); goto err_algs; + } } if (pdata->use_hash) { @@ -2265,20 +2268,12 @@ static int s5p_aes_probe(struct platform_device *pdev) return 0; err_hash: - for (j = hash_i - 1; j >= 0; j--) - crypto_unregister_ahash(&algs_sha1_md5_sha256[j]); - + crypto_unregister_ahashes(algs_sha1_md5_sha256, hash_i); tasklet_kill(&pdata->hash_tasklet); res->end -= 0x300; err_algs: - if (i < 
ARRAY_SIZE(algs)) - dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name, - err); - - for (j = 0; j < i; j++) - crypto_unregister_skcipher(&algs[j]); - + crypto_unregister_skciphers(algs, i); tasklet_kill(&pdata->tasklet); err_irq: @@ -2294,15 +2289,13 @@ err_clk: static void s5p_aes_remove(struct platform_device *pdev) { struct s5p_aes_dev *pdata = platform_get_drvdata(pdev); - int i; - for (i = 0; i < ARRAY_SIZE(algs); i++) - crypto_unregister_skcipher(&algs[i]); + crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); tasklet_kill(&pdata->tasklet); if (pdata->use_hash) { - for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--) - crypto_unregister_ahash(&algs_sha1_md5_sha256[i]); + crypto_unregister_ahashes(algs_sha1_md5_sha256, + ARRAY_SIZE(algs_sha1_md5_sha256)); pdata->res->end -= 0x300; tasklet_kill(&pdata->hash_tasklet); From 914b0c68d4decebe52b31929f89364de32d0156e Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 17 Mar 2026 17:52:57 +0100 Subject: [PATCH 064/129] crypto: marvell/cesa - use memcpy_and_pad in mv_cesa_ahash_export Replace memset() followed by memcpy() with memcpy_and_pad() to simplify the code and to write to 'cache' only once. 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa/hash.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index 5103d36cdfdb..2f203042d9bd 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -847,8 +847,7 @@ static int mv_cesa_ahash_export(struct ahash_request *req, void *hash, *len = creq->len; memcpy(hash, creq->state, digsize); - memset(cache, 0, blocksize); - memcpy(cache, creq->cache, creq->cache_ptr); + memcpy_and_pad(cache, blocksize, creq->cache, creq->cache_ptr, 0); return 0; } From 928c5e894ca907b11c0b3cda7c37441d863018fd Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 17 Mar 2026 21:18:06 +0100 Subject: [PATCH 065/129] crypto: nx - annotate struct nx842_crypto_header with __counted_by Add the __counted_by() compiler attribute to the flexible array member 'group' to improve access bounds-checking via CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE. Signed-off-by: Thorsten Blum Reviewed-by: Gustavo A. R. Silva Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index f5e2c82ba876..a04e85e9f78e 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -164,7 +164,7 @@ struct nx842_crypto_header { __be16 ignore; /* decompressed end bytes to ignore */ u8 groups; /* total groups in this header */ ); - struct nx842_crypto_header_group group[]; + struct nx842_crypto_header_group group[] __counted_by(groups); } __packed; static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr), "struct member likely outside of struct_group_tagged()"); From b0bfa49c03e3c65737eafa73d8a698eaf55379a6 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Tue, 17 Mar 2026 17:40:02 -0600 Subject: [PATCH 066/129] crypto: nx - Fix packed layout in struct nx842_crypto_header struct nx842_crypto_header is declared with the __packed attribute, however the fields grouped with struct_group_tagged() were not packed. This caused the grouped header portion of the structure to lose the packed layout guarantees of the containing structure. Fix this by replacing struct_group_tagged() with __struct_group(..., ..., __packed, ...) so the grouped fields are packed, and the original layout is preserved, restoring the intended packed layout of the structure. Before changes: struct nx842_crypto_header { union { struct { __be16 magic; /* 0 2 */ __be16 ignore; /* 2 2 */ u8 groups; /* 4 1 */ }; /* 0 6 */ struct nx842_crypto_header_hdr hdr; /* 0 6 */ }; /* 0 6 */ struct nx842_crypto_header_group group[]; /* 6 0 */ /* size: 6, cachelines: 1, members: 2 */ /* last cacheline: 6 bytes */ } __attribute__((__packed__)); After changes: struct nx842_crypto_header { union { struct { __be16 magic; /* 0 2 */ __be16 ignore; /* 2 2 */ u8 groups; /* 4 1 */ } __attribute__((__packed__)); /* 0 5 */ struct nx842_crypto_header_hdr hdr; /* 0 5 */ }; /* 0 5 */ struct nx842_crypto_header_group group[]; /* 5 0 */ /* size: 5, cachelines: 1, members: 2 */ /* last cacheline: 5 bytes */ } __attribute__((__packed__)); Fixes: 1e6b251ce175 ("crypto: nx - Avoid -Wflex-array-member-not-at-end warning") Cc: stable@vger.kernel.org Signed-off-by: Gustavo A. R. Silva Reviewed-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index a04e85e9f78e..c401cdf1a453 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -159,7 +159,7 @@ struct nx842_crypto_header_group { struct nx842_crypto_header { /* New members MUST be added within the struct_group() macro below. 
*/ - struct_group_tagged(nx842_crypto_header_hdr, hdr, + __struct_group(nx842_crypto_header_hdr, hdr, __packed, __be16 magic; /* NX842_CRYPTO_MAGIC */ __be16 ignore; /* decompressed end bytes to ignore */ u8 groups; /* total groups in this header */ @@ -167,7 +167,7 @@ struct nx842_crypto_header { struct nx842_crypto_header_group group[] __counted_by(groups); } __packed; static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr), - "struct member likely outside of struct_group_tagged()"); + "struct member likely outside of __struct_group()"); #define NX842_CRYPTO_GROUP_MAX (0x20) From d134feeb5df33fbf77f482f52a366a44642dba09 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Thu, 19 Mar 2026 10:29:32 +0100 Subject: [PATCH 067/129] printk: add print_hex_dump_devel() Add print_hex_dump_devel() as the hex dump equivalent of pr_devel(), which emits output only when DEBUG is enabled, but keeps call sites compiled otherwise. Suggested-by: Herbert Xu Signed-off-by: Thorsten Blum Reviewed-by: John Ogness Signed-off-by: Herbert Xu --- include/linux/printk.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/include/linux/printk.h b/include/linux/printk.h index 63d516c873b4..54e3c621fec3 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -801,6 +801,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, } #endif +#if defined(DEBUG) +#define print_hex_dump_devel(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) +#else +static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ +} +#endif + /** * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params * @prefix_str: string to prefix each line with; From 177730a273b18e195263ed953853273e901b5064 
Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Thu, 19 Mar 2026 10:29:33 +0100 Subject: [PATCH 068/129] crypto: caam - guard HMAC key hex dumps in hash_digest_key Use print_hex_dump_devel() for dumping sensitive HMAC key bytes in hash_digest_key() to avoid leaking secrets at runtime when CONFIG_DYNAMIC_DEBUG is enabled. Fixes: 045e36780f11 ("crypto: caam - ahash hmac support") Fixes: 3f16f6c9d632 ("crypto: caam/qi2 - add support for ahash algorithms") Cc: stable@vger.kernel.org Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 4 ++-- drivers/crypto/caam/caamhash.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 553994228a17..854200850830 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -3270,7 +3270,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, dpaa2_fl_set_addr(out_fle, key_dma); dpaa2_fl_set_len(out_fle, digestsize); - print_hex_dump_debug("key_in@" __stringify(__LINE__)": ", + print_hex_dump_devel("key_in@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), @@ -3290,7 +3290,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, /* in progress */ wait_for_completion(&result.completion); ret = result.err; - print_hex_dump_debug("digested key@" __stringify(__LINE__)": ", + print_hex_dump_devel("digested key@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index e0a23c55c10e..72cfe00df3f4 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -393,7 +393,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, append_seq_store(desc, 
digestsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); - print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", + print_hex_dump_devel("key_in@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), @@ -408,7 +408,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, wait_for_completion(&result.completion); ret = result.err; - print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", + print_hex_dump_devel("digested key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); } From 6ac142bf267ecf0aee5038abd00072ab583ce0de Mon Sep 17 00:00:00 2001 From: Suman Kumar Chakraborty Date: Thu, 19 Mar 2026 11:02:57 +0000 Subject: [PATCH 069/129] crypto: qat - add anti-rollback support for GEN6 devices Anti-Rollback (ARB) is a QAT GEN6 hardware feature that prevents loading firmware with a Security Version Number (SVN) lower than an authorized minimum. This protects against downgrade attacks by ensuring that only firmware at or above a committed SVN can run on the acceleration device. During firmware loading, the driver checks the SVN validation status via a hardware CSR. If the check reports a failure, firmware authentication is aborted. If it reports a retry status, the driver reissues the authentication command up to a maximum number of retries. Extend the firmware admin interface with two new messages, ICP_QAT_FW_SVN_READ and ICP_QAT_FW_SVN_COMMIT, to query and commit the SVN, respectively. Integrate the SVN check into the firmware authentication path in qat_uclo.c so the driver can react to anti-rollback status during device bring-up. Expose SVN information to userspace via a new sysfs attribute group, qat_svn, under the PCI device directory. 
The group provides read-only attributes for the active, enforced minimum, and permanent minimum SVN values, as well as a write-only commit attribute that allows a system administrator to commit the currently active SVN as the new authorized minimum. This is based on earlier work by Ciunas Bennett. Signed-off-by: Suman Kumar Chakraborty Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../ABI/testing/sysfs-driver-qat_svn | 114 +++++++++++++++ .../intel/qat/qat_6xxx/adf_6xxx_hw_data.c | 16 +++ .../intel/qat/qat_6xxx/adf_6xxx_hw_data.h | 6 + drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 2 + .../crypto/intel/qat/qat_common/adf_admin.c | 70 +++++++++ .../crypto/intel/qat/qat_common/adf_admin.h | 2 + .../crypto/intel/qat/qat_common/adf_anti_rb.c | 66 +++++++++ .../crypto/intel/qat/qat_common/adf_anti_rb.h | 37 +++++ .../crypto/intel/qat/qat_common/adf_init.c | 3 + .../intel/qat/qat_common/adf_sysfs_anti_rb.c | 133 ++++++++++++++++++ .../intel/qat/qat_common/adf_sysfs_anti_rb.h | 11 ++ .../qat/qat_common/icp_qat_fw_init_admin.h | 15 +- .../crypto/intel/qat/qat_common/qat_uclo.c | 25 +++- 14 files changed, 497 insertions(+), 5 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-driver-qat_svn create mode 100644 drivers/crypto/intel/qat/qat_common/adf_anti_rb.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_anti_rb.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h diff --git a/Documentation/ABI/testing/sysfs-driver-qat_svn b/Documentation/ABI/testing/sysfs-driver-qat_svn new file mode 100644 index 000000000000..3832b523dcb0 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-qat_svn @@ -0,0 +1,114 @@ +What: /sys/bus/pci/devices//qat_svn/ +Date: June 2026 +KernelVersion: 7.1 +Contact: qat-linux@intel.com +Description: Directory containing Security Version Number (SVN) attributes for 
+ the Anti-Rollback (ARB) feature. The ARB feature prevents downloading + older firmware versions to the acceleration device. + +What: /sys/bus/pci/devices//qat_svn/enforced_min +Date: June 2026 +KernelVersion: 7.1 +Contact: qat-linux@intel.com +Description: + (RO) Reports the minimum allowed firmware SVN. + + Returns an integer greater than zero. Firmware with SVN lower than + this value is rejected. + + A write to qat_svn/commit will update this value. The update is not + persistent across reboot; on reboot, this value is reset from + qat_svn/permanent_min. + + Example usage:: + + # cat /sys/bus/pci/devices//qat_svn/enforced_min + 2 + + This attribute is available only on devices that support + Anti-Rollback. + +What: /sys/bus/pci/devices//qat_svn/permanent_min +Date: June 2026 +KernelVersion: 7.1 +Contact: qat-linux@intel.com +Description: + (RO) Reports the persistent minimum SVN used to initialize + qat_svn/enforced_min on each reboot. + + Returns an integer greater than zero. A write to qat_svn/commit + may update this value, depending on platform/BIOS settings. + + Example usage:: + + # cat /sys/bus/pci/devices//qat_svn/permanent_min + 3 + + This attribute is available only on devices that support + Anti-Rollback. + +What: /sys/bus/pci/devices//qat_svn/active +Date: June 2026 +KernelVersion: 7.1 +Contact: qat-linux@intel.com +Description: + (RO) Reports the SVN of the currently active firmware image. + + Returns an integer greater than zero. + + Example usage:: + + # cat /sys/bus/pci/devices//qat_svn/active + 2 + + This attribute is available only on devices that support + Anti-Rollback. + +What: /sys/bus/pci/devices//qat_svn/commit +Date: June 2026 +KernelVersion: 7.1 +Contact: qat-linux@intel.com +Description: + (WO) Commits the currently active SVN as the minimum allowed SVN. + + Writing 1 sets qat_svn/enforced_min to the value of qat_svn/active, + preventing future firmware loads with lower SVN. 
+ + Depending on platform/BIOS settings, a commit may also update + qat_svn/permanent_min. + + Note that on reboot, qat_svn/enforced_min reverts to + qat_svn/permanent_min. + + It is advisable to use this attribute with caution, only when + it is necessary to set a new minimum SVN for the firmware. + + Before committing the SVN update, it is crucial to check the + current values of qat_svn/active, qat_svn/enforced_min and + qat_svn/permanent_min. This verification helps ensure that the + commit operation aligns with the intended outcome. + + While writing to the file, any value other than '1' will result + in an error and have no effect. + + Example usage:: + + ## Read current values + # cat /sys/bus/pci/devices//qat_svn/enforced_min + 2 + # cat /sys/bus/pci/devices//qat_svn/permanent_min + 2 + # cat /sys/bus/pci/devices//qat_svn/active + 3 + + ## Commit active SVN + # echo 1 > /sys/bus/pci/devices//qat_svn/commit + + ## Read updated values + # cat /sys/bus/pci/devices//qat_svn/enforced_min + 3 + # cat /sys/bus/pci/devices//qat_svn/permanent_min + 3 + + This attribute is available only on devices that support + Anti-Rollback. 
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c index f4c61978b048..177bc4eb3c24 100644 --- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c @@ -462,6 +462,21 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number) return 0; } +static bool adf_anti_rb_enabled(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + + return !!(hw_data->fuses[0] & ADF_GEN6_ANTI_RB_FUSE_BIT); +} + +static void adf_gen6_init_anti_rb(struct adf_anti_rb_hw_data *anti_rb_data) +{ + anti_rb_data->anti_rb_enabled = adf_anti_rb_enabled; + anti_rb_data->svncheck_offset = ADF_GEN6_SVNCHECK_CSR_MSG; + anti_rb_data->svncheck_retry = 0; + anti_rb_data->sysfs_added = false; +} + static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; @@ -1024,6 +1039,7 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data) adf_gen6_init_ras_ops(&hw_data->ras_ops); adf_gen6_init_tl_data(&hw_data->tl_data); adf_gen6_init_rl_data(&hw_data->rl_data); + adf_gen6_init_anti_rb(&hw_data->anti_rb_data); } void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h index fa31d6d584e6..e4d433bdd379 100644 --- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h @@ -53,6 +53,12 @@ #define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578 #define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970 +/* Anti-rollback */ +#define ADF_GEN6_SVNCHECK_CSR_MSG 0x640004 + +/* Fuse bits */ +#define ADF_GEN6_ANTI_RB_FUSE_BIT BIT(24) + /* * Watchdog timers * Timeout is in cycles. 
Clock speed may vary across products but this diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 89845754841b..016b81e60cfb 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -4,6 +4,7 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"' intel_qat-y := adf_accel_engine.o \ adf_admin.o \ adf_aer.o \ + adf_anti_rb.o \ adf_bank_state.o \ adf_cfg.o \ adf_cfg_services.o \ @@ -29,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \ adf_rl_admin.o \ adf_rl.o \ adf_sysfs.o \ + adf_sysfs_anti_rb.o \ adf_sysfs_ras_counters.o \ adf_sysfs_rl.o \ adf_timer.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 9fe3239f0114..cac110215c5e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -11,6 +11,7 @@ #include #include #include +#include "adf_anti_rb.h" #include "adf_cfg_common.h" #include "adf_dc.h" #include "adf_rl.h" @@ -328,6 +329,7 @@ struct adf_hw_device_data { struct adf_dev_err_mask dev_err_mask; struct adf_rl_hw_data rl_data; struct adf_tl_hw_data tl_data; + struct adf_anti_rb_hw_data anti_rb_data; struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 573388c37100..841aa802c79e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -6,8 +6,10 @@ #include #include #include +#include #include "adf_accel_devices.h" #include "adf_admin.h" +#include "adf_anti_rb.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_heartbeat.h" @@ -19,6 +21,7 @@ #define ADF_ADMIN_POLL_DELAY_US 20 #define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC) #define ADF_ONE_AE 1 +#define ADF_ADMIN_RETRY_MAX 
60 static const u8 const_tab[1024] __aligned(1024) = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -536,6 +539,73 @@ int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +static int adf_send_admin_retry(struct adf_accel_dev *accel_dev, u8 cmd_id, + struct icp_qat_fw_init_admin_resp *resp, + unsigned int sleep_ms) +{ + u32 admin_ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask; + struct icp_qat_fw_init_admin_req req = { }; + unsigned int retries = ADF_ADMIN_RETRY_MAX; + int ret; + + req.cmd_id = cmd_id; + + do { + ret = adf_send_admin(accel_dev, &req, resp, admin_ae_mask); + if (!ret) + return 0; + + if (resp->status != ICP_QAT_FW_INIT_RESP_STATUS_RETRY) + return ret; + + msleep(sleep_ms); + } while (--retries); + + return -ETIMEDOUT; +} + +static int adf_send_admin_svn(struct adf_accel_dev *accel_dev, u8 cmd_id, + struct icp_qat_fw_init_admin_resp *resp) +{ + return adf_send_admin_retry(accel_dev, cmd_id, resp, ADF_SVN_RETRY_MS); +} + +int adf_send_admin_arb_query(struct adf_accel_dev *accel_dev, int cmd, u8 *svn) +{ + struct icp_qat_fw_init_admin_resp resp = { }; + int ret; + + ret = adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_READ, &resp); + if (ret) + return ret; + + switch (cmd) { + case ARB_ENFORCED_MIN_SVN: + *svn = resp.enforced_min_svn; + break; + case ARB_PERMANENT_MIN_SVN: + *svn = resp.permanent_min_svn; + break; + case ARB_ACTIVE_SVN: + *svn = resp.active_svn; + break; + default: + *svn = 0; + dev_err(&GET_DEV(accel_dev), + "Unknown secure version number request\n"); + ret = -EINVAL; + } + + return ret; +} + +int adf_send_admin_arb_commit(struct adf_accel_dev *accel_dev) +{ + struct icp_qat_fw_init_admin_resp resp = { }; + + return adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_COMMIT, &resp); +} + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h 
b/drivers/crypto/intel/qat/qat_common/adf_admin.h index 647c8e196752..9704219f2eb7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.h +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -27,5 +27,7 @@ int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, struct icp_qat_fw_init_admin_slice_cnt *slice_count); int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev); +int adf_send_admin_arb_query(struct adf_accel_dev *accel_dev, int cmd, u8 *svn); +int adf_send_admin_arb_commit(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c new file mode 100644 index 000000000000..2c19a82d89ad --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2026 Intel Corporation */ +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_admin.h" +#include "adf_anti_rb.h" +#include "adf_common_drv.h" +#include "icp_qat_fw_init_admin.h" + +#define ADF_SVN_RETRY_MAX 60 + +int adf_anti_rb_commit(struct adf_accel_dev *accel_dev) +{ + return adf_send_admin_arb_commit(accel_dev); +} + +int adf_anti_rb_query(struct adf_accel_dev *accel_dev, enum anti_rb cmd, u8 *svn) +{ + return adf_send_admin_arb_query(accel_dev, cmd, svn); +} + +int adf_anti_rb_check(struct pci_dev *pdev) +{ + struct adf_anti_rb_hw_data *anti_rb; + u32 svncheck_sts, cfc_svncheck_sts; + struct adf_accel_dev *accel_dev; + void __iomem *pmisc_addr; + + accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + if (!accel_dev) + return -EINVAL; + + anti_rb = GET_ANTI_RB_DATA(accel_dev); + if (!anti_rb->anti_rb_enabled || !anti_rb->anti_rb_enabled(accel_dev)) + return 0; + + pmisc_addr = adf_get_pmisc_base(accel_dev); + + cfc_svncheck_sts = ADF_CSR_RD(pmisc_addr, anti_rb->svncheck_offset); + + svncheck_sts = FIELD_GET(ADF_SVN_STS_MASK, 
cfc_svncheck_sts); + switch (svncheck_sts) { + case ADF_SVN_NO_STS: + return 0; + case ADF_SVN_PASS_STS: + anti_rb->svncheck_retry = 0; + return 0; + case ADF_SVN_FAIL_STS: + dev_err(&GET_DEV(accel_dev), "Security Version Number failure\n"); + return -EIO; + case ADF_SVN_RETRY_STS: + if (anti_rb->svncheck_retry++ >= ADF_SVN_RETRY_MAX) { + anti_rb->svncheck_retry = 0; + return -ETIMEDOUT; + } + msleep(ADF_SVN_RETRY_MS); + return -EAGAIN; + default: + dev_err(&GET_DEV(accel_dev), "Invalid SVN check status\n"); + return -EINVAL; + } +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h new file mode 100644 index 000000000000..531af41a3db8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2026 Intel Corporation */ +#ifndef ADF_ANTI_RB_H_ +#define ADF_ANTI_RB_H_ + +#include + +#define GET_ANTI_RB_DATA(accel_dev) (&(accel_dev)->hw_device->anti_rb_data) + +#define ADF_SVN_NO_STS 0x00 +#define ADF_SVN_PASS_STS 0x01 +#define ADF_SVN_RETRY_STS 0x02 +#define ADF_SVN_FAIL_STS 0x03 +#define ADF_SVN_RETRY_MS 250 +#define ADF_SVN_STS_MASK GENMASK(7, 0) + +enum anti_rb { + ARB_ENFORCED_MIN_SVN, + ARB_PERMANENT_MIN_SVN, + ARB_ACTIVE_SVN, +}; + +struct adf_accel_dev; +struct pci_dev; + +struct adf_anti_rb_hw_data { + bool (*anti_rb_enabled)(struct adf_accel_dev *accel_dev); + u32 svncheck_offset; + u32 svncheck_retry; + bool sysfs_added; +}; + +int adf_anti_rb_commit(struct adf_accel_dev *accel_dev); +int adf_anti_rb_query(struct adf_accel_dev *accel_dev, enum anti_rb cmd, u8 *svn); +int adf_anti_rb_check(struct pci_dev *pdev); + +#endif /* ADF_ANTI_RB_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 46491048e0bb..ec376583b3ae 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -10,6 +10,7 
@@ #include "adf_dbgfs.h" #include "adf_heartbeat.h" #include "adf_rl.h" +#include "adf_sysfs_anti_rb.h" #include "adf_sysfs_ras_counters.h" #include "adf_telemetry.h" @@ -263,6 +264,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) adf_dbgfs_add(accel_dev); adf_sysfs_start_ras(accel_dev); + adf_sysfs_start_arb(accel_dev); return 0; } @@ -292,6 +294,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) adf_rl_stop(accel_dev); adf_dbgfs_rm(accel_dev); adf_sysfs_stop_ras(accel_dev); + adf_sysfs_stop_arb(accel_dev); clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c new file mode 100644 index 000000000000..789341ad1bdc --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2026 Intel Corporation */ +#include +#include + +#include "adf_anti_rb.h" +#include "adf_common_drv.h" +#include "adf_sysfs_anti_rb.h" + +static ssize_t enforced_min_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + int err; + u8 svn; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + err = adf_anti_rb_query(accel_dev, ARB_ENFORCED_MIN_SVN, &svn); + if (err) + return err; + + return sysfs_emit(buf, "%u\n", svn); +} +static DEVICE_ATTR_RO(enforced_min); + +static ssize_t active_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + int err; + u8 svn; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + err = adf_anti_rb_query(accel_dev, ARB_ACTIVE_SVN, &svn); + if (err) + return err; + + return sysfs_emit(buf, "%u\n", svn); +} +static DEVICE_ATTR_RO(active); + +static ssize_t 
permanent_min_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + int err; + u8 svn; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + err = adf_anti_rb_query(accel_dev, ARB_PERMANENT_MIN_SVN, &svn); + if (err) + return err; + + return sysfs_emit(buf, "%u\n", svn); +} +static DEVICE_ATTR_RO(permanent_min); + +static ssize_t commit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + bool val; + int err; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + err = kstrtobool(buf, &val); + if (err) + return err; + + if (!val) + return -EINVAL; + + err = adf_anti_rb_commit(accel_dev); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_WO(commit); + +static struct attribute *qat_svn_attrs[] = { + &dev_attr_commit.attr, + &dev_attr_active.attr, + &dev_attr_enforced_min.attr, + &dev_attr_permanent_min.attr, + NULL +}; + +static const struct attribute_group qat_svn_group = { + .attrs = qat_svn_attrs, + .name = "qat_svn", +}; + +void adf_sysfs_start_arb(struct adf_accel_dev *accel_dev) +{ + struct adf_anti_rb_hw_data *anti_rb = GET_ANTI_RB_DATA(accel_dev); + + if (!anti_rb->anti_rb_enabled || !anti_rb->anti_rb_enabled(accel_dev)) + return; + + if (device_add_group(&GET_DEV(accel_dev), &qat_svn_group)) { + dev_warn(&GET_DEV(accel_dev), + "Failed to create qat_svn attribute group\n"); + return; + } + + anti_rb->sysfs_added = true; +} + +void adf_sysfs_stop_arb(struct adf_accel_dev *accel_dev) +{ + struct adf_anti_rb_hw_data *anti_rb = GET_ANTI_RB_DATA(accel_dev); + + if (!anti_rb->sysfs_added) + return; + + device_remove_group(&GET_DEV(accel_dev), &qat_svn_group); + + anti_rb->sysfs_added = false; + anti_rb->svncheck_retry = 0; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h 
b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h new file mode 100644 index 000000000000..f0c2b6e464f7 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2026 Intel Corporation */ +#ifndef ADF_SYSFS_ANTI_RB_H_ +#define ADF_SYSFS_ANTI_RB_H_ + +struct adf_accel_dev; + +void adf_sysfs_start_arb(struct adf_accel_dev *accel_dev); +void adf_sysfs_stop_arb(struct adf_accel_dev *accel_dev); + +#endif /* ADF_SYSFS_ANTI_RB_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 63cf18e2a4e5..6b0f0d100cb9 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -31,11 +31,15 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_RL_REMOVE = 136, ICP_QAT_FW_TL_START = 137, ICP_QAT_FW_TL_STOP = 138, + ICP_QAT_FW_SVN_READ = 146, + ICP_QAT_FW_SVN_COMMIT = 147, }; enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, - ICP_QAT_FW_INIT_RESP_STATUS_FAIL + ICP_QAT_FW_INIT_RESP_STATUS_FAIL = 1, + ICP_QAT_FW_INIT_RESP_STATUS_RETRY = 2, + ICP_QAT_FW_INIT_RESP_STATUS_UNSUPPORTED = 4, }; struct icp_qat_fw_init_admin_tl_rp_indexes { @@ -159,6 +163,15 @@ struct icp_qat_fw_init_admin_resp { }; struct icp_qat_fw_init_admin_slice_cnt slices; __u16 fw_capabilities; + struct { + __u8 enforced_min_svn; + __u8 permanent_min_svn; + __u8 active_svn; + __u8 resrvd9; + __u16 svn_status; + __u16 resrvd10; + __u64 resrvd11; + }; }; } __packed; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index e61a367b0d17..a00ca2a0900f 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -12,6 +12,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_anti_rb.h" #include 
"adf_common_drv.h" #include "icp_qat_uclo.h" #include "icp_qat_hal.h" @@ -1230,10 +1231,11 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { - u32 fcu_sts, retry = 0; + unsigned int retries = FW_AUTH_MAX_RETRY; u32 fcu_ctl_csr, fcu_sts_csr; u32 fcu_dram_hi_csr, fcu_dram_lo_csr; u64 bus_addr; + u32 fcu_sts; bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) - sizeof(struct icp_qat_auth_chunk); @@ -1248,17 +1250,32 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); do { + int arb_ret; + msleep(FW_AUTH_WAIT_PERIOD); fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr); + + arb_ret = adf_anti_rb_check(handle->pci_dev); + if (arb_ret == -EAGAIN) { + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) { + SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); + continue; + } + } else if (arb_ret) { + goto auth_fail; + } + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) goto auth_fail; + if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) return 0; - } while (retry++ < FW_AUTH_MAX_RETRY); + } while (--retries); + auth_fail: - pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n", - fcu_sts & FCU_AUTH_STS_MASK, retry); + pr_err("authentication error (FCU_STATUS = 0x%x)\n", fcu_sts & FCU_AUTH_STS_MASK); + return -EINVAL; } From d0c0a414cc1893b195b9523ecdfbeee00b98fd0d Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Thu, 19 Mar 2026 18:11:21 +0100 Subject: [PATCH 070/129] crypto: testmgr - Add test vectors for authenc(hmac(md5),rfc3686(ctr(aes))) Test vectors were generated starting from existing RFC3686(CTR(AES)) test vectors and adding HMAC(MD5) computed with software implementation. Then, the results were double-checked on Mediatek MT7986 (safexcel). Platform pass self-tests. 
Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 7 ++ crypto/testmgr.h | 207 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 214 insertions(+) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 84beaa69cd59..9a9647175e4e 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4100,6 +4100,13 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .aead = __VECS(hmac_md5_ecb_cipher_null_tv_template) } + }, { + .alg = "authenc(hmac(md5),rfc3686(ctr(aes)))", + .generic_driver = "authenc(hmac-md5-lib,rfc3686(ctr(aes-lib)))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(hmac_md5_aes_ctr_rfc3686_tv_temp) + } }, { .alg = "authenc(hmac(sha1),cbc(aes))", .generic_driver = "authenc(hmac-sha1-lib,cbc(aes-lib))", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 458fc8e87673..6995a576a15a 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -17752,6 +17752,213 @@ static const struct aead_testvec hmac_sha512_des_cbc_tv_temp[] = { }, }; +static const struct aead_testvec hmac_md5_aes_ctr_rfc3686_tv_temp[] = { + { /* RFC 3686 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\xae\x68\x52\xf8\x12\x10\x67\xcc" + "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" + "\x00\x00\x00\x30", + .klen = 8 + 16 + 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79" + "\x2d\x61\x75\xa3\x26\x13\x11\xb8" + "\xdd\x5f\xea\x13\x2a\xf2\xb0\xf1" + "\x91\x79\x46\x40\x62\x6c\x87\x5b", + .clen = 16 + 16, + }, { /* RFC 3686 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = 
"\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x7e\x24\x06\x78\x17\xfa\xe0\xd7" + "\x43\xd6\xce\x1f\x32\x53\x91\x63" + "\x00\x6c\xb6\xdb", + .klen = 8 + 16 + 20, + .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9" + "\x79\x0d\x41\xee\x8e\xda\xd3\x88" + "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8" + "\xfc\xe6\x30\xdf\x91\x41\xbe\x28" + "\x03\x39\x23\xcd\x22\x5f\x1b\x8b" + "\x93\x70\xbc\x45\xf3\xba\xde\x2e", + .clen = 32 + 16, + }, { /* RFC 3686 Case 3 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x14" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x76\x91\xbe\x03\x5e\x50\x20\xa8" + "\xac\x6e\x61\x85\x29\xf9\xa0\xdc" + "\x00\xe0\x01\x7b", + .klen = 8 + 16 + 20, + .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23", + .plen = 36, + .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9" + "\xcf\x46\x52\xe9\xef\xdb\x72\xd7" + "\x45\x40\xa4\x2b\xde\x6d\x78\x36" + "\xd5\x9a\x5c\xea\xae\xf3\x10\x53" + "\x25\xb2\x07\x2f" + "\xb4\x40\x0c\x7b\x4c\x55\x8a\x4b" + "\x04\xf7\x48\x9e\x0f\x9a\xae\x73", + .clen = 36 + 16, + }, { /* RFC 3686 Case 4 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + 
"\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79" + "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed" + "\x86\x3d\x06\xcc\xfd\xb7\x85\x15" + "\x00\x00\x00\x48", + .klen = 8 + 16 + 28, + .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8" + "\x4e\x79\x35\xa0\x03\xcb\xe9\x28" + "\xc4\x5d\xa1\x16\x6c\x2d\xa5\x43" + "\x60\x7b\x58\x98\x11\x9b\x50\x06", + .clen = 16 + 16, + }, { /* RFC 3686 Case 5 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x1c" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c" + "\x19\xe7\x34\x08\x19\xe0\xf6\x9c" + "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a" + "\x00\x96\xb0\x3b", + .klen = 8 + 16 + 28, + .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32" + "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f" + "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c" + "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00" + "\xc5\xec\x47\x33\xae\x05\x28\x49" + "\xd5\x2b\x08\xad\x10\x98\x24\x01", + .clen = 32 + 16, + }, { /* RFC 3686 Case 7 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f" + 
"\x4c\x8a\x05\x42\xc8\x69\x6f\x6c" + "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3" + "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04" + "\x00\x00\x00\x60", + .klen = 8 + 16 + 36, + .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2", + .alen = 8, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7" + "\x56\x08\x63\xdc\x71\xe3\xe0\xc0" + "\xc6\x26\xb2\x27\x0d\x21\xd4\x40" + "\x6c\x4f\x53\xea\x19\x75\xda\x8e", + .clen = 16 + 16, + }, { /* RFC 3686 Case 8 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x24" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb" + "\x07\x96\x36\x58\x79\xef\xf8\x86" + "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74" + "\x4b\x50\x59\x0c\x87\xa2\x38\x84" + "\x00\xfa\xac\x24", + .klen = 8 + 16 + 36, + .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75", + .alen = 8, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c" + "\x49\xee\x00\x0b\x80\x4e\xb2\xa9" + "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a" + "\x55\x30\x83\x1d\x93\x44\xaf\x1c" + "\x8c\x4d\x2a\x8d\x23\x47\x59\x6f" + "\x1e\x74\x62\x39\xed\x14\x50\x6c", + .clen = 32 + 16, + }, +}; + static const struct aead_testvec hmac_md5_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN From 1a9670df56eac0a374cc2a5e9a63775de4c61837 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 20 Mar 2026 09:49:13 +0100 Subject: [PATCH 071/129] crypto: stm32 - use list_first_entry_or_null to simplify hash_find_dev Use list_first_entry_or_null() to simplify stm32_hash_find_dev() and remove the now-unused local variable 'struct 
stm32_hash_dev *tmp'. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index d60147a7594e..dada5951082c 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -792,19 +792,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) { - struct stm32_hash_dev *hdev = NULL, *tmp; + struct stm32_hash_dev *hdev; spin_lock_bh(&stm32_hash.lock); - if (!ctx->hdev) { - list_for_each_entry(tmp, &stm32_hash.dev_list, list) { - hdev = tmp; - break; - } - ctx->hdev = hdev; - } else { - hdev = ctx->hdev; - } - + if (!ctx->hdev) + ctx->hdev = list_first_entry_or_null(&stm32_hash.dev_list, + struct stm32_hash_dev, list); + hdev = ctx->hdev; spin_unlock_bh(&stm32_hash.lock); return hdev; From 92c0a9bbcde6a748a40182fe32e3a1b2f9f1a23d Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 20 Mar 2026 09:49:14 +0100 Subject: [PATCH 072/129] crypto: stm32 - use list_first_entry_or_null to simplify cryp_find_dev Use list_first_entry_or_null() to simplify stm32_cryp_find_dev() and remove the now-unused local variable 'struct stm32_cryp *tmp'. 
Signed-off-by: Thorsten Blum Reviewed-by: Kees Cook Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-cryp.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 3c9b3f679461..b79877099942 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -361,19 +361,13 @@ static int stm32_cryp_it_start(struct stm32_cryp *cryp); static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) { - struct stm32_cryp *tmp, *cryp = NULL; + struct stm32_cryp *cryp; spin_lock_bh(&cryp_list.lock); - if (!ctx->cryp) { - list_for_each_entry(tmp, &cryp_list.dev_list, list) { - cryp = tmp; - break; - } - ctx->cryp = cryp; - } else { - cryp = ctx->cryp; - } - + if (!ctx->cryp) + ctx->cryp = list_first_entry_or_null(&cryp_list.dev_list, + struct stm32_cryp, list); + cryp = ctx->cryp; spin_unlock_bh(&cryp_list.lock); return cryp; From 37b902c60304291b30b417ab5b9531b9c662aacd Mon Sep 17 00:00:00 2001 From: T Pratham Date: Fri, 20 Mar 2026 16:20:51 +0530 Subject: [PATCH 073/129] crypto: ti - Add support for AES-GCM in DTHEv2 driver AES-GCM is an AEAD algorithm supporting both encryption and authentication of data. This patch introduces support for AES-GCM as the first AEAD algorithm supported by the DTHEv2 driver. 
Signed-off-by: T Pratham Signed-off-by: Herbert Xu --- drivers/crypto/ti/Kconfig | 2 + drivers/crypto/ti/dthev2-aes.c | 618 +++++++++++++++++++++++++++++- drivers/crypto/ti/dthev2-common.h | 11 +- 3 files changed, 628 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig index 6027e12de279..221e48373743 100644 --- a/drivers/crypto/ti/Kconfig +++ b/drivers/crypto/ti/Kconfig @@ -8,6 +8,8 @@ config CRYPTO_DEV_TI_DTHEV2 select CRYPTO_CBC select CRYPTO_CTR select CRYPTO_XTS + select CRYPTO_GCM + select SG_SPLIT help This enables support for the TI DTHE V2 hw cryptography engine which can be found on TI K3 SOCs. Selecting this enables use diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c index bf7d4dcb4cd7..5583386decbe 100644 --- a/drivers/crypto/ti/dthev2-aes.c +++ b/drivers/crypto/ti/dthev2-aes.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -19,6 +20,7 @@ #include #include #include +#include #include /* Registers */ @@ -53,6 +55,7 @@ #define DTHE_P_AES_C_LENGTH_1 0x0058 #define DTHE_P_AES_AUTH_LENGTH 0x005C #define DTHE_P_AES_DATA_IN_OUT 0x0060 +#define DTHE_P_AES_TAG_OUT 0x0070 #define DTHE_P_AES_SYSCONFIG 0x0084 #define DTHE_P_AES_IRQSTATUS 0x008C @@ -65,6 +68,7 @@ enum aes_ctrl_mode_masks { AES_CTRL_CBC_MASK = BIT(5), AES_CTRL_CTR_MASK = BIT(6), AES_CTRL_XTS_MASK = BIT(12) | BIT(11), + AES_CTRL_GCM_MASK = BIT(17) | BIT(16) | BIT(6), }; #define DTHE_AES_CTRL_MODE_CLEAR_MASK ~GENMASK(28, 5) @@ -91,6 +95,8 @@ enum aes_ctrl_mode_masks { #define AES_IV_SIZE AES_BLOCK_SIZE #define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32)) #define AES_IV_WORDS AES_BLOCK_WORDS +#define DTHE_AES_GCM_AAD_MAXLEN (BIT_ULL(32) - 1) +#define POLL_TIMEOUT_INTERVAL HZ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm) { @@ -266,6 +272,9 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx, case DTHE_AES_XTS: ctrl_val |= AES_CTRL_XTS_MASK; break; + case DTHE_AES_GCM: + 
ctrl_val |= AES_CTRL_GCM_MASK; + break; } if (iv_in) { @@ -542,6 +551,575 @@ static int dthe_aes_decrypt(struct skcipher_request *req) return dthe_aes_crypt(req); } +static int dthe_aead_init_tfm(struct crypto_aead *tfm) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct dthe_data *dev_data = dthe_get_dev(ctx); + + ctx->dev_data = dev_data; + + const char *alg_name = crypto_tfm_alg_name(crypto_aead_tfm(tfm)); + + ctx->aead_fb = crypto_alloc_sync_aead(alg_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->aead_fb)) { + dev_err(dev_data->dev, "fallback driver %s couldn't be loaded\n", + alg_name); + return PTR_ERR(ctx->aead_fb); + } + + return 0; +} + +static void dthe_aead_exit_tfm(struct crypto_aead *tfm) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_sync_aead(ctx->aead_fb); +} + +/** + * dthe_aead_prep_aad - Prepare AAD scatterlist from input request + * @sg: Input scatterlist containing AAD + * @assoclen: Length of AAD + * @pad_buf: Buffer to hold AAD padding if needed + * + * Description: + * Creates a scatterlist containing only the AAD portion with padding + * to align to AES_BLOCK_SIZE. This simplifies DMA handling by allowing + * AAD to be sent separately via TX-only DMA. + * + * Return: + * Pointer to the AAD scatterlist, or ERR_PTR(error) on failure. + * The calling function needs to free the returned scatterlist when done. 
+ **/ +static struct scatterlist *dthe_aead_prep_aad(struct scatterlist *sg, + unsigned int assoclen, + u8 *pad_buf) +{ + struct scatterlist *aad_sg; + struct scatterlist *to_sg; + int aad_nents; + + if (assoclen == 0) + return NULL; + + aad_nents = sg_nents_for_len(sg, assoclen); + if (assoclen % AES_BLOCK_SIZE) + aad_nents++; + + aad_sg = kmalloc_array(aad_nents, sizeof(struct scatterlist), GFP_ATOMIC); + if (!aad_sg) + return ERR_PTR(-ENOMEM); + + sg_init_table(aad_sg, aad_nents); + to_sg = dthe_copy_sg(aad_sg, sg, assoclen); + if (assoclen % AES_BLOCK_SIZE) { + unsigned int pad_len = AES_BLOCK_SIZE - (assoclen % AES_BLOCK_SIZE); + + memset(pad_buf, 0, pad_len); + sg_set_buf(to_sg, pad_buf, pad_len); + } + + return aad_sg; +} + +/** + * dthe_aead_prep_crypt - Prepare crypt scatterlist from req->src/req->dst + * @sg: Input req->src/req->dst scatterlist + * @assoclen: Length of AAD (to skip) + * @cryptlen: Length of ciphertext/plaintext (minus the size of TAG in decryption) + * @pad_buf: Zeroed buffer to hold crypt padding if needed + * + * Description: + * Creates a scatterlist containing only the ciphertext/plaintext portion + * (skipping AAD) with padding to align to AES_BLOCK_SIZE. + * + * Return: + * Pointer to the ciphertext scatterlist, or ERR_PTR(error) on failure. + * The calling function needs to free the returned scatterlist when done. 
+ **/ +static struct scatterlist *dthe_aead_prep_crypt(struct scatterlist *sg, + unsigned int assoclen, + unsigned int cryptlen, + u8 *pad_buf) +{ + struct scatterlist *out_sg[1]; + struct scatterlist *crypt_sg; + struct scatterlist *to_sg; + size_t split_sizes[1] = {cryptlen}; + int out_mapped_nents[1]; + int crypt_nents; + int err; + + if (cryptlen == 0) + return NULL; + + /* Skip AAD, extract ciphertext portion */ + err = sg_split(sg, 0, assoclen, 1, split_sizes, out_sg, out_mapped_nents, GFP_ATOMIC); + if (err) + goto dthe_aead_prep_crypt_split_err; + + crypt_nents = sg_nents_for_len(out_sg[0], cryptlen); + if (cryptlen % AES_BLOCK_SIZE) + crypt_nents++; + + crypt_sg = kmalloc_array(crypt_nents, sizeof(struct scatterlist), GFP_ATOMIC); + if (!crypt_sg) { + err = -ENOMEM; + goto dthe_aead_prep_crypt_mem_err; + } + + sg_init_table(crypt_sg, crypt_nents); + to_sg = dthe_copy_sg(crypt_sg, out_sg[0], cryptlen); + if (cryptlen % AES_BLOCK_SIZE) { + unsigned int pad_len = AES_BLOCK_SIZE - (cryptlen % AES_BLOCK_SIZE); + + sg_set_buf(to_sg, pad_buf, pad_len); + } + +dthe_aead_prep_crypt_mem_err: + kfree(out_sg[0]); + +dthe_aead_prep_crypt_split_err: + if (err) + return ERR_PTR(err); + return crypt_sg; +} + +static int dthe_aead_read_tag(struct dthe_tfm_ctx *ctx, u32 *tag) +{ + struct dthe_data *dev_data = dthe_get_dev(ctx); + void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE; + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout(aes_base_reg + DTHE_P_AES_CTRL, val, + (val & DTHE_AES_CTRL_SAVED_CTX_READY), + 0, POLL_TIMEOUT_INTERVAL); + if (ret) + return ret; + + for (int i = 0; i < AES_BLOCK_WORDS; ++i) + tag[i] = readl_relaxed(aes_base_reg + + DTHE_P_AES_TAG_OUT + + DTHE_REG_SIZE * i); + return 0; +} + +static int dthe_aead_enc_get_tag(struct aead_request *req) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + u32 tag[AES_BLOCK_WORDS]; + int nents; + int ret; + + ret = dthe_aead_read_tag(ctx, tag); + if (ret) + return ret; + + 
nents = sg_nents_for_len(req->dst, req->cryptlen + req->assoclen + ctx->authsize); + + sg_pcopy_from_buffer(req->dst, nents, tag, ctx->authsize, + req->assoclen + req->cryptlen); + + return 0; +} + +static int dthe_aead_dec_verify_tag(struct aead_request *req) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + u32 tag_out[AES_BLOCK_WORDS]; + u32 tag_in[AES_BLOCK_WORDS]; + int nents; + int ret; + + ret = dthe_aead_read_tag(ctx, tag_out); + if (ret) + return ret; + + nents = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); + + sg_pcopy_to_buffer(req->src, nents, tag_in, ctx->authsize, + req->assoclen + req->cryptlen - ctx->authsize); + + if (crypto_memneq(tag_in, tag_out, ctx->authsize)) + return -EBADMSG; + else + return 0; +} + +static int dthe_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm); + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) + return -EINVAL; + + ctx->aes_mode = DTHE_AES_GCM; + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + + crypto_sync_aead_clear_flags(ctx->aead_fb, CRYPTO_TFM_REQ_MASK); + crypto_sync_aead_set_flags(ctx->aead_fb, + crypto_aead_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + + return crypto_sync_aead_setkey(ctx->aead_fb, key, keylen); +} + +static int dthe_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm); + + /* Invalid auth size will be handled by crypto_aead_setauthsize() */ + ctx->authsize = authsize; + + return crypto_sync_aead_setauthsize(ctx->aead_fb, authsize); +} + +static int dthe_aead_do_fallback(struct aead_request *req) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct dthe_aes_req_ctx *rctx = aead_request_ctx(req); + + SYNC_AEAD_REQUEST_ON_STACK(subreq, ctx->aead_fb); + + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); 
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + return rctx->enc ? crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); +} + +static void dthe_aead_dma_in_callback(void *data) +{ + struct aead_request *req = (struct aead_request *)data; + struct dthe_aes_req_ctx *rctx = aead_request_ctx(req); + + complete(&rctx->aes_compl); +} + +static int dthe_aead_run(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = container_of(areq, struct aead_request, base); + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct dthe_aes_req_ctx *rctx = aead_request_ctx(req); + struct dthe_data *dev_data = dthe_get_dev(ctx); + + unsigned int cryptlen = req->cryptlen; + unsigned int assoclen = req->assoclen; + unsigned int authsize = ctx->authsize; + unsigned int unpadded_cryptlen; + struct scatterlist *src = NULL; + struct scatterlist *dst = NULL; + struct scatterlist *aad_sg = NULL; + u32 iv_in[AES_IV_WORDS]; + + int aad_nents = 0; + int src_nents = 0; + int dst_nents = 0; + int aad_mapped_nents = 0; + int src_mapped_nents = 0; + int dst_mapped_nents = 0; + + u8 *src_assoc_padbuf = rctx->padding; + u8 *src_crypt_padbuf = rctx->padding + AES_BLOCK_SIZE; + u8 *dst_crypt_padbuf = rctx->padding + AES_BLOCK_SIZE; + + bool diff_dst; + enum dma_data_direction aad_dir, src_dir, dst_dir; + + struct device *tx_dev, *rx_dev; + struct dma_async_tx_descriptor *desc_in, *desc_out, *desc_aad_out; + + int ret; + int err; + + void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE; + + u32 aes_irqenable_val = readl_relaxed(aes_base_reg + DTHE_P_AES_IRQENABLE); + u32 aes_sysconfig_val = readl_relaxed(aes_base_reg + DTHE_P_AES_SYSCONFIG); + + aes_sysconfig_val |= DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN; + writel_relaxed(aes_sysconfig_val, aes_base_reg + DTHE_P_AES_SYSCONFIG); + + aes_irqenable_val |= DTHE_AES_IRQENABLE_EN_ALL; + writel_relaxed(aes_irqenable_val, 
aes_base_reg + DTHE_P_AES_IRQENABLE); + + /* In decryption, the last authsize bytes are the TAG */ + if (!rctx->enc) + cryptlen -= authsize; + unpadded_cryptlen = cryptlen; + + memset(src_assoc_padbuf, 0, AES_BLOCK_SIZE); + memset(src_crypt_padbuf, 0, AES_BLOCK_SIZE); + memset(dst_crypt_padbuf, 0, AES_BLOCK_SIZE); + + tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx); + rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx); + + if (req->src == req->dst) { + diff_dst = false; + src_dir = DMA_BIDIRECTIONAL; + dst_dir = DMA_BIDIRECTIONAL; + } else { + diff_dst = true; + src_dir = DMA_TO_DEVICE; + dst_dir = DMA_FROM_DEVICE; + } + aad_dir = DMA_TO_DEVICE; + + /* Prep AAD scatterlist (always from req->src) */ + aad_sg = dthe_aead_prep_aad(req->src, req->assoclen, src_assoc_padbuf); + if (IS_ERR(aad_sg)) { + ret = PTR_ERR(aad_sg); + goto aead_prep_aad_err; + } + + /* Prep ciphertext src scatterlist */ + src = dthe_aead_prep_crypt(req->src, req->assoclen, cryptlen, src_crypt_padbuf); + if (IS_ERR(src)) { + ret = PTR_ERR(src); + goto aead_prep_src_err; + } + + /* Prep ciphertext dst scatterlist (only if separate dst) */ + if (diff_dst) { + dst = dthe_aead_prep_crypt(req->dst, req->assoclen, unpadded_cryptlen, + dst_crypt_padbuf); + if (IS_ERR(dst)) { + ret = PTR_ERR(dst); + goto aead_prep_dst_err; + } + } else { + dst = src; + } + + /* Calculate padded lengths for nents calculations */ + if (req->assoclen % AES_BLOCK_SIZE) + assoclen += AES_BLOCK_SIZE - (req->assoclen % AES_BLOCK_SIZE); + if (cryptlen % AES_BLOCK_SIZE) + cryptlen += AES_BLOCK_SIZE - (cryptlen % AES_BLOCK_SIZE); + + if (assoclen != 0) { + /* Map AAD for TX only */ + aad_nents = sg_nents_for_len(aad_sg, assoclen); + aad_mapped_nents = dma_map_sg(tx_dev, aad_sg, aad_nents, aad_dir); + if (aad_mapped_nents == 0) { + dev_err(dev_data->dev, "Failed to map AAD for TX\n"); + ret = -EINVAL; + goto aead_dma_map_aad_err; + } + + /* Prepare DMA descriptors for AAD TX */ + desc_aad_out = 
dmaengine_prep_slave_sg(dev_data->dma_aes_tx, aad_sg, + aad_mapped_nents, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_aad_out) { + dev_err(dev_data->dev, "AAD TX prep_slave_sg() failed\n"); + ret = -EINVAL; + goto aead_dma_prep_aad_err; + } + } + + if (cryptlen != 0) { + /* Map ciphertext src for TX (BIDIRECTIONAL if in-place) */ + src_nents = sg_nents_for_len(src, cryptlen); + src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir); + if (src_mapped_nents == 0) { + dev_err(dev_data->dev, "Failed to map ciphertext src for TX\n"); + ret = -EINVAL; + goto aead_dma_prep_aad_err; + } + + /* Prepare DMA descriptors for ciphertext TX */ + desc_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, src, + src_mapped_nents, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_out) { + dev_err(dev_data->dev, "Ciphertext TX prep_slave_sg() failed\n"); + ret = -EINVAL; + goto aead_dma_prep_src_err; + } + + /* Map ciphertext dst for RX (only if separate dst) */ + if (diff_dst) { + dst_nents = sg_nents_for_len(dst, cryptlen); + dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir); + if (dst_mapped_nents == 0) { + dev_err(dev_data->dev, "Failed to map ciphertext dst for RX\n"); + ret = -EINVAL; + goto aead_dma_prep_src_err; + } + } else { + dst_nents = src_nents; + dst_mapped_nents = src_mapped_nents; + } + + /* Prepare DMA descriptor for ciphertext RX */ + desc_in = dmaengine_prep_slave_sg(dev_data->dma_aes_rx, dst, + dst_mapped_nents, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_in) { + dev_err(dev_data->dev, "Ciphertext RX prep_slave_sg() failed\n"); + ret = -EINVAL; + goto aead_dma_prep_dst_err; + } + + desc_in->callback = dthe_aead_dma_in_callback; + desc_in->callback_param = req; + } else if (assoclen != 0) { + /* AAD-only operation */ + desc_aad_out->callback = dthe_aead_dma_in_callback; + desc_aad_out->callback_param = req; + } + + init_completion(&rctx->aes_compl); + + /* + * HACK: There is an unknown 
hw issue where if the previous operation had alen = 0 and + * plen != 0, the current operation's tag calculation is incorrect in the case where + * plen = 0 and alen != 0 currently. This is a workaround for now which somehow works; + * by resetting the context by writing a 1 to the C_LENGTH_0 and AUTH_LENGTH registers. + */ + if (cryptlen == 0) { + writel_relaxed(1, aes_base_reg + DTHE_P_AES_C_LENGTH_0); + writel_relaxed(1, aes_base_reg + DTHE_P_AES_AUTH_LENGTH); + } + + if (req->iv) { + memcpy(iv_in, req->iv, GCM_AES_IV_SIZE); + } else { + iv_in[0] = 0; + iv_in[1] = 0; + iv_in[2] = 0; + } + iv_in[3] = 0x01000000; + + /* Clear key2 to reset previous GHASH intermediate data */ + for (int i = 0; i < AES_KEYSIZE_256 / sizeof(u32); ++i) + writel_relaxed(0, aes_base_reg + DTHE_P_AES_KEY2_6 + DTHE_REG_SIZE * i); + + dthe_aes_set_ctrl_key(ctx, rctx, iv_in); + + writel_relaxed(lower_32_bits(unpadded_cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0); + writel_relaxed(upper_32_bits(unpadded_cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1); + writel_relaxed(req->assoclen, aes_base_reg + DTHE_P_AES_AUTH_LENGTH); + + /* Submit DMA descriptors: AAD TX, ciphertext TX, ciphertext RX */ + if (assoclen != 0) + dmaengine_submit(desc_aad_out); + if (cryptlen != 0) { + dmaengine_submit(desc_out); + dmaengine_submit(desc_in); + } + + if (cryptlen != 0) + dma_async_issue_pending(dev_data->dma_aes_rx); + dma_async_issue_pending(dev_data->dma_aes_tx); + + /* Need to do timeout to ensure finalise gets called if DMA callback fails for any reason */ + ret = wait_for_completion_timeout(&rctx->aes_compl, msecs_to_jiffies(DTHE_DMA_TIMEOUT_MS)); + if (!ret) { + ret = -ETIMEDOUT; + if (cryptlen != 0) + dmaengine_terminate_sync(dev_data->dma_aes_rx); + dmaengine_terminate_sync(dev_data->dma_aes_tx); + + for (int i = 0; i < AES_BLOCK_WORDS; ++i) + readl_relaxed(aes_base_reg + DTHE_P_AES_DATA_IN_OUT + DTHE_REG_SIZE * i); + } else { + ret = 0; + } + + if (cryptlen != 0) + dma_sync_sg_for_cpu(rx_dev, 
dst, dst_nents, dst_dir); + + if (rctx->enc) + err = dthe_aead_enc_get_tag(req); + else + err = dthe_aead_dec_verify_tag(req); + + ret = (ret) ? ret : err; + +aead_dma_prep_dst_err: + if (diff_dst && cryptlen != 0) + dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir); +aead_dma_prep_src_err: + if (cryptlen != 0) + dma_unmap_sg(tx_dev, src, src_nents, src_dir); +aead_dma_prep_aad_err: + if (assoclen != 0) + dma_unmap_sg(tx_dev, aad_sg, aad_nents, aad_dir); + +aead_dma_map_aad_err: + if (diff_dst && cryptlen != 0) + kfree(dst); +aead_prep_dst_err: + if (cryptlen != 0) + kfree(src); +aead_prep_src_err: + if (assoclen != 0) + kfree(aad_sg); + +aead_prep_aad_err: + memzero_explicit(rctx->padding, 2 * AES_BLOCK_SIZE); + + if (ret) + ret = dthe_aead_do_fallback(req); + + local_bh_disable(); + crypto_finalize_aead_request(engine, req, ret); + local_bh_enable(); + return 0; +} + +static int dthe_aead_crypt(struct aead_request *req) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct dthe_aes_req_ctx *rctx = aead_request_ctx(req); + struct dthe_data *dev_data = dthe_get_dev(ctx); + struct crypto_engine *engine; + unsigned int cryptlen = req->cryptlen; + + /* In decryption, last authsize bytes are the TAG */ + if (!rctx->enc) + cryptlen -= ctx->authsize; + + /* + * Need to fallback to software in the following cases due to HW restrictions: + * - Both AAD and plaintext/ciphertext are zero length + * - AAD length is more than 2^32 - 1 bytes + * PS: req->cryptlen is currently unsigned int type, which causes the above condition + * tautologically false. If req->cryptlen were to be changed to a 64-bit type, + * the check for this would need to be added below. 
+ */ + if (req->assoclen == 0 && cryptlen == 0) + return dthe_aead_do_fallback(req); + + engine = dev_data->engine; + return crypto_transfer_aead_request_to_engine(engine, req); +} + +static int dthe_aead_encrypt(struct aead_request *req) +{ + struct dthe_aes_req_ctx *rctx = aead_request_ctx(req); + + rctx->enc = 1; + return dthe_aead_crypt(req); +} + +static int dthe_aead_decrypt(struct aead_request *req) +{ + struct dthe_aes_req_ctx *rctx = aead_request_ctx(req); + + rctx->enc = 0; + return dthe_aead_crypt(req); +} + static struct skcipher_engine_alg cipher_algs[] = { { .base.init = dthe_cipher_init_tfm, @@ -640,12 +1218,50 @@ static struct skcipher_engine_alg cipher_algs[] = { }, /* XTS AES */ }; +static struct aead_engine_alg aead_algs[] = { + { + .base.init = dthe_aead_init_tfm, + .base.exit = dthe_aead_exit_tfm, + .base.setkey = dthe_aead_setkey, + .base.setauthsize = dthe_aead_setauthsize, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.encrypt = dthe_aead_encrypt, + .base.decrypt = dthe_aead_decrypt, + .base.chunksize = AES_BLOCK_SIZE, + .base.ivsize = GCM_AES_IV_SIZE, + .base.base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-dthev2", + .cra_priority = 299, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct dthe_tfm_ctx), + .cra_reqsize = sizeof(struct dthe_aes_req_ctx), + .cra_module = THIS_MODULE, + }, + .op.do_one_request = dthe_aead_run, + }, /* GCM AES */ +}; + int dthe_register_aes_algs(void) { - return crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs)); + int ret = 0; + + ret = crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs)); + if (ret) + return ret; + ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + if (ret) + crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs)); + + return ret; } void dthe_unregister_aes_algs(void) { 
crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs)); + crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); } diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h index 5239ee93c944..8514f0df8ac3 100644 --- a/drivers/crypto/ti/dthev2-common.h +++ b/drivers/crypto/ti/dthev2-common.h @@ -38,6 +38,7 @@ enum dthe_aes_mode { DTHE_AES_CBC, DTHE_AES_CTR, DTHE_AES_XTS, + DTHE_AES_GCM, }; /* Driver specific struct definitions */ @@ -78,16 +79,22 @@ struct dthe_list { * struct dthe_tfm_ctx - Transform ctx struct containing ctx for all sub-components of DTHE V2 * @dev_data: Device data struct pointer * @keylen: AES key length + * @authsize: Authentication size for modes with authentication * @key: AES key * @aes_mode: AES mode + * @aead_fb: Fallback crypto aead handle * @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode */ struct dthe_tfm_ctx { struct dthe_data *dev_data; unsigned int keylen; + unsigned int authsize; u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)]; enum dthe_aes_mode aes_mode; - struct crypto_sync_skcipher *skcipher_fb; + union { + struct crypto_sync_aead *aead_fb; + struct crypto_sync_skcipher *skcipher_fb; + }; }; /** @@ -98,7 +105,7 @@ struct dthe_tfm_ctx { */ struct dthe_aes_req_ctx { int enc; - u8 padding[AES_BLOCK_SIZE]; + u8 padding[2 * AES_BLOCK_SIZE]; struct completion aes_compl; }; From a09c5e06498f6a62c89ca56ccdfbfead96e63732 Mon Sep 17 00:00:00 2001 From: T Pratham Date: Fri, 20 Mar 2026 16:20:52 +0530 Subject: [PATCH 074/129] crypto: ti - Add support for AES-CCM in DTHEv2 driver AES-CCM is an AEAD algorithm supporting both encryption and authentication of data. This patch introduces support for AES-CCM AEAD algorithm in the DTHEv2 driver. 
Signed-off-by: T Pratham Signed-off-by: Herbert Xu --- drivers/crypto/ti/Kconfig | 1 + drivers/crypto/ti/dthev2-aes.c | 140 ++++++++++++++++++++++++++---- drivers/crypto/ti/dthev2-common.h | 1 + 3 files changed, 126 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig index 221e48373743..1a3a571ac8ce 100644 --- a/drivers/crypto/ti/Kconfig +++ b/drivers/crypto/ti/Kconfig @@ -9,6 +9,7 @@ config CRYPTO_DEV_TI_DTHEV2 select CRYPTO_CTR select CRYPTO_XTS select CRYPTO_GCM + select CRYPTO_CCM select SG_SPLIT help This enables support for the TI DTHE V2 hw cryptography engine diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c index 5583386decbe..eb5cd902dfb5 100644 --- a/drivers/crypto/ti/dthev2-aes.c +++ b/drivers/crypto/ti/dthev2-aes.c @@ -16,6 +16,7 @@ #include "dthev2-common.h" +#include #include #include #include @@ -69,6 +70,7 @@ enum aes_ctrl_mode_masks { AES_CTRL_CTR_MASK = BIT(6), AES_CTRL_XTS_MASK = BIT(12) | BIT(11), AES_CTRL_GCM_MASK = BIT(17) | BIT(16) | BIT(6), + AES_CTRL_CCM_MASK = BIT(18) | BIT(6), }; #define DTHE_AES_CTRL_MODE_CLEAR_MASK ~GENMASK(28, 5) @@ -81,6 +83,11 @@ enum aes_ctrl_mode_masks { #define DTHE_AES_CTRL_CTR_WIDTH_128B (BIT(7) | BIT(8)) +#define DTHE_AES_CCM_L_FROM_IV_MASK GENMASK(2, 0) +#define DTHE_AES_CCM_M_BITS GENMASK(2, 0) +#define DTHE_AES_CTRL_CCM_L_FIELD_MASK GENMASK(21, 19) +#define DTHE_AES_CTRL_CCM_M_FIELD_MASK GENMASK(24, 22) + #define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29) #define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0) @@ -96,6 +103,8 @@ enum aes_ctrl_mode_masks { #define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32)) #define AES_IV_WORDS AES_BLOCK_WORDS #define DTHE_AES_GCM_AAD_MAXLEN (BIT_ULL(32) - 1) +#define DTHE_AES_CCM_AAD_MAXLEN (BIT(16) - BIT(8)) +#define DTHE_AES_CCM_CRYPT_MAXLEN (BIT_ULL(61) - 1) #define POLL_TIMEOUT_INTERVAL HZ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm) @@ -275,6 +284,13 @@ static void dthe_aes_set_ctrl_key(struct 
dthe_tfm_ctx *ctx, case DTHE_AES_GCM: ctrl_val |= AES_CTRL_GCM_MASK; break; + case DTHE_AES_CCM: + ctrl_val |= AES_CTRL_CCM_MASK; + ctrl_val |= FIELD_PREP(DTHE_AES_CTRL_CCM_L_FIELD_MASK, + (iv_in[0] & DTHE_AES_CCM_L_FROM_IV_MASK)); + ctrl_val |= FIELD_PREP(DTHE_AES_CTRL_CCM_M_FIELD_MASK, + ((ctx->authsize - 2) >> 1) & DTHE_AES_CCM_M_BITS); + break; } if (iv_in) { @@ -756,10 +772,6 @@ static int dthe_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; - ctx->aes_mode = DTHE_AES_GCM; - ctx->keylen = keylen; - memcpy(ctx->key, key, keylen); - crypto_sync_aead_clear_flags(ctx->aead_fb, CRYPTO_TFM_REQ_MASK); crypto_sync_aead_set_flags(ctx->aead_fb, crypto_aead_get_flags(tfm) & @@ -768,6 +780,38 @@ static int dthe_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int return crypto_sync_aead_setkey(ctx->aead_fb, key, keylen); } +static int dthe_gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm); + int ret; + + ret = dthe_aead_setkey(tfm, key, keylen); + if (ret) + return ret; + + ctx->aes_mode = DTHE_AES_GCM; + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + + return ret; +} + +static int dthe_ccm_aes_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) +{ + struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm); + int ret; + + ret = dthe_aead_setkey(tfm, key, keylen); + if (ret) + return ret; + + ctx->aes_mode = DTHE_AES_CCM; + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + + return ret; +} + static int dthe_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm); @@ -990,14 +1034,18 @@ static int dthe_aead_run(struct crypto_engine *engine, void *areq) writel_relaxed(1, aes_base_reg + DTHE_P_AES_AUTH_LENGTH); } - if (req->iv) { - memcpy(iv_in, req->iv, GCM_AES_IV_SIZE); + if 
(ctx->aes_mode == DTHE_AES_GCM) { + if (req->iv) { + memcpy(iv_in, req->iv, GCM_AES_IV_SIZE); + } else { + iv_in[0] = 0; + iv_in[1] = 0; + iv_in[2] = 0; + } + iv_in[3] = 0x01000000; } else { - iv_in[0] = 0; - iv_in[1] = 0; - iv_in[2] = 0; + memcpy(iv_in, req->iv, AES_IV_SIZE); } - iv_in[3] = 0x01000000; /* Clear key2 to reset previous GHASH intermediate data */ for (int i = 0; i < AES_KEYSIZE_256 / sizeof(u32); ++i) @@ -1084,20 +1132,55 @@ static int dthe_aead_crypt(struct aead_request *req) struct dthe_data *dev_data = dthe_get_dev(ctx); struct crypto_engine *engine; unsigned int cryptlen = req->cryptlen; + bool is_zero_ctr = true; /* In decryption, last authsize bytes are the TAG */ if (!rctx->enc) cryptlen -= ctx->authsize; + if (ctx->aes_mode == DTHE_AES_CCM) { + /* + * For CCM Mode, the 128-bit IV contains the following: + * | 0 .. 2 | 3 .. 7 | 8 .. (127-8*L) | (128-8*L) .. 127 | + * | L-1 | Zero | Nonce | Counter | + * L needs to be between 2-8 (inclusive), i.e. 1 <= (L-1) <= 7 + * and the next 5 bits need to be zeroes. Else return -EINVAL + */ + u8 *iv = req->iv; + u8 L = iv[0]; + + /* variable L stores L-1 here */ + if (L < 1 || L > 7) + return -EINVAL; + /* + * DTHEv2 HW can only work with zero initial counter in CCM mode. + * Check if the initial counter value is zero or not + */ + for (int i = 0; i < L + 1; ++i) { + if (iv[AES_IV_SIZE - 1 - i] != 0) { + is_zero_ctr = false; + break; + } + } + } + /* * Need to fallback to software in the following cases due to HW restrictions: * - Both AAD and plaintext/ciphertext are zero length - * - AAD length is more than 2^32 - 1 bytes - * PS: req->cryptlen is currently unsigned int type, which causes the above condition - * tautologically false. If req->cryptlen were to be changed to a 64-bit type, - * the check for this would need to be added below. 
+ * - For AES-GCM, AAD length is more than 2^32 - 1 bytes + * - For AES-CCM, AAD length is more than 2^16 - 2^8 bytes + * - For AES-CCM, plaintext/ciphertext length is more than 2^61 - 1 bytes + * - For AES-CCM, AAD length is non-zero but plaintext/ciphertext length is zero + * - For AES-CCM, the initial counter (last L+1 bytes of IV) is not all zeroes + * + * PS: req->cryptlen is currently unsigned int type, which causes the second and fourth + * cases above tautologically false. If req->cryptlen is to be changed to a 64-bit + * type, the check for these would also need to be added below. */ - if (req->assoclen == 0 && cryptlen == 0) + if ((req->assoclen == 0 && cryptlen == 0) || + (ctx->aes_mode == DTHE_AES_CCM && req->assoclen > DTHE_AES_CCM_AAD_MAXLEN) || + (ctx->aes_mode == DTHE_AES_CCM && cryptlen == 0) || + (ctx->aes_mode == DTHE_AES_CCM && !is_zero_ctr)) return dthe_aead_do_fallback(req); engine = dev_data->engine; @@ -1222,7 +1305,7 @@ static struct aead_engine_alg aead_algs[] = { { .base.init = dthe_aead_init_tfm, .base.exit = dthe_aead_exit_tfm, - .base.setkey = dthe_aead_setkey, + .base.setkey = dthe_gcm_aes_setkey, .base.setauthsize = dthe_aead_setauthsize, .base.maxauthsize = AES_BLOCK_SIZE, .base.encrypt = dthe_aead_encrypt, @@ -1244,6 +1327,31 @@ static struct aead_engine_alg aead_algs[] = { }, .op.do_one_request = dthe_aead_run, }, /* GCM AES */ + { + .base.init = dthe_aead_init_tfm, + .base.exit = dthe_aead_exit_tfm, + .base.setkey = dthe_ccm_aes_setkey, + .base.setauthsize = dthe_aead_setauthsize, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.encrypt = dthe_aead_encrypt, + .base.decrypt = dthe_aead_decrypt, + .base.chunksize = AES_BLOCK_SIZE, + .base.ivsize = AES_IV_SIZE, + .base.base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-dthev2", + .cra_priority = 299, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct 
dthe_tfm_ctx), + .cra_reqsize = sizeof(struct dthe_aes_req_ctx), + .cra_module = THIS_MODULE, + }, + .op.do_one_request = dthe_aead_run, + }, /* CCM AES */ }; int dthe_register_aes_algs(void) diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h index 8514f0df8ac3..d4a3b9c18bbc 100644 --- a/drivers/crypto/ti/dthev2-common.h +++ b/drivers/crypto/ti/dthev2-common.h @@ -39,6 +39,7 @@ enum dthe_aes_mode { DTHE_AES_CTR, DTHE_AES_XTS, DTHE_AES_GCM, + DTHE_AES_CCM, }; /* Driver specific struct definitions */ From 5c8009f3c1885d5d996acdcd7e884aff25ac26a4 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Fri, 20 Mar 2026 22:19:23 +0100 Subject: [PATCH 075/129] crypto: inside-secure/eip93 - make it selectable for ECONET Econet SoCs feature an integrated EIP93 in revision 3.0p1. It is identical to the one used by the Airoha AN7581 and the MediaTek MT7621. Ahmed reports that the EN7528 passes testmgr's self-tests. This driver should also work on other little endian Econet SoCs. 
CC: Ahmed Naseef Signed-off-by: Aleksander Jan Bajkowski Reviewed-by: Antoine Tenart Tested-by: Ahmed Naseef Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/eip93/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig index 8353d3d7ec9b..29523f6927dd 100644 --- a/drivers/crypto/inside-secure/eip93/Kconfig +++ b/drivers/crypto/inside-secure/eip93/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config CRYPTO_DEV_EIP93 tristate "Support for EIP93 crypto HW accelerators" - depends on SOC_MT7621 || ARCH_AIROHA ||COMPILE_TEST + depends on SOC_MT7621 || ARCH_AIROHA || ECONET || COMPILE_TEST select CRYPTO_LIB_AES select CRYPTO_LIB_DES select CRYPTO_SKCIPHER From 07fa25957a18cff13f1943ecd213c88c0878b968 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 20 Mar 2026 15:17:27 -0700 Subject: [PATCH 076/129] crypto: cryptd - Remove unused functions Many functions in cryptd.c no longer have any caller. Remove them. Also remove several associated structs and includes. Finally, inline cryptd_shash_desc() into its only caller, allowing it to be removed too. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/cryptd.c | 112 +--------------------------------------- include/crypto/cryptd.h | 33 ------------ 2 files changed, 2 insertions(+), 143 deletions(-) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index cd38f4676176..aba9fe0f23b4 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -646,7 +646,8 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct shash_desc *desc = cryptd_shash_desc(req); + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct shash_desc *desc = &rctx->desc; desc->tfm = ctx->child; @@ -952,115 +953,6 @@ static struct crypto_template cryptd_tmpl = { .module = THIS_MODULE, }; -struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, - u32 type, u32 mask) -{ - char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; - struct cryptd_skcipher_ctx *ctx; - struct crypto_skcipher *tfm; - - if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, - "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-EINVAL); - - tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { - crypto_free_skcipher(tfm); - return ERR_PTR(-EINVAL); - } - - ctx = crypto_skcipher_ctx(tfm); - refcount_set(&ctx->refcnt, 1); - - return container_of(tfm, struct cryptd_skcipher, base); -} -EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher); - -struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) -{ - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - - return ctx->child; -} -EXPORT_SYMBOL_GPL(cryptd_skcipher_child); - -bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm) -{ - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - - return refcount_read(&ctx->refcnt) - 1; -} -EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); 
- -void cryptd_free_skcipher(struct cryptd_skcipher *tfm) -{ - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - - if (refcount_dec_and_test(&ctx->refcnt)) - crypto_free_skcipher(&tfm->base); -} -EXPORT_SYMBOL_GPL(cryptd_free_skcipher); - -struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, - u32 type, u32 mask) -{ - char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; - struct cryptd_hash_ctx *ctx; - struct crypto_ahash *tfm; - - if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, - "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-EINVAL); - tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { - crypto_free_ahash(tfm); - return ERR_PTR(-EINVAL); - } - - ctx = crypto_ahash_ctx(tfm); - refcount_set(&ctx->refcnt, 1); - - return __cryptd_ahash_cast(tfm); -} -EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); - -struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) -{ - struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); - - return ctx->child; -} -EXPORT_SYMBOL_GPL(cryptd_ahash_child); - -struct shash_desc *cryptd_shash_desc(struct ahash_request *req) -{ - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - return &rctx->desc; -} -EXPORT_SYMBOL_GPL(cryptd_shash_desc); - -bool cryptd_ahash_queued(struct cryptd_ahash *tfm) -{ - struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); - - return refcount_read(&ctx->refcnt) - 1; -} -EXPORT_SYMBOL_GPL(cryptd_ahash_queued); - -void cryptd_free_ahash(struct cryptd_ahash *tfm) -{ - struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); - - if (refcount_dec_and_test(&ctx->refcnt)) - crypto_free_ahash(&tfm->base); -} -EXPORT_SYMBOL_GPL(cryptd_free_ahash); - struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, u32 type, u32 mask) { diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index 796d986e58e1..29c5878a3609 100644 --- 
a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h @@ -16,39 +16,6 @@ #include #include -#include -#include - -struct cryptd_skcipher { - struct crypto_skcipher base; -}; - -/* alg_name should be algorithm to be cryptd-ed */ -struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, - u32 type, u32 mask); -struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); -/* Must be called without moving CPUs. */ -bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); -void cryptd_free_skcipher(struct cryptd_skcipher *tfm); - -struct cryptd_ahash { - struct crypto_ahash base; -}; - -static inline struct cryptd_ahash *__cryptd_ahash_cast( - struct crypto_ahash *tfm) -{ - return (struct cryptd_ahash *)tfm; -} - -/* alg_name should be algorithm to be cryptd-ed */ -struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, - u32 type, u32 mask); -struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); -struct shash_desc *cryptd_shash_desc(struct ahash_request *req); -/* Must be called without moving CPUs. */ -bool cryptd_ahash_queued(struct cryptd_ahash *tfm); -void cryptd_free_ahash(struct cryptd_ahash *tfm); struct cryptd_aead { struct crypto_aead base; From 67b53a660e6bf0da2fa8d8872e897a14d8059eaf Mon Sep 17 00:00:00 2001 From: Wenkai Lin Date: Sat, 21 Mar 2026 15:00:38 +0800 Subject: [PATCH 077/129] crypto: hisilicon/sec2 - prevent req used-after-free for sec During packet transmission, if the system is under heavy load, the hardware might complete processing the packet and free the request memory (req) before the transmission function finishes. If the software subsequently accesses this req, a use-after-free error will occur. The qp_ctx memory exists throughout the packet sending process, so replace the req with the qp_ctx. 
Fixes: f0ae287c5045 ("crypto: hisilicon/sec2 - implement full backlog mode for sec") Signed-off-by: Wenkai Lin Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 15174216d8c4..2471a4dd0b50 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -230,7 +230,7 @@ static int qp_send_message(struct sec_req *req) spin_unlock_bh(&qp_ctx->req_lock); - atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt); + atomic64_inc(&qp_ctx->ctx->sec->debug.dfx.send_cnt); return -EINPROGRESS; } From 9503ab5a1d0ef4cad2731d88699d7e6bb1a8a85d Mon Sep 17 00:00:00 2001 From: Mieczyslaw Nalewaj Date: Sat, 21 Mar 2026 10:59:37 +0100 Subject: [PATCH 078/129] crypto: inside-secure/eip93 - correct ecb(des-eip93) typo Correct the typo in the name "ecb(des-eip93)". 
Signed-off-by: Mieczyslaw Nalewaj Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/eip93/eip93-cipher.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c index 0713c71ab458..b91586853c27 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-cipher.c +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c @@ -320,7 +320,7 @@ struct eip93_alg_template eip93_alg_ecb_des = { .ivsize = 0, .base = { .cra_name = "ecb(des)", - .cra_driver_name = "ebc(des-eip93)", + .cra_driver_name = "ecb(des-eip93)", .cra_priority = EIP93_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, From fdacdc8cf897703a5a3e8b521448befbb6620034 Mon Sep 17 00:00:00 2001 From: Mieczyslaw Nalewaj Date: Sat, 21 Mar 2026 11:23:06 +0100 Subject: [PATCH 079/129] crypto: inside-secure/eip93 - add missing address terminator character Add the missing > characters to the end of the email address Signed-off-by: Mieczyslaw Nalewaj Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/eip93/eip93-aead.c | 2 +- drivers/crypto/inside-secure/eip93/eip93-aead.h | 2 +- drivers/crypto/inside-secure/eip93/eip93-aes.h | 2 +- drivers/crypto/inside-secure/eip93/eip93-cipher.c | 2 +- drivers/crypto/inside-secure/eip93/eip93-cipher.h | 2 +- drivers/crypto/inside-secure/eip93/eip93-common.c | 2 +- drivers/crypto/inside-secure/eip93/eip93-common.h | 2 +- drivers/crypto/inside-secure/eip93/eip93-des.h | 2 +- drivers/crypto/inside-secure/eip93/eip93-hash.c | 2 +- drivers/crypto/inside-secure/eip93/eip93-hash.h | 2 +- drivers/crypto/inside-secure/eip93/eip93-main.c | 2 +- drivers/crypto/inside-secure/eip93/eip93-main.h | 2 +- drivers/crypto/inside-secure/eip93/eip93-regs.h | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c index 1a08aed5de13..2bbd0af7b0e0 
100644 --- a/drivers/crypto/inside-secure/eip93/eip93-aead.c +++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #include diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h index e2fa8fd39c50..d933a8fbdf04 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-aead.h +++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef _EIP93_AEAD_H_ #define _EIP93_AEAD_H_ diff --git a/drivers/crypto/inside-secure/eip93/eip93-aes.h b/drivers/crypto/inside-secure/eip93/eip93-aes.h index 1d83d39cab2a..82064cc8f5c7 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-aes.h +++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef _EIP93_AES_H_ #define _EIP93_AES_H_ diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c index b91586853c27..4dd7ab7503e8 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-cipher.c +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #include diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h index 6e2545ebd879..47e4e84ff14e 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-cipher.h +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef _EIP93_CIPHER_H_ #define _EIP93_CIPHER_H_ diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c b/drivers/crypto/inside-secure/eip93/eip93-common.c index f4ad6beff15e..6f147014f996 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-common.c 
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.c @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #include diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h index 80964cfa34df..41c43782eb5c 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-common.h +++ b/drivers/crypto/inside-secure/eip93/eip93-common.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef _EIP93_COMMON_H_ diff --git a/drivers/crypto/inside-secure/eip93/eip93-des.h b/drivers/crypto/inside-secure/eip93/eip93-des.h index 74748df04acf..53ffe0f341b8 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-des.h +++ b/drivers/crypto/inside-secure/eip93/eip93-des.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef _EIP93_DES_H_ #define _EIP93_DES_H_ diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c index 2705855475b2..84d3ff2d3836 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-hash.c +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2024 * - * Christian Marangi */ #include diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h index 556f22fc1dd0..29da18d78894 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-hash.h +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef _EIP93_HASH_H_ #define _EIP93_HASH_H_ diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c index 76858bb4fcc2..7dccfdeb7b11 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-main.c +++ b/drivers/crypto/inside-secure/eip93/eip93-main.c @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * 
Christian Marangi */ #include diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h index 79b078f0e5da..990c2401b7ce 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-main.h +++ b/drivers/crypto/inside-secure/eip93/eip93-main.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef _EIP93_MAIN_H_ #define _EIP93_MAIN_H_ diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h index 116b3fbb6ad7..96285ca6fbbe 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-regs.h +++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 - 2021 * * Richard van Schagen - * Christian Marangi */ #ifndef REG_EIP93_H #define REG_EIP93_H From be0240f65705b0b125de60d4fc952c013ef74e26 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 21 Mar 2026 14:14:39 +0100 Subject: [PATCH 080/129] crypto: qce - use memcpy_and_pad in qce_aead_setkey Replace memset() followed by memcpy() with memcpy_and_pad() to simplify the code and to write to ->auth_key only once. 
Signed-off-by: Thorsten Blum Reviewed-by: Konrad Dybcio Signed-off-by: Herbert Xu --- drivers/crypto/qce/aead.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c index 79e92bc3f7d3..9cb11fada2c4 100644 --- a/drivers/crypto/qce/aead.c +++ b/drivers/crypto/qce/aead.c @@ -637,8 +637,8 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen); - memset(ctx->auth_key, 0, sizeof(ctx->auth_key)); - memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen); + memcpy_and_pad(ctx->auth_key, sizeof(ctx->auth_key), + authenc_keys.authkey, authenc_keys.authkeylen, 0); return crypto_aead_setkey(ctx->fallback, key, keylen); } From c8aadd63ab58ee75713ab487730563e7a160cc35 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Tue, 3 Mar 2026 19:48:44 +0100 Subject: [PATCH 081/129] crypto: testmgr - Add test vectors for authenc(hmac(md5),cbc(aes)) Test vectors were generated starting from existing CBC(AES) test vectors (RFC3602, NIST SP800-38A) and adding HMAC(MD5) computed with Python script. Then, the results were double-checked on Mediatek MT7981 (safexcel) and NXP P2020 (talitos). Both platforms pass self-tests. 
Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Herbert Xu --- crypto/testmgr.c | 7 ++ crypto/testmgr.h | 255 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 262 insertions(+) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 9a9647175e4e..30671e7bc349 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4079,6 +4079,13 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .aead = __VECS(aegis128_tv_template) } + }, { + .alg = "authenc(hmac(md5),cbc(aes))", + .generic_driver = "authenc(hmac-md5-lib,cbc(aes-lib))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(hmac_md5_aes_cbc_tv_temp) + } }, { .alg = "authenc(hmac(md5),cbc(des))", .generic_driver = "authenc(hmac-md5-lib,cbc(des-generic))", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 6995a576a15a..9487fafc74c9 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -14562,6 +14562,261 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { }, }; +static const struct aead_testvec hmac_md5_aes_cbc_tv_temp[] = { + { /* RFC 3602 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" + "\x51\x2e\x03\xd5\x34\x12\x00\x06", + .klen = 8 + 16 + 16, + .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" + "\xb4\x22\xda\x80\x2c\x9f\xac\x41", + .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" + "\xb4\x22\xda\x80\x2c\x9f\xac\x41", + .alen = 16, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + "\x27\x08\x94\x2d\xbe\x77\x18\x1a" + "\x22\x10\xf2\x25\x7f\xe9\x0d\x92" + "\xfc\x00\x55\xb1\xd0\xb5\x3a\x74", + .clen = 16 + 16, + }, { /* RFC 3602 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = 
"\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" + "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", + .klen = 8 + 16 + 16, + .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" + "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", + .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" + "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", + .alen = 16, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" + "\x75\x86\x60\x2d\x25\x3c\xff\xf9" + "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" + "\x31\xef\xd1\x5e\x2d\x83\xde\x59" + "\x5c\x63\x6c\xd6\x6e\x96\x8c\x5b", + .clen = 32 + 16, + }, { /* RFC 3602 Case 3 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x6c\x3e\xa0\x47\x76\x30\xce\x21" + "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd", + .klen = 8 + 16 + 16, + .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" + "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", + .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" + "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", + .alen = 16, + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" + "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" + "\x50\x69\x39\x27\x67\x72\xf8\xd5" + "\x02\x1c\x19\x21\x6b\xad\x52\x5c" + "\x85\x79\x69\x5d\x83\xba\x26\x84" + "\xa1\x9e\xc5\x65\x43\xc5\x51\x70" + "\xb5\xc8\x38\xce\xbb\x3b\xc6\x0f", + .clen = 48 + 16, + }, { /* RFC 3602 Case 4 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + 
.key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x56\xe4\x7a\x38\xc5\x59\x89\x74" + "\xbc\x46\x90\x3d\xba\x29\x03\x49", + .klen = 8 + 16 + 16, + .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" + "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", + .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" + "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", + .alen = 16, + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", + .plen = 64, + .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" + "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" + "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" + "\x35\x90\x7a\xa6\x32\xc3\xff\xdf" + "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad" + "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d" + "\x49\xa5\x3e\x87\xf4\xc3\xda\x55" + "\x19\x90\xcc\x2c\x6d\x76\x0f\xd6" + "\x6c\x54\x09\xb1\x3e\x98\x0c\x11", + .clen = 64 + 16, + }, { /* RFC 3602 Case 5 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x90\xd3\x82\xb4\x10\xee\xba\x7a" + "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf", + .klen = 8 + 16 + 16, + .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" + "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", + .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" + "\xe9\x6e\x8c\x08\xab\x46\x57\x63" + "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", + .alen = 24, + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + 
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + "\xa9\x45\x3e\x19\x4e\x12\x08\x49" + "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" + "\x33\x00\x13\xb4\x89\x8d\xc8\x56" + "\xa4\x69\x9e\x52\x3a\x55\xdb\x08" + "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52" + "\x77\x5b\x07\xd1\xdb\x34\xed\x9c" + "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a" + "\xa2\x69\xad\xd0\x47\xad\x2d\x59" + "\x13\xac\x19\xb7\xcf\xba\xd4\xa6" + "\x9f\x6f\xa4\x85\x28\xf1\xc9\xea" + "\xe1\xd0\x7d\x30\x4a\xd0\x81\x12", + .clen = 80 + 16, + }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x18" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 8 + 16 + 24, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .alen = 16, + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" + "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" + "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" + "\x57\x1b\x24\x20\x12\xfb\x7a\xe0" + "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0" + "\x08\xb0\xe2\x79\x88\x59\x88\x81" + "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd" + 
"\xc3\x46\xe5\x2c\x07\x27\x50\xca" + "\x50\x4a\x83\x5f\x72\xd9\x76\x8d", + .clen = 64 + 16, + }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x20" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 8 + 16 + 32, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .alen = 16, + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" + "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" + "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" + "\x39\xf2\x33\x69\xa9\xd9\xba\xcf" + "\xa5\x30\xe2\x63\x04\x23\x14\x61" + "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc" + "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b" + "\x59\x62\x06\x71\x57\xdf\x18\x15" + "\x32\x02\xfa\xce\x2c\xd2\x1a\x8d", + .clen = 64 + 16, + }, +}; + static const struct aead_testvec hmac_md5_ecb_cipher_null_tv_template[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN From b260d53561dd69b29505222ec44cf386ac2c2ca6 Mon Sep 17 00:00:00 2001 From: Ahsan Atta Date: Tue, 24 Mar 2026 11:11:12 +0000 Subject: [PATCH 082/129] crypto: qat - disable 4xxx AE cluster when lead engine is fused off The get_ae_mask() function only disables individual engines based on the fuse register, but engines are organized in clusters of 
4. If the lead engine of a cluster is fused off, the entire cluster must be disabled. Replace the single bitmask inversion with explicit test_bit() checks on the lead engine of each group, disabling the full ADF_AE_GROUP when the lead bit is set. Signed-off-by: Ahsan Atta Reviewed-by: Giovanni Cabiddu Fixes: 8c8268166e834 ("crypto: qat - add qat_4xxx driver") Signed-off-by: Herbert Xu --- .../crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 740f68a36ac5..900f19b90b2d 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -100,9 +100,19 @@ static struct adf_hw_device_class adf_4xxx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses[ADF_FUSECTL4]; + unsigned long fuses = self->fuses[ADF_FUSECTL4]; + u32 mask = ADF_4XXX_ACCELENGINES_MASK; - return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; + if (test_bit(0, &fuses)) + mask &= ~ADF_AE_GROUP_0; + + if (test_bit(4, &fuses)) + mask &= ~ADF_AE_GROUP_1; + + if (test_bit(8, &fuses)) + mask &= ~ADF_AE_GROUP_2; + + return mask; } static u32 get_accel_cap(struct adf_accel_dev *accel_dev) From f216e0f2d1787e662bb6662c9c522185aa3b855a Mon Sep 17 00:00:00 2001 From: Ahsan Atta Date: Tue, 24 Mar 2026 11:12:34 +0000 Subject: [PATCH 083/129] crypto: qat - disable 420xx AE cluster when lead engine is fused off The get_ae_mask() function only disables individual engines based on the fuse register, but engines are organized in clusters of 4. If the lead engine of a cluster is fused off, the entire cluster must be disabled. Replace the single bitmask inversion with explicit test_bit() checks on the lead engine of each group, disabling the full ADF_AE_GROUP when the lead bit is set. 
Signed-off-by: Ahsan Atta Reviewed-by: Giovanni Cabiddu Fixes: fcf60f4bcf54 ("crypto: qat - add support for 420xx devices") Signed-off-by: Herbert Xu --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 35105213d40c..0002122219bc 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -97,9 +97,25 @@ static struct adf_hw_device_class adf_420xx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses[ADF_FUSECTL4]; + unsigned long fuses = self->fuses[ADF_FUSECTL4]; + u32 mask = ADF_420XX_ACCELENGINES_MASK; - return ~me_disable & ADF_420XX_ACCELENGINES_MASK; + if (test_bit(0, &fuses)) + mask &= ~ADF_AE_GROUP_0; + + if (test_bit(4, &fuses)) + mask &= ~ADF_AE_GROUP_1; + + if (test_bit(8, &fuses)) + mask &= ~ADF_AE_GROUP_2; + + if (test_bit(12, &fuses)) + mask &= ~ADF_AE_GROUP_3; + + if (test_bit(16, &fuses)) + mask &= ~ADF_AE_GROUP_4; + + return mask; } static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) From d7f3162ff802b5c39e3c449be8bccd297a4d7267 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 24 Mar 2026 12:27:05 +0100 Subject: [PATCH 084/129] crypto: hifn_795x - Replace snprintf("%s") with strscpy Replace snprintf("%s", ...) with the faster and more direct strscpy(). Check if the return value is less than 0 to detect string truncation. 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index a897541f897b..2da0894f31fd 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -2256,8 +2257,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_templat alg->alg.init = hifn_init_tfm; err = -EINVAL; - if (snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "%s", t->name) >= CRYPTO_MAX_ALG_NAME) + if (strscpy(alg->alg.base.cra_name, t->name) < 0) goto out_free_alg; if (snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s", t->drv_name, dev->name) >= CRYPTO_MAX_ALG_NAME) @@ -2367,7 +2367,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&dev->alg_list); - snprintf(dev->name, sizeof(dev->name), "%s", name); + strscpy(dev->name, name); spin_lock_init(&dev->lock); for (i = 0; i < 3; ++i) { From ee31b703e808f75a98b958305fbf2765a4e172a9 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 24 Mar 2026 12:30:07 +0100 Subject: [PATCH 085/129] crypto: ccp - Replace snprintf("%s") with strscpy Replace snprintf("%s") with the faster and more direct strscpy(). 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-aes-galois.c | 6 +++--- drivers/crypto/ccp/ccp-crypto-aes-xts.c | 6 +++--- drivers/crypto/ccp/ccp-crypto-aes.c | 5 ++--- drivers/crypto/ccp/ccp-crypto-des3.c | 5 ++--- drivers/crypto/ccp/ccp-crypto-rsa.c | 6 +++--- drivers/crypto/ccp/ccp-crypto-sha.c | 5 ++--- 6 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 6c8d1b87d60d..fc14c2e73ccd 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -223,9 +224,8 @@ static int ccp_register_aes_aead(struct list_head *head, /* Copy the defaults and override as necessary */ alg = &ccp_aead->alg; *alg = *def->alg_defaults; - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->driver_name); + strscpy(alg->base.cra_name, def->name); + strscpy(alg->base.cra_driver_name, def->driver_name); alg->base.cra_blocksize = def->blocksize; ret = crypto_register_aead(alg); diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index c7e26ce71156..8e59137284b7 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -239,9 +240,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head, alg = &ccp_alg->alg; - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->drv_name); + strscpy(alg->base.cra_name, def->name); + strscpy(alg->base.cra_driver_name, def->drv_name); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | diff --git 
a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index 01d298350b92..94bccc5d6c78 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -305,9 +305,8 @@ static int ccp_register_aes_alg(struct list_head *head, /* Copy the defaults and override as necessary */ alg = &ccp_alg->alg; *alg = *def->alg_defaults; - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->driver_name); + strscpy(alg->base.cra_name, def->name); + strscpy(alg->base.cra_driver_name, def->driver_name); alg->base.cra_blocksize = def->blocksize; alg->ivsize = def->ivsize; diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index c20b5a6a340a..e26b431a5993 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -193,9 +193,8 @@ static int ccp_register_des3_alg(struct list_head *head, /* Copy the defaults and override as necessary */ alg = &ccp_alg->alg; *alg = *def->alg_defaults; - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->driver_name); + strscpy(alg->base.cra_name, def->name); + strscpy(alg->base.cra_driver_name, def->driver_name); alg->base.cra_blocksize = def->blocksize; alg->ivsize = def->ivsize; diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c index 090adacaaf93..287d7f62026d 100644 --- a/drivers/crypto/ccp/ccp-crypto-rsa.c +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -257,9 +258,8 @@ static int ccp_register_rsa_alg(struct list_head *head, alg = &ccp_alg->alg; *alg = *def->alg_defaults; - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->driver_name); + 
strscpy(alg->base.cra_name, def->name); + strscpy(alg->base.cra_driver_name, def->driver_name); ret = crypto_register_akcipher(alg); if (ret) { pr_err("%s akcipher algorithm registration error (%d)\n", diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 286b2d716236..85058a89f35b 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -484,9 +484,8 @@ static int ccp_register_sha_alg(struct list_head *head, halg->statesize = sizeof(struct ccp_sha_exp_ctx); base = &halg->base; - snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); - snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - def->drv_name); + strscpy(base->cra_name, def->name); + strscpy(base->cra_driver_name, def->drv_name); base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | From 4e04f469b41f527fffa5dc92aae437b1f1d8bff1 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 24 Mar 2026 16:52:11 +0000 Subject: [PATCH 086/129] crypto: qat - use acomp_tfm_ctx() Replace the usage of crypto_acomp_tfm() followed by crypto_tfm_ctx() with a single call to the equivalent acomp_tfm_ctx(). This does not introduce any functional changes. 
Signed-off-by: Giovanni Cabiddu Reviewed-by: Laurent M Coquerel Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index 8b123472b71c..1265177e3a89 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -130,8 +130,8 @@ void qat_comp_alg_callback(void *resp) static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) { + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); struct qat_compression_instance *inst; int node; @@ -151,8 +151,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) { - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); qat_compression_put_instance(ctx->inst); memset(ctx, 0, sizeof(*ctx)); @@ -164,8 +163,7 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi { struct qat_compression_req *qat_req = acomp_request_ctx(areq); struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); struct qat_compression_instance *inst = ctx->inst; gfp_t f = qat_algs_alloc_flags(&areq->base); struct qat_sgl_to_bufl_params params = {0}; From 795c24c677c7a1c12f5768daf22a874a2890662f Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 24 Mar 2026 17:59:40 +0000 Subject: [PATCH 087/129] crypto: qat - fix compression instance leak 
qat_comp_alg_init_tfm() acquires a compression instance via qat_compression_get_instance_node() before calling qat_comp_build_ctx() to initialize the compression context. If qat_comp_build_ctx() fails, the function returns an error without releasing the compression instance, causing a resource leak. When qat_comp_build_ctx() fails, release the compression instance with qat_compression_put_instance() and clear the context to avoid leaving a stale reference to the released instance. The issue was introduced when build_deflate_ctx() (which always returned void) was replaced by qat_comp_build_ctx() (which can return an error) without adding error handling for the failure path. Fixes: cd0e7160f80f ("crypto: qat - refactor compression template logic") Signed-off-by: Giovanni Cabiddu Reviewed-by: Laurent M Coquerel Reviewed-by: Ahsan Atta Reviewed-by: Wojciech Drewek Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index 1265177e3a89..bfc820a08ada 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -133,7 +133,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); struct qat_compression_instance *inst; - int node; + int node, ret; if (tfm->node == NUMA_NO_NODE) node = numa_node_id(); @@ -146,7 +146,13 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) return -EINVAL; ctx->inst = inst; - return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE); + ret = qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE); + if (ret) { + qat_compression_put_instance(inst); + memset(ctx, 0, sizeof(*ctx)); + } + + return ret; } static void 
qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) From ec23d75c4b77ae42af0777ea59599b1d4f611371 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 24 Mar 2026 18:17:23 +0000 Subject: [PATCH 088/129] crypto: qat - fix type mismatch in RAS sysfs show functions ADF_RAS_ERR_CTR_READ() expands to atomic_read(), which returns int. The local variable 'counter' was declared as 'unsigned long', causing a type mismatch on the assignment. The format specifier '%ld' was consequently wrong in two ways: wrong length modifier and wrong signedness. Use int to match the return type of atomic_read() and update the format specifier to '%d' accordingly. Fixes: 532d7f6bc458 ("crypto: qat - add error counters") Signed-off-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../intel/qat/qat_common/adf_sysfs_ras_counters.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c index e97c67c87b3c..6abb57bfd328 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -13,14 +13,14 @@ static ssize_t errors_correctable_show(struct device *dev, char *buf) { struct adf_accel_dev *accel_dev; - unsigned long counter; + int counter; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR); - return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); + return scnprintf(buf, PAGE_SIZE, "%d\n", counter); } static ssize_t errors_nonfatal_show(struct device *dev, @@ -28,14 +28,14 @@ static ssize_t errors_nonfatal_show(struct device *dev, char *buf) { struct adf_accel_dev *accel_dev; - unsigned long counter; + int counter; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; 
counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR); - return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); + return scnprintf(buf, PAGE_SIZE, "%d\n", counter); } static ssize_t errors_fatal_show(struct device *dev, @@ -43,14 +43,14 @@ static ssize_t errors_fatal_show(struct device *dev, char *buf) { struct adf_accel_dev *accel_dev; - unsigned long counter; + int counter; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL); - return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); + return scnprintf(buf, PAGE_SIZE, "%d\n", counter); } static ssize_t reset_error_counters_store(struct device *dev, From 6bd87f2ea5b2d44ed1f43a90f220be34f7c1ce4e Mon Sep 17 00:00:00 2001 From: Atharv Dubey Date: Tue, 24 Mar 2026 18:17:24 +0000 Subject: [PATCH 089/129] crypto: qat - replace scnprintf() with sysfs_emit() Replace scnprintf() with sysfs_emit() in the three RAS error counter sysfs show callbacks. sysfs_emit() is the recommended API for sysfs show functions as per Documentation/filesystems/sysfs.rst; it enforces the PAGE_SIZE limit implicitly, removing the need to pass it explicitly. 
Signed-off-by: Atharv Dubey Signed-off-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c index 6abb57bfd328..ef1420199210 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -20,7 +20,7 @@ static ssize_t errors_correctable_show(struct device *dev, return -EINVAL; counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR); - return scnprintf(buf, PAGE_SIZE, "%d\n", counter); + return sysfs_emit(buf, "%d\n", counter); } static ssize_t errors_nonfatal_show(struct device *dev, @@ -35,7 +35,7 @@ static ssize_t errors_nonfatal_show(struct device *dev, return -EINVAL; counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR); - return scnprintf(buf, PAGE_SIZE, "%d\n", counter); + return sysfs_emit(buf, "%d\n", counter); } static ssize_t errors_fatal_show(struct device *dev, @@ -50,7 +50,7 @@ static ssize_t errors_fatal_show(struct device *dev, return -EINVAL; counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL); - return scnprintf(buf, PAGE_SIZE, "%d\n", counter); + return sysfs_emit(buf, "%d\n", counter); } static ssize_t reset_error_counters_store(struct device *dev, From 590fa5d69c27cfaecd2e8287aec78f902417c877 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 24 Mar 2026 18:29:05 +0000 Subject: [PATCH 090/129] crypto: iaa - fix per-node CPU counter reset in rebalance_wq_table() The cpu counter used to compute the IAA device index is reset to zero at the start of each NUMA node iteration. This causes CPUs on every node to map starting from IAA index 0 instead of continuing from the previous node's last index. 
On multi-node systems, this results in all nodes mapping their CPUs to the same initial set of IAA devices, leaving higher-indexed devices unused. Move the cpu counter initialization before the for_each_node_with_cpus() loop so that the IAA index computation accumulates correctly across all nodes. Fixes: 714ca27e9bf4 ("crypto: iaa - Optimize rebalance_wq_table()") Signed-off-by: Giovanni Cabiddu Acked-by: Vinicius Costa Gomes Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 547abf453d4a..f62b994e18e5 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -906,8 +906,8 @@ static void rebalance_wq_table(void) return; } + cpu = 0; for_each_node_with_cpus(node) { - cpu = 0; node_cpus = cpumask_of_node(node); for_each_cpu(node_cpu, node_cpus) { From 52b84667bbdc656b380983262ac6303caf49ef2c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:14:57 -0700 Subject: [PATCH 091/129] crypto: rng - Add crypto_stdrng_get_bytes() All callers of crypto_get_default_rng() use the following sequence: crypto_get_default_rng() crypto_rng_get_bytes(crypto_default_rng, ...) crypto_put_default_rng() While it may have been intended that callers amortize the cost of getting and putting the "default RNG" (i.e. "stdrng") over multiple calls, in practice that optimization is never used. The callers just want a function that gets random bytes from the "stdrng". Therefore, add such a function: crypto_stdrng_get_bytes(). Importantly, this decouples the callers from the crypto_rng API. That allows a later commit to make this function simply call get_random_bytes_wait() unless the kernel is in "FIPS mode". 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/rng.c | 14 ++++++++++++++ include/crypto/rng.h | 13 +++++++++++++ 2 files changed, 27 insertions(+) diff --git a/crypto/rng.c b/crypto/rng.c index c6165c8eb387..53a268ad5104 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -145,6 +145,20 @@ void crypto_put_default_rng(void) } EXPORT_SYMBOL_GPL(crypto_put_default_rng); +int crypto_stdrng_get_bytes(void *buf, unsigned int len) +{ + int err; + + err = crypto_get_default_rng(); + if (err) + return err; + + err = crypto_rng_get_bytes(crypto_default_rng, buf, len); + crypto_put_default_rng(); + return err; +} +EXPORT_SYMBOL_GPL(crypto_stdrng_get_bytes); + #if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) int crypto_del_default_rng(void) { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index d451b54b322a..db6c3962a7df 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -62,6 +62,19 @@ extern struct crypto_rng *crypto_default_rng; int crypto_get_default_rng(void); void crypto_put_default_rng(void); +/** + * crypto_stdrng_get_bytes() - get cryptographically secure random bytes + * @buf: output buffer holding the random numbers + * @len: length of the output buffer + * + * This function fills the caller-allocated buffer with random numbers using the + * highest-priority "stdrng" algorithm in the crypto_rng subsystem. + * + * Context: May sleep + * Return: 0 function was successful; < 0 if an error occurred + */ +int crypto_stdrng_get_bytes(void *buf, unsigned int len); + /** * DOC: Random number generator API * From 9e9ff291c9776c109a198eab5b5ac3828abf8507 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:14:58 -0700 Subject: [PATCH 092/129] crypto: dh - Use crypto_stdrng_get_bytes() Replace the sequence of crypto_get_default_rng(), crypto_rng_get_bytes(), and crypto_put_default_rng() with the equivalent helper function crypto_stdrng_get_bytes(). 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/dh.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crypto/dh.c b/crypto/dh.c index 8250eeeebd0f..7ad4768716c8 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -388,13 +388,7 @@ static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime, * 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64 * random bits and interpret them as a big endian integer. */ - err = -EFAULT; - if (crypto_get_default_rng()) - goto out_err; - - err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key, - oversampling_size); - crypto_put_default_rng(); + err = crypto_stdrng_get_bytes(key, oversampling_size); if (err) goto out_err; From d6ea871d73abbb6a1e00e71ed5762e394d06cb2b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:14:59 -0700 Subject: [PATCH 093/129] crypto: ecc - Use crypto_stdrng_get_bytes() Replace the sequence of crypto_get_default_rng(), crypto_rng_get_bytes(), and crypto_put_default_rng() with the equivalent helper function crypto_stdrng_get_bytes(). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ecc.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/crypto/ecc.c b/crypto/ecc.c index 08150b14e17e..43b0def3a225 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -1533,16 +1533,11 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, * The maximum security strength identified by NIST SP800-57pt1r4 for * ECC is 256 (N >= 512). * - * This condition is met by the default RNG because it selects a favored - * DRBG with a security strength of 256. + * This condition is met by stdrng because it selects a favored DRBG + * with a security strength of 256. */ - if (crypto_get_default_rng()) - return -EFAULT; - /* Step 3: obtain N returned_bits from the DRBG. 
*/ - err = crypto_rng_get_bytes(crypto_default_rng, - (u8 *)private_key, nbytes); - crypto_put_default_rng(); + err = crypto_stdrng_get_bytes(private_key, nbytes); if (err) return err; From c7373a6ba57e13999af590703b63071e4f13d652 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:00 -0700 Subject: [PATCH 094/129] crypto: geniv - Use crypto_stdrng_get_bytes() Replace the sequence of crypto_get_default_rng(), crypto_rng_get_bytes(), and crypto_put_default_rng() with the equivalent helper function crypto_stdrng_get_bytes(). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/geniv.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crypto/geniv.c b/crypto/geniv.c index 42eff6a7387c..c619a5ad2fc1 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -114,13 +114,7 @@ int aead_init_geniv(struct crypto_aead *aead) spin_lock_init(&ctx->lock); - err = crypto_get_default_rng(); - if (err) - goto out; - - err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, - crypto_aead_ivsize(aead)); - crypto_put_default_rng(); + err = crypto_stdrng_get_bytes(ctx->salt, crypto_aead_ivsize(aead)); if (err) goto out; From c30e1bbc9549b39c33f61310302eb8ccb98c7de4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:01 -0700 Subject: [PATCH 095/129] crypto: hisilicon/hpre - Use crypto_stdrng_get_bytes() Replace the sequence of crypto_get_default_rng(), crypto_rng_get_bytes(), and crypto_put_default_rng() with the equivalent helper function crypto_stdrng_get_bytes(). 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 839c1f677143..09077abbf6ad 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -1327,17 +1327,9 @@ static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params) struct device *dev = ctx->dev; int ret; - ret = crypto_get_default_rng(); - if (ret) { - dev_err(dev, "failed to get default rng, ret = %d!\n", ret); - return ret; - } - - ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key, - params->key_size); - crypto_put_default_rng(); + ret = crypto_stdrng_get_bytes(params->key, params->key_size); if (ret) - dev_err(dev, "failed to get rng, ret = %d!\n", ret); + dev_err(dev, "failed to get random bytes, ret = %d!\n", ret); return ret; } From eba92a2d7e51601adae3d3b37df2e5a8a3c0de5b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:02 -0700 Subject: [PATCH 096/129] crypto: intel/keembay-ocs-ecc - Use crypto_stdrng_get_bytes() Replace the sequence of crypto_get_default_rng(), crypto_rng_get_bytes(), and crypto_put_default_rng() with the equivalent helper function crypto_stdrng_get_bytes(). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/intel/keembay/keembay-ocs-ecc.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c index 59308926399d..e61a95f66a0c 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c @@ -230,12 +230,7 @@ static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev, int rc = 0; /* Generate random nbytes for Simple and Differential SCA protection. 
*/ - rc = crypto_get_default_rng(); - if (rc) - return rc; - - rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes); - crypto_put_default_rng(); + rc = crypto_stdrng_get_bytes(sca, nbytes); if (rc) return rc; @@ -509,14 +504,10 @@ static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey) * The maximum security strength identified by NIST SP800-57pt1r4 for * ECC is 256 (N >= 512). * - * This condition is met by the default RNG because it selects a favored - * DRBG with a security strength of 256. + * This condition is met by stdrng because it selects a favored DRBG + * with a security strength of 256. */ - if (crypto_get_default_rng()) - return -EFAULT; - - rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); - crypto_put_default_rng(); + rc = crypto_stdrng_get_bytes(priv, nbytes); if (rc) goto cleanup; From 117c3c4df23d973357a523f6dce1f63d28bbf8aa Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:03 -0700 Subject: [PATCH 097/129] net: tipc: Use crypto_stdrng_get_bytes() Replace the sequence of crypto_get_default_rng(), crypto_rng_get_bytes(), and crypto_put_default_rng() with the equivalent helper function crypto_stdrng_get_bytes(). 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- net/tipc/crypto.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index d3046a39ff72..6d3b6b89b1d1 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -367,17 +367,8 @@ int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info) */ static int tipc_aead_key_generate(struct tipc_aead_key *skey) { - int rc = 0; - - /* Fill the key's content with a random value via RNG cipher */ - rc = crypto_get_default_rng(); - if (likely(!rc)) { - rc = crypto_rng_get_bytes(crypto_default_rng, skey->key, - skey->keylen); - crypto_put_default_rng(); - } - - return rc; + /* Fill the key's content with a random value via stdrng */ + return crypto_stdrng_get_bytes(skey->key, skey->keylen); } static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead) From bdd2cc93bfd051f05084115faad35f9b5402a194 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:04 -0700 Subject: [PATCH 098/129] crypto: rng - Unexport "default RNG" symbols Now that crypto_default_rng, crypto_get_default_rng(), and crypto_put_default_rng() have no users outside crypto/rng.c itself, unexport them and make them static. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/rng.c | 9 +++------ include/crypto/rng.h | 5 ----- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/crypto/rng.c b/crypto/rng.c index 53a268ad5104..f52f4793f9ea 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -24,8 +24,7 @@ #include "internal.h" static DEFINE_MUTEX(crypto_default_rng_lock); -struct crypto_rng *crypto_default_rng; -EXPORT_SYMBOL_GPL(crypto_default_rng); +static struct crypto_rng *crypto_default_rng; static int crypto_default_rng_refcnt; int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) @@ -106,7 +105,7 @@ struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alloc_rng); -int crypto_get_default_rng(void) +static int crypto_get_default_rng(void) { struct crypto_rng *rng; int err; @@ -135,15 +134,13 @@ unlock: return err; } -EXPORT_SYMBOL_GPL(crypto_get_default_rng); -void crypto_put_default_rng(void) +static void crypto_put_default_rng(void) { mutex_lock(&crypto_default_rng_lock); crypto_default_rng_refcnt--; mutex_unlock(&crypto_default_rng_lock); } -EXPORT_SYMBOL_GPL(crypto_put_default_rng); int crypto_stdrng_get_bytes(void *buf, unsigned int len) { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index db6c3962a7df..f61e037afed9 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -57,11 +57,6 @@ struct crypto_rng { struct crypto_tfm base; }; -extern struct crypto_rng *crypto_default_rng; - -int crypto_get_default_rng(void); -void crypto_put_default_rng(void); - /** * crypto_stdrng_get_bytes() - get cryptographically secure random bytes * @buf: output buffer holding the random numbers From 65b3c2f6278516397bebcdbf4698bd3102120ca5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:05 -0700 Subject: [PATCH 099/129] crypto: rng - Make crypto_stdrng_get_bytes() use normal RNG in non-FIPS mode "stdrng" is needed only in "FIPS mode". 
Therefore, make crypto_stdrng_get_bytes() delegate to either the normal Linux RNG or to "stdrng", depending on the current mode. This will eliminate the need to built the SP800-90A DRBG and its dependencies into CRYPTO_FIPS=n kernels. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/rng.c | 4 ++-- include/crypto/rng.h | 15 +++++++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/crypto/rng.c b/crypto/rng.c index f52f4793f9ea..1d4b9177bad4 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -142,7 +142,7 @@ static void crypto_put_default_rng(void) mutex_unlock(&crypto_default_rng_lock); } -int crypto_stdrng_get_bytes(void *buf, unsigned int len) +int __crypto_stdrng_get_bytes(void *buf, unsigned int len) { int err; @@ -154,7 +154,7 @@ int crypto_stdrng_get_bytes(void *buf, unsigned int len) crypto_put_default_rng(); return err; } -EXPORT_SYMBOL_GPL(crypto_stdrng_get_bytes); +EXPORT_SYMBOL_GPL(__crypto_stdrng_get_bytes); #if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) int crypto_del_default_rng(void) diff --git a/include/crypto/rng.h b/include/crypto/rng.h index f61e037afed9..07f494b2c881 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include struct crypto_rng; @@ -57,18 +59,27 @@ struct crypto_rng { struct crypto_tfm base; }; +int __crypto_stdrng_get_bytes(void *buf, unsigned int len); + /** * crypto_stdrng_get_bytes() - get cryptographically secure random bytes * @buf: output buffer holding the random numbers * @len: length of the output buffer * * This function fills the caller-allocated buffer with random numbers using the - * highest-priority "stdrng" algorithm in the crypto_rng subsystem. + * normal Linux RNG if fips_enabled=0, or the highest-priority "stdrng" + * algorithm in the crypto_rng subsystem if fips_enabled=1. 
* * Context: May sleep * Return: 0 function was successful; < 0 if an error occurred */ -int crypto_stdrng_get_bytes(void *buf, unsigned int len); +static inline int crypto_stdrng_get_bytes(void *buf, unsigned int len) +{ + might_sleep(); + if (fips_enabled) + return __crypto_stdrng_get_bytes(buf, len); + return get_random_bytes_wait(buf, len); +} /** * DOC: Random number generator API From 7339b0e0b75eb56c3b9402bc831799138d219144 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:06 -0700 Subject: [PATCH 100/129] crypto: fips - Depend on CRYPTO_DRBG=y Currently, the callers of crypto_stdrng_get_bytes() do 'select CRYPTO_RNG_DEFAULT', which does 'select CRYPTO_DRBG_MENU'. However, due to the change in how crypto_stdrng_get_bytes() is implemented, CRYPTO_DRBG_MENU is now needed only when CRYPTO_FIPS. But, 'select CRYPTO_DRBG_MENU if CRYPTO_FIPS' would cause a recursive dependency, since CRYPTO_FIPS 'depends on CRYPTO_DRBG'. Solve this by just making CRYPTO_FIPS depend on CRYPTO_DRBG=y (rather than CRYPTO_DRBG i.e. CRYPTO_DRBG=y || CRYPTO_DRBG=m). The distros that use CRYPTO_FIPS=y already set CRYPTO_DRBG=y anyway, which makes sense. This makes the CRYPTO_RNG_DEFAULT symbol (and its corresponding selection of CRYPTO_DRBG_MENU) unnecessary. A later commit removes it. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index e2b4106ac961..80492538e1f7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -25,7 +25,7 @@ menu "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" - depends on CRYPTO_DRBG && CRYPTO_SELFTESTS + depends on CRYPTO_DRBG=y && CRYPTO_SELFTESTS depends on (MODULE_SIG || !MODULES) help This option enables the fips boot option which is From 4061bc8c03975e6491fbe9e3cd5e53d2c003c812 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 25 Mar 2026 17:15:07 -0700 Subject: [PATCH 101/129] crypto: rng - Don't pull in DRBG when CRYPTO_FIPS=n crypto_stdrng_get_bytes() is now always available: - When CRYPTO_FIPS=n it is an inline function that always calls into the always-built-in drivers/char/random.c. - When CRYPTO_FIPS=y it is an inline function that calls into either random.c or crypto/rng.c, depending on the value of fips_enabled. The former is again always built-in. The latter is built-in as well in this case, due to CRYPTO_FIPS=y. Thus, the CRYPTO_RNG_DEFAULT symbol is no longer needed. Remove it. This makes it so that CRYPTO_DRBG_MENU (and hence also CRYPTO_DRBG, CRYPTO_JITTERENTROPY, and CRYPTO_LIB_SHA3) no longer gets unnecessarily pulled into CRYPTO_FIPS=n kernels. I.e. CRYPTO_FIPS=n kernels are no longer bloated with code that is relevant only to FIPS certifications. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/Kconfig | 7 ------- 1 file changed, 7 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 80492538e1f7..13686f033413 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -109,10 +109,6 @@ config CRYPTO_RNG2 tristate select CRYPTO_ALGAPI2 -config CRYPTO_RNG_DEFAULT - tristate - select CRYPTO_DRBG_MENU - config CRYPTO_AKCIPHER2 tristate select CRYPTO_ALGAPI2 @@ -296,7 +292,6 @@ config CRYPTO_DH config CRYPTO_DH_RFC7919_GROUPS bool "RFC 7919 FFDHE groups" depends on CRYPTO_DH - select CRYPTO_RNG_DEFAULT help FFDHE (Finite-Field-based Diffie-Hellman Ephemeral) groups defined in RFC7919. @@ -308,7 +303,6 @@ config CRYPTO_DH_RFC7919_GROUPS config CRYPTO_ECC tristate - select CRYPTO_RNG_DEFAULT config CRYPTO_ECDH tristate "ECDH (Elliptic Curve Diffie-Hellman)" @@ -804,7 +798,6 @@ config CRYPTO_GENIV tristate select CRYPTO_AEAD select CRYPTO_MANAGER - select CRYPTO_RNG_DEFAULT config CRYPTO_SEQIV tristate "Sequence Number IV Generator" From 622d42ef977adeae3d5a5d0eb893e4aaf1d5330c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 26 Mar 2026 13:12:43 -0700 Subject: [PATCH 102/129] crypto: s390 - Remove des and des3_ede code Since DES and Triple DES are obsolete, there is very little point in maintining architecture-optimized code for them. Remove it. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/s390/configs/debug_defconfig | 1 - arch/s390/configs/defconfig | 1 - arch/s390/crypto/Kconfig | 16 - arch/s390/crypto/Makefile | 1 - arch/s390/crypto/des_s390.c | 502 ------------------------------ 5 files changed, 521 deletions(-) delete mode 100644 arch/s390/crypto/des_s390.c diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 98fd0a2f51c6..74e4bb236623 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -811,7 +811,6 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_AES_S390=m -CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_HMAC_S390=m CONFIG_ZCRYPT=m CONFIG_PKEY=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 0f4cedcab3ce..5cb7b715ba6b 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -796,7 +796,6 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_AES_S390=m -CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_HMAC_S390=m CONFIG_ZCRYPT=m CONFIG_PKEY=m diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig index 79a2d0034258..4218d21f1f00 100644 --- a/arch/s390/crypto/Kconfig +++ b/arch/s390/crypto/Kconfig @@ -31,22 +31,6 @@ config CRYPTO_AES_S390 key sizes and XTS mode is hardware accelerated for 256 and 512 bit keys. -config CRYPTO_DES_S390 - tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR" - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - select CRYPTO_LIB_DES - help - Block ciphers: DES (FIPS 46-2) cipher algorithm - Block ciphers: Triple DES EDE (FIPS 46-3) cipher algorithm - Length-preserving ciphers: DES with ECB, CBC, and CTR modes - Length-preserving ciphers: Triple DES EDED with ECB, CBC, and CTR modes - - Architecture: s390 - - As of z990 the ECB and CBC mode are hardware accelerated. - As of z196 the CTR mode is hardware accelerated. 
- config CRYPTO_HMAC_S390 tristate "Keyed-hash message authentication code: HMAC" select CRYPTO_HASH diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index 387a229e1038..bf5e061ebf13 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile @@ -3,7 +3,6 @@ # Cryptographic API # -obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o obj-$(CONFIG_S390_PRNG) += prng.o diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c deleted file mode 100644 index 8e75b83a5ddc..000000000000 --- a/arch/s390/crypto/des_s390.c +++ /dev/null @@ -1,502 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Cryptographic API. - * - * s390 implementation of the DES Cipher Algorithm. - * - * Copyright IBM Corp. 2003, 2011 - * Author(s): Thomas Spatzier - * Jan Glauber (jan.glauber@de.ibm.com) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DES3_KEY_SIZE (3 * DES_KEY_SIZE) - -static u8 *ctrblk; -static DEFINE_MUTEX(ctrblk_lock); - -static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; - -struct s390_des_ctx { - u8 iv[DES_BLOCK_SIZE]; - u8 key[DES3_KEY_SIZE]; -}; - -static int des_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int key_len) -{ - struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); - int err; - - err = crypto_des_verify_key(tfm, key); - if (err) - return err; - - memcpy(ctx->key, key, key_len); - return 0; -} - -static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, - unsigned int key_len) -{ - return des_setkey(crypto_skcipher_tfm(tfm), key, key_len); -} - -static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); - - cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE); -} - -static void s390_des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - struct 
s390_des_ctx *ctx = crypto_tfm_ctx(tfm); - - cpacf_km(CPACF_KM_DEA | CPACF_DECRYPT, - ctx->key, out, in, DES_BLOCK_SIZE); -} - -static struct crypto_alg des_alg = { - .cra_name = "des", - .cra_driver_name = "des-s390", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = DES_KEY_SIZE, - .cia_max_keysize = DES_KEY_SIZE, - .cia_setkey = des_setkey, - .cia_encrypt = s390_des_encrypt, - .cia_decrypt = s390_des_decrypt, - } - } -}; - -static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes, n; - int ret; - - ret = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes) != 0) { - /* only use complete blocks */ - n = nbytes & ~(DES_BLOCK_SIZE - 1); - cpacf_km(fc, ctx->key, walk.dst.virt.addr, - walk.src.virt.addr, n); - ret = skcipher_walk_done(&walk, nbytes - n); - } - return ret; -} - -static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes, n; - int ret; - struct { - u8 iv[DES_BLOCK_SIZE]; - u8 key[DES3_KEY_SIZE]; - } param; - - ret = skcipher_walk_virt(&walk, req, false); - if (ret) - return ret; - memcpy(param.iv, walk.iv, DES_BLOCK_SIZE); - memcpy(param.key, ctx->key, DES3_KEY_SIZE); - while ((nbytes = walk.nbytes) != 0) { - /* only use complete blocks */ - n = nbytes & ~(DES_BLOCK_SIZE - 1); - cpacf_kmc(fc, ¶m, walk.dst.virt.addr, - walk.src.virt.addr, n); - memcpy(walk.iv, param.iv, DES_BLOCK_SIZE); - ret = skcipher_walk_done(&walk, nbytes - n); - } - return ret; -} - -static int ecb_des_encrypt(struct 
skcipher_request *req) -{ - return ecb_desall_crypt(req, CPACF_KM_DEA); -} - -static int ecb_des_decrypt(struct skcipher_request *req) -{ - return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT); -} - -static struct skcipher_alg ecb_des_alg = { - .base.cra_name = "ecb(des)", - .base.cra_driver_name = "ecb-des-s390", - .base.cra_priority = 400, /* combo: des + ecb */ - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_des_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = des_setkey_skcipher, - .encrypt = ecb_des_encrypt, - .decrypt = ecb_des_decrypt, -}; - -static int cbc_des_encrypt(struct skcipher_request *req) -{ - return cbc_desall_crypt(req, CPACF_KMC_DEA); -} - -static int cbc_des_decrypt(struct skcipher_request *req) -{ - return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT); -} - -static struct skcipher_alg cbc_des_alg = { - .base.cra_name = "cbc(des)", - .base.cra_driver_name = "cbc-des-s390", - .base.cra_priority = 400, /* combo: des + cbc */ - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_des_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des_setkey_skcipher, - .encrypt = cbc_des_encrypt, - .decrypt = cbc_des_decrypt, -}; - -/* - * RFC2451: - * - * For DES-EDE3, there is no known need to reject weak or - * complementation keys. Any weakness is obviated by the use of - * multiple keys. - * - * However, if the first two or last two independent 64-bit keys are - * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the - * same as DES. Implementers MUST reject keys that exhibit this - * property. - * - * In fips mode additionally check for all 3 keys are unique. 
- * - */ -static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int key_len) -{ - struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); - int err; - - err = crypto_des3_ede_verify_key(tfm, key); - if (err) - return err; - - memcpy(ctx->key, key, key_len); - return 0; -} - -static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, - unsigned int key_len) -{ - return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len); -} - -static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); - - cpacf_km(CPACF_KM_TDEA_192, ctx->key, dst, src, DES_BLOCK_SIZE); -} - -static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); - - cpacf_km(CPACF_KM_TDEA_192 | CPACF_DECRYPT, - ctx->key, dst, src, DES_BLOCK_SIZE); -} - -static struct crypto_alg des3_alg = { - .cra_name = "des3_ede", - .cra_driver_name = "des3_ede-s390", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = DES3_KEY_SIZE, - .cia_max_keysize = DES3_KEY_SIZE, - .cia_setkey = des3_setkey, - .cia_encrypt = des3_encrypt, - .cia_decrypt = des3_decrypt, - } - } -}; - -static int ecb_des3_encrypt(struct skcipher_request *req) -{ - return ecb_desall_crypt(req, CPACF_KM_TDEA_192); -} - -static int ecb_des3_decrypt(struct skcipher_request *req) -{ - return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT); -} - -static struct skcipher_alg ecb_des3_alg = { - .base.cra_name = "ecb(des3_ede)", - .base.cra_driver_name = "ecb-des3_ede-s390", - .base.cra_priority = 400, /* combo: des3 + ecb */ - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_des_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_KEY_SIZE, - .max_keysize = DES3_KEY_SIZE, - .setkey = 
des3_setkey_skcipher, - .encrypt = ecb_des3_encrypt, - .decrypt = ecb_des3_decrypt, -}; - -static int cbc_des3_encrypt(struct skcipher_request *req) -{ - return cbc_desall_crypt(req, CPACF_KMC_TDEA_192); -} - -static int cbc_des3_decrypt(struct skcipher_request *req) -{ - return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT); -} - -static struct skcipher_alg cbc_des3_alg = { - .base.cra_name = "cbc(des3_ede)", - .base.cra_driver_name = "cbc-des3_ede-s390", - .base.cra_priority = 400, /* combo: des3 + cbc */ - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_des_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_KEY_SIZE, - .max_keysize = DES3_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des3_setkey_skcipher, - .encrypt = cbc_des3_encrypt, - .decrypt = cbc_des3_decrypt, -}; - -static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) -{ - unsigned int i, n; - - /* align to block size, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1); - memcpy(ctrptr, iv, DES_BLOCK_SIZE); - for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) { - memcpy(ctrptr + DES_BLOCK_SIZE, ctrptr, DES_BLOCK_SIZE); - crypto_inc(ctrptr + DES_BLOCK_SIZE, DES_BLOCK_SIZE); - ctrptr += DES_BLOCK_SIZE; - } - return n; -} - -static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm); - u8 buf[DES_BLOCK_SIZE], *ctrptr; - struct skcipher_walk walk; - unsigned int n, nbytes; - int ret, locked; - - locked = mutex_trylock(&ctrblk_lock); - - ret = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) { - n = DES_BLOCK_SIZE; - if (nbytes >= 2*DES_BLOCK_SIZE && locked) - n = __ctrblk_init(ctrblk, walk.iv, nbytes); - ctrptr = (n > DES_BLOCK_SIZE) ? 
ctrblk : walk.iv; - cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr, - walk.src.virt.addr, n, ctrptr); - if (ctrptr == ctrblk) - memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE, - DES_BLOCK_SIZE); - crypto_inc(walk.iv, DES_BLOCK_SIZE); - ret = skcipher_walk_done(&walk, nbytes - n); - } - if (locked) - mutex_unlock(&ctrblk_lock); - /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ - if (nbytes) { - cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr, - DES_BLOCK_SIZE, walk.iv); - memcpy(walk.dst.virt.addr, buf, nbytes); - crypto_inc(walk.iv, DES_BLOCK_SIZE); - ret = skcipher_walk_done(&walk, 0); - } - return ret; -} - -static int ctr_des_crypt(struct skcipher_request *req) -{ - return ctr_desall_crypt(req, CPACF_KMCTR_DEA); -} - -static struct skcipher_alg ctr_des_alg = { - .base.cra_name = "ctr(des)", - .base.cra_driver_name = "ctr-des-s390", - .base.cra_priority = 400, /* combo: des + ctr */ - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct s390_des_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des_setkey_skcipher, - .encrypt = ctr_des_crypt, - .decrypt = ctr_des_crypt, - .chunksize = DES_BLOCK_SIZE, -}; - -static int ctr_des3_crypt(struct skcipher_request *req) -{ - return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192); -} - -static struct skcipher_alg ctr_des3_alg = { - .base.cra_name = "ctr(des3_ede)", - .base.cra_driver_name = "ctr-des3_ede-s390", - .base.cra_priority = 400, /* combo: des3 + ede */ - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct s390_des_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_KEY_SIZE, - .max_keysize = DES3_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des3_setkey_skcipher, - .encrypt = ctr_des3_crypt, - .decrypt = ctr_des3_crypt, - .chunksize = DES_BLOCK_SIZE, -}; - -static struct crypto_alg *des_s390_algs_ptr[2]; -static int des_s390_algs_num; -static struct skcipher_alg 
*des_s390_skciphers_ptr[6]; -static int des_s390_skciphers_num; - -static int des_s390_register_alg(struct crypto_alg *alg) -{ - int ret; - - ret = crypto_register_alg(alg); - if (!ret) - des_s390_algs_ptr[des_s390_algs_num++] = alg; - return ret; -} - -static int des_s390_register_skcipher(struct skcipher_alg *alg) -{ - int ret; - - ret = crypto_register_skcipher(alg); - if (!ret) - des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg; - return ret; -} - -static void des_s390_exit(void) -{ - while (des_s390_algs_num--) - crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]); - while (des_s390_skciphers_num--) - crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]); - if (ctrblk) - free_page((unsigned long) ctrblk); -} - -static int __init des_s390_init(void) -{ - int ret; - - /* Query available functions for KM, KMC and KMCTR */ - cpacf_query(CPACF_KM, &km_functions); - cpacf_query(CPACF_KMC, &kmc_functions); - cpacf_query(CPACF_KMCTR, &kmctr_functions); - - if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) { - ret = des_s390_register_alg(&des_alg); - if (ret) - goto out_err; - ret = des_s390_register_skcipher(&ecb_des_alg); - if (ret) - goto out_err; - } - if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) { - ret = des_s390_register_skcipher(&cbc_des_alg); - if (ret) - goto out_err; - } - if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) { - ret = des_s390_register_alg(&des3_alg); - if (ret) - goto out_err; - ret = des_s390_register_skcipher(&ecb_des3_alg); - if (ret) - goto out_err; - } - if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) { - ret = des_s390_register_skcipher(&cbc_des3_alg); - if (ret) - goto out_err; - } - - if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) || - cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) { - ctrblk = (u8 *) __get_free_page(GFP_KERNEL); - if (!ctrblk) { - ret = -ENOMEM; - goto out_err; - } - } - - if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) { - ret = 
des_s390_register_skcipher(&ctr_des_alg); - if (ret) - goto out_err; - } - if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) { - ret = des_s390_register_skcipher(&ctr_des3_alg); - if (ret) - goto out_err; - } - - return 0; -out_err: - des_s390_exit(); - return ret; -} - -module_cpu_feature_match(S390_CPU_FEATURE_MSA, des_s390_init); -module_exit(des_s390_exit); - -MODULE_ALIAS_CRYPTO("des"); -MODULE_ALIAS_CRYPTO("des3_ede"); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); From 0e28a1a644c15f2f7786ec825220d3f28b8476b5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 26 Mar 2026 13:12:44 -0700 Subject: [PATCH 103/129] crypto: sparc - Remove des and des3_ede code Since DES and Triple DES are obsolete, there is very little point in maintining architecture-optimized code for them. Remove it. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/sparc/crypto/Kconfig | 14 - arch/sparc/crypto/Makefile | 2 - arch/sparc/crypto/des_asm.S | 419 ------------------------------ arch/sparc/crypto/des_glue.c | 482 ----------------------------------- 4 files changed, 917 deletions(-) delete mode 100644 arch/sparc/crypto/des_asm.S delete mode 100644 arch/sparc/crypto/des_glue.c diff --git a/arch/sparc/crypto/Kconfig b/arch/sparc/crypto/Kconfig index c1932ce46c7f..8db3f6eea5dc 100644 --- a/arch/sparc/crypto/Kconfig +++ b/arch/sparc/crypto/Kconfig @@ -2,20 +2,6 @@ menu "Accelerated Cryptographic Algorithms for CPU (sparc64)" -config CRYPTO_DES_SPARC64 - tristate "Ciphers: DES and Triple DES EDE, modes: ECB/CBC" - depends on SPARC64 - select CRYPTO_ALGAPI - select CRYPTO_LIB_DES - select CRYPTO_SKCIPHER - help - Block cipher: DES (FIPS 46-2) cipher algorithm - Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm - Length-preserving ciphers: DES with ECB and CBC modes - Length-preserving ciphers: Tripe DES EDE with ECB and CBC modes - - Architecture: sparc64 - config CRYPTO_AES_SPARC64 tristate "Ciphers: AES, modes: 
ECB, CBC, CTR" depends on SPARC64 diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile index cdf9f4b3efbb..ab4a7765babf 100644 --- a/arch/sparc/crypto/Makefile +++ b/arch/sparc/crypto/Makefile @@ -4,9 +4,7 @@ # obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o -obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o aes-sparc64-y := aes_glue.o -des-sparc64-y := des_asm.o des_glue.o camellia-sparc64-y := camellia_asm.o camellia_glue.o diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S deleted file mode 100644 index d534446cbef9..000000000000 --- a/arch/sparc/crypto/des_asm.S +++ /dev/null @@ -1,419 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include -#include -#include - - .align 32 -ENTRY(des_sparc64_key_expand) - /* %o0=input_key, %o1=output_key */ - VISEntryHalf - ld [%o0 + 0x00], %f0 - ld [%o0 + 0x04], %f1 - DES_KEXPAND(0, 0, 0) - DES_KEXPAND(0, 1, 2) - DES_KEXPAND(2, 3, 6) - DES_KEXPAND(2, 2, 4) - DES_KEXPAND(6, 3, 10) - DES_KEXPAND(6, 2, 8) - DES_KEXPAND(10, 3, 14) - DES_KEXPAND(10, 2, 12) - DES_KEXPAND(14, 1, 16) - DES_KEXPAND(16, 3, 20) - DES_KEXPAND(16, 2, 18) - DES_KEXPAND(20, 3, 24) - DES_KEXPAND(20, 2, 22) - DES_KEXPAND(24, 3, 28) - DES_KEXPAND(24, 2, 26) - DES_KEXPAND(28, 1, 30) - std %f0, [%o1 + 0x00] - std %f2, [%o1 + 0x08] - std %f4, [%o1 + 0x10] - std %f6, [%o1 + 0x18] - std %f8, [%o1 + 0x20] - std %f10, [%o1 + 0x28] - std %f12, [%o1 + 0x30] - std %f14, [%o1 + 0x38] - std %f16, [%o1 + 0x40] - std %f18, [%o1 + 0x48] - std %f20, [%o1 + 0x50] - std %f22, [%o1 + 0x58] - std %f24, [%o1 + 0x60] - std %f26, [%o1 + 0x68] - std %f28, [%o1 + 0x70] - std %f30, [%o1 + 0x78] - retl - VISExitHalf -ENDPROC(des_sparc64_key_expand) - - .align 32 -ENTRY(des_sparc64_crypt) - /* %o0=key, %o1=input, %o2=output */ - VISEntry - ldd [%o1 + 0x00], %f32 - ldd [%o0 + 0x00], %f0 - ldd [%o0 + 0x08], %f2 - ldd [%o0 + 0x10], %f4 - ldd [%o0 + 0x18], %f6 - ldd [%o0 + 0x20], %f8 - ldd 
[%o0 + 0x28], %f10 - ldd [%o0 + 0x30], %f12 - ldd [%o0 + 0x38], %f14 - ldd [%o0 + 0x40], %f16 - ldd [%o0 + 0x48], %f18 - ldd [%o0 + 0x50], %f20 - ldd [%o0 + 0x58], %f22 - ldd [%o0 + 0x60], %f24 - ldd [%o0 + 0x68], %f26 - ldd [%o0 + 0x70], %f28 - ldd [%o0 + 0x78], %f30 - DES_IP(32, 32) - DES_ROUND(0, 2, 32, 32) - DES_ROUND(4, 6, 32, 32) - DES_ROUND(8, 10, 32, 32) - DES_ROUND(12, 14, 32, 32) - DES_ROUND(16, 18, 32, 32) - DES_ROUND(20, 22, 32, 32) - DES_ROUND(24, 26, 32, 32) - DES_ROUND(28, 30, 32, 32) - DES_IIP(32, 32) - std %f32, [%o2 + 0x00] - retl - VISExit -ENDPROC(des_sparc64_crypt) - - .align 32 -ENTRY(des_sparc64_load_keys) - /* %o0=key */ - VISEntry - ldd [%o0 + 0x00], %f0 - ldd [%o0 + 0x08], %f2 - ldd [%o0 + 0x10], %f4 - ldd [%o0 + 0x18], %f6 - ldd [%o0 + 0x20], %f8 - ldd [%o0 + 0x28], %f10 - ldd [%o0 + 0x30], %f12 - ldd [%o0 + 0x38], %f14 - ldd [%o0 + 0x40], %f16 - ldd [%o0 + 0x48], %f18 - ldd [%o0 + 0x50], %f20 - ldd [%o0 + 0x58], %f22 - ldd [%o0 + 0x60], %f24 - ldd [%o0 + 0x68], %f26 - ldd [%o0 + 0x70], %f28 - retl - ldd [%o0 + 0x78], %f30 -ENDPROC(des_sparc64_load_keys) - - .align 32 -ENTRY(des_sparc64_ecb_crypt) - /* %o0=input, %o1=output, %o2=len */ -1: ldd [%o0 + 0x00], %f32 - add %o0, 0x08, %o0 - DES_IP(32, 32) - DES_ROUND(0, 2, 32, 32) - DES_ROUND(4, 6, 32, 32) - DES_ROUND(8, 10, 32, 32) - DES_ROUND(12, 14, 32, 32) - DES_ROUND(16, 18, 32, 32) - DES_ROUND(20, 22, 32, 32) - DES_ROUND(24, 26, 32, 32) - DES_ROUND(28, 30, 32, 32) - DES_IIP(32, 32) - std %f32, [%o1 + 0x00] - subcc %o2, 0x08, %o2 - bne,pt %icc, 1b - add %o1, 0x08, %o1 - retl - nop -ENDPROC(des_sparc64_ecb_crypt) - - .align 32 -ENTRY(des_sparc64_cbc_encrypt) - /* %o0=input, %o1=output, %o2=len, %o3=IV */ - ldd [%o3 + 0x00], %f32 -1: ldd [%o0 + 0x00], %f34 - fxor %f32, %f34, %f32 - DES_IP(32, 32) - DES_ROUND(0, 2, 32, 32) - DES_ROUND(4, 6, 32, 32) - DES_ROUND(8, 10, 32, 32) - DES_ROUND(12, 14, 32, 32) - DES_ROUND(16, 18, 32, 32) - DES_ROUND(20, 22, 32, 32) - DES_ROUND(24, 26, 32, 32) - 
DES_ROUND(28, 30, 32, 32) - DES_IIP(32, 32) - std %f32, [%o1 + 0x00] - add %o0, 0x08, %o0 - subcc %o2, 0x08, %o2 - bne,pt %icc, 1b - add %o1, 0x08, %o1 - retl - std %f32, [%o3 + 0x00] -ENDPROC(des_sparc64_cbc_encrypt) - - .align 32 -ENTRY(des_sparc64_cbc_decrypt) - /* %o0=input, %o1=output, %o2=len, %o3=IV */ - ldd [%o3 + 0x00], %f34 -1: ldd [%o0 + 0x00], %f36 - DES_IP(36, 32) - DES_ROUND(0, 2, 32, 32) - DES_ROUND(4, 6, 32, 32) - DES_ROUND(8, 10, 32, 32) - DES_ROUND(12, 14, 32, 32) - DES_ROUND(16, 18, 32, 32) - DES_ROUND(20, 22, 32, 32) - DES_ROUND(24, 26, 32, 32) - DES_ROUND(28, 30, 32, 32) - DES_IIP(32, 32) - fxor %f32, %f34, %f32 - fsrc2 %f36, %f34 - std %f32, [%o1 + 0x00] - add %o0, 0x08, %o0 - subcc %o2, 0x08, %o2 - bne,pt %icc, 1b - add %o1, 0x08, %o1 - retl - std %f36, [%o3 + 0x00] -ENDPROC(des_sparc64_cbc_decrypt) - - .align 32 -ENTRY(des3_ede_sparc64_crypt) - /* %o0=key, %o1=input, %o2=output */ - VISEntry - ldd [%o1 + 0x00], %f32 - ldd [%o0 + 0x00], %f0 - ldd [%o0 + 0x08], %f2 - ldd [%o0 + 0x10], %f4 - ldd [%o0 + 0x18], %f6 - ldd [%o0 + 0x20], %f8 - ldd [%o0 + 0x28], %f10 - ldd [%o0 + 0x30], %f12 - ldd [%o0 + 0x38], %f14 - ldd [%o0 + 0x40], %f16 - ldd [%o0 + 0x48], %f18 - ldd [%o0 + 0x50], %f20 - ldd [%o0 + 0x58], %f22 - ldd [%o0 + 0x60], %f24 - ldd [%o0 + 0x68], %f26 - ldd [%o0 + 0x70], %f28 - ldd [%o0 + 0x78], %f30 - DES_IP(32, 32) - DES_ROUND(0, 2, 32, 32) - ldd [%o0 + 0x80], %f0 - ldd [%o0 + 0x88], %f2 - DES_ROUND(4, 6, 32, 32) - ldd [%o0 + 0x90], %f4 - ldd [%o0 + 0x98], %f6 - DES_ROUND(8, 10, 32, 32) - ldd [%o0 + 0xa0], %f8 - ldd [%o0 + 0xa8], %f10 - DES_ROUND(12, 14, 32, 32) - ldd [%o0 + 0xb0], %f12 - ldd [%o0 + 0xb8], %f14 - DES_ROUND(16, 18, 32, 32) - ldd [%o0 + 0xc0], %f16 - ldd [%o0 + 0xc8], %f18 - DES_ROUND(20, 22, 32, 32) - ldd [%o0 + 0xd0], %f20 - ldd [%o0 + 0xd8], %f22 - DES_ROUND(24, 26, 32, 32) - ldd [%o0 + 0xe0], %f24 - ldd [%o0 + 0xe8], %f26 - DES_ROUND(28, 30, 32, 32) - ldd [%o0 + 0xf0], %f28 - ldd [%o0 + 0xf8], %f30 - DES_IIP(32, 32) - 
DES_IP(32, 32) - DES_ROUND(0, 2, 32, 32) - ldd [%o0 + 0x100], %f0 - ldd [%o0 + 0x108], %f2 - DES_ROUND(4, 6, 32, 32) - ldd [%o0 + 0x110], %f4 - ldd [%o0 + 0x118], %f6 - DES_ROUND(8, 10, 32, 32) - ldd [%o0 + 0x120], %f8 - ldd [%o0 + 0x128], %f10 - DES_ROUND(12, 14, 32, 32) - ldd [%o0 + 0x130], %f12 - ldd [%o0 + 0x138], %f14 - DES_ROUND(16, 18, 32, 32) - ldd [%o0 + 0x140], %f16 - ldd [%o0 + 0x148], %f18 - DES_ROUND(20, 22, 32, 32) - ldd [%o0 + 0x150], %f20 - ldd [%o0 + 0x158], %f22 - DES_ROUND(24, 26, 32, 32) - ldd [%o0 + 0x160], %f24 - ldd [%o0 + 0x168], %f26 - DES_ROUND(28, 30, 32, 32) - ldd [%o0 + 0x170], %f28 - ldd [%o0 + 0x178], %f30 - DES_IIP(32, 32) - DES_IP(32, 32) - DES_ROUND(0, 2, 32, 32) - DES_ROUND(4, 6, 32, 32) - DES_ROUND(8, 10, 32, 32) - DES_ROUND(12, 14, 32, 32) - DES_ROUND(16, 18, 32, 32) - DES_ROUND(20, 22, 32, 32) - DES_ROUND(24, 26, 32, 32) - DES_ROUND(28, 30, 32, 32) - DES_IIP(32, 32) - - std %f32, [%o2 + 0x00] - retl - VISExit -ENDPROC(des3_ede_sparc64_crypt) - - .align 32 -ENTRY(des3_ede_sparc64_load_keys) - /* %o0=key */ - VISEntry - ldd [%o0 + 0x00], %f0 - ldd [%o0 + 0x08], %f2 - ldd [%o0 + 0x10], %f4 - ldd [%o0 + 0x18], %f6 - ldd [%o0 + 0x20], %f8 - ldd [%o0 + 0x28], %f10 - ldd [%o0 + 0x30], %f12 - ldd [%o0 + 0x38], %f14 - ldd [%o0 + 0x40], %f16 - ldd [%o0 + 0x48], %f18 - ldd [%o0 + 0x50], %f20 - ldd [%o0 + 0x58], %f22 - ldd [%o0 + 0x60], %f24 - ldd [%o0 + 0x68], %f26 - ldd [%o0 + 0x70], %f28 - ldd [%o0 + 0x78], %f30 - ldd [%o0 + 0x80], %f32 - ldd [%o0 + 0x88], %f34 - ldd [%o0 + 0x90], %f36 - ldd [%o0 + 0x98], %f38 - ldd [%o0 + 0xa0], %f40 - ldd [%o0 + 0xa8], %f42 - ldd [%o0 + 0xb0], %f44 - ldd [%o0 + 0xb8], %f46 - ldd [%o0 + 0xc0], %f48 - ldd [%o0 + 0xc8], %f50 - ldd [%o0 + 0xd0], %f52 - ldd [%o0 + 0xd8], %f54 - ldd [%o0 + 0xe0], %f56 - retl - ldd [%o0 + 0xe8], %f58 -ENDPROC(des3_ede_sparc64_load_keys) - -#define DES3_LOOP_BODY(X) \ - DES_IP(X, X) \ - DES_ROUND(0, 2, X, X) \ - DES_ROUND(4, 6, X, X) \ - DES_ROUND(8, 10, X, X) \ - 
DES_ROUND(12, 14, X, X) \ - DES_ROUND(16, 18, X, X) \ - ldd [%o0 + 0xf0], %f16; \ - ldd [%o0 + 0xf8], %f18; \ - DES_ROUND(20, 22, X, X) \ - ldd [%o0 + 0x100], %f20; \ - ldd [%o0 + 0x108], %f22; \ - DES_ROUND(24, 26, X, X) \ - ldd [%o0 + 0x110], %f24; \ - ldd [%o0 + 0x118], %f26; \ - DES_ROUND(28, 30, X, X) \ - ldd [%o0 + 0x120], %f28; \ - ldd [%o0 + 0x128], %f30; \ - DES_IIP(X, X) \ - DES_IP(X, X) \ - DES_ROUND(32, 34, X, X) \ - ldd [%o0 + 0x130], %f0; \ - ldd [%o0 + 0x138], %f2; \ - DES_ROUND(36, 38, X, X) \ - ldd [%o0 + 0x140], %f4; \ - ldd [%o0 + 0x148], %f6; \ - DES_ROUND(40, 42, X, X) \ - ldd [%o0 + 0x150], %f8; \ - ldd [%o0 + 0x158], %f10; \ - DES_ROUND(44, 46, X, X) \ - ldd [%o0 + 0x160], %f12; \ - ldd [%o0 + 0x168], %f14; \ - DES_ROUND(48, 50, X, X) \ - DES_ROUND(52, 54, X, X) \ - DES_ROUND(56, 58, X, X) \ - DES_ROUND(16, 18, X, X) \ - ldd [%o0 + 0x170], %f16; \ - ldd [%o0 + 0x178], %f18; \ - DES_IIP(X, X) \ - DES_IP(X, X) \ - DES_ROUND(20, 22, X, X) \ - ldd [%o0 + 0x50], %f20; \ - ldd [%o0 + 0x58], %f22; \ - DES_ROUND(24, 26, X, X) \ - ldd [%o0 + 0x60], %f24; \ - ldd [%o0 + 0x68], %f26; \ - DES_ROUND(28, 30, X, X) \ - ldd [%o0 + 0x70], %f28; \ - ldd [%o0 + 0x78], %f30; \ - DES_ROUND(0, 2, X, X) \ - ldd [%o0 + 0x00], %f0; \ - ldd [%o0 + 0x08], %f2; \ - DES_ROUND(4, 6, X, X) \ - ldd [%o0 + 0x10], %f4; \ - ldd [%o0 + 0x18], %f6; \ - DES_ROUND(8, 10, X, X) \ - ldd [%o0 + 0x20], %f8; \ - ldd [%o0 + 0x28], %f10; \ - DES_ROUND(12, 14, X, X) \ - ldd [%o0 + 0x30], %f12; \ - ldd [%o0 + 0x38], %f14; \ - DES_ROUND(16, 18, X, X) \ - ldd [%o0 + 0x40], %f16; \ - ldd [%o0 + 0x48], %f18; \ - DES_IIP(X, X) - - .align 32 -ENTRY(des3_ede_sparc64_ecb_crypt) - /* %o0=key, %o1=input, %o2=output, %o3=len */ -1: ldd [%o1 + 0x00], %f60 - DES3_LOOP_BODY(60) - std %f60, [%o2 + 0x00] - add %o1, 0x08, %o1 - subcc %o3, 0x08, %o3 - bne,pt %icc, 1b - add %o2, 0x08, %o2 - retl - nop -ENDPROC(des3_ede_sparc64_ecb_crypt) - - .align 32 -ENTRY(des3_ede_sparc64_cbc_encrypt) - /* %o0=key, 
%o1=input, %o2=output, %o3=len, %o4=IV */ - ldd [%o4 + 0x00], %f60 -1: ldd [%o1 + 0x00], %f62 - fxor %f60, %f62, %f60 - DES3_LOOP_BODY(60) - std %f60, [%o2 + 0x00] - add %o1, 0x08, %o1 - subcc %o3, 0x08, %o3 - bne,pt %icc, 1b - add %o2, 0x08, %o2 - retl - std %f60, [%o4 + 0x00] -ENDPROC(des3_ede_sparc64_cbc_encrypt) - - .align 32 -ENTRY(des3_ede_sparc64_cbc_decrypt) - /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */ - ldd [%o4 + 0x00], %f62 -1: ldx [%o1 + 0x00], %g1 - MOVXTOD_G1_F60 - DES3_LOOP_BODY(60) - fxor %f62, %f60, %f60 - MOVXTOD_G1_F62 - std %f60, [%o2 + 0x00] - add %o1, 0x08, %o1 - subcc %o3, 0x08, %o3 - bne,pt %icc, 1b - add %o2, 0x08, %o2 - retl - stx %g1, [%o4 + 0x00] -ENDPROC(des3_ede_sparc64_cbc_decrypt) diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c deleted file mode 100644 index e50ec4cd57cd..000000000000 --- a/arch/sparc/crypto/des_glue.c +++ /dev/null @@ -1,482 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Glue code for DES encryption optimized for sparc64 crypto opcodes. - * - * Copyright (C) 2012 David S. 
Miller - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -struct des_sparc64_ctx { - u64 encrypt_expkey[DES_EXPKEY_WORDS / 2]; - u64 decrypt_expkey[DES_EXPKEY_WORDS / 2]; -}; - -struct des3_ede_sparc64_ctx { - u64 encrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2]; - u64 decrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2]; -}; - -static void encrypt_to_decrypt(u64 *d, const u64 *e) -{ - const u64 *s = e + (DES_EXPKEY_WORDS / 2) - 1; - int i; - - for (i = 0; i < DES_EXPKEY_WORDS / 2; i++) - *d++ = *s--; -} - -extern void des_sparc64_key_expand(const u32 *input_key, u64 *key); - -static int des_set_key(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - struct des_sparc64_ctx *dctx = crypto_tfm_ctx(tfm); - int err; - - /* Even though we have special instructions for key expansion, - * we call des_verify_key() so that we don't have to write our own - * weak key detection code. 
- */ - err = crypto_des_verify_key(tfm, key); - if (err) - return err; - - des_sparc64_key_expand((const u32 *) key, &dctx->encrypt_expkey[0]); - encrypt_to_decrypt(&dctx->decrypt_expkey[0], &dctx->encrypt_expkey[0]); - - return 0; -} - -static int des_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - return des_set_key(crypto_skcipher_tfm(tfm), key, keylen); -} - -extern void des_sparc64_crypt(const u64 *key, const u64 *input, - u64 *output); - -static void sparc_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); - const u64 *K = ctx->encrypt_expkey; - - des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); -} - -static void sparc_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); - const u64 *K = ctx->decrypt_expkey; - - des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); -} - -extern void des_sparc64_load_keys(const u64 *key); - -extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output, - unsigned int len); - -static int __ecb_crypt(struct skcipher_request *req, bool encrypt) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, true); - if (err) - return err; - - if (encrypt) - des_sparc64_load_keys(&ctx->encrypt_expkey[0]); - else - des_sparc64_load_keys(&ctx->decrypt_expkey[0]); - while ((nbytes = walk.nbytes) != 0) { - des_sparc64_ecb_crypt(walk.src.virt.addr, walk.dst.virt.addr, - round_down(nbytes, DES_BLOCK_SIZE)); - err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); - } - fprs_write(0); - return err; -} - -static int ecb_encrypt(struct skcipher_request *req) -{ - return __ecb_crypt(req, true); -} - -static int ecb_decrypt(struct skcipher_request *req) -{ - return __ecb_crypt(req, false); -} - 
-extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output, - unsigned int len, u64 *iv); - -extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output, - unsigned int len, u64 *iv); - -static int __cbc_crypt(struct skcipher_request *req, bool encrypt) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, true); - if (err) - return err; - - if (encrypt) - des_sparc64_load_keys(&ctx->encrypt_expkey[0]); - else - des_sparc64_load_keys(&ctx->decrypt_expkey[0]); - while ((nbytes = walk.nbytes) != 0) { - if (encrypt) - des_sparc64_cbc_encrypt(walk.src.virt.addr, - walk.dst.virt.addr, - round_down(nbytes, - DES_BLOCK_SIZE), - walk.iv); - else - des_sparc64_cbc_decrypt(walk.src.virt.addr, - walk.dst.virt.addr, - round_down(nbytes, - DES_BLOCK_SIZE), - walk.iv); - err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); - } - fprs_write(0); - return err; -} - -static int cbc_encrypt(struct skcipher_request *req) -{ - return __cbc_crypt(req, true); -} - -static int cbc_decrypt(struct skcipher_request *req) -{ - return __cbc_crypt(req, false); -} - -static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm); - u64 k1[DES_EXPKEY_WORDS / 2]; - u64 k2[DES_EXPKEY_WORDS / 2]; - u64 k3[DES_EXPKEY_WORDS / 2]; - int err; - - err = crypto_des3_ede_verify_key(tfm, key); - if (err) - return err; - - des_sparc64_key_expand((const u32 *)key, k1); - key += DES_KEY_SIZE; - des_sparc64_key_expand((const u32 *)key, k2); - key += DES_KEY_SIZE; - des_sparc64_key_expand((const u32 *)key, k3); - - memcpy(&dctx->encrypt_expkey[0], &k1[0], sizeof(k1)); - encrypt_to_decrypt(&dctx->encrypt_expkey[DES_EXPKEY_WORDS / 2], &k2[0]); - memcpy(&dctx->encrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2], - &k3[0], sizeof(k3)); - - 
encrypt_to_decrypt(&dctx->decrypt_expkey[0], &k3[0]); - memcpy(&dctx->decrypt_expkey[DES_EXPKEY_WORDS / 2], - &k2[0], sizeof(k2)); - encrypt_to_decrypt(&dctx->decrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2], - &k1[0]); - - return 0; -} - -static int des3_ede_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - return des3_ede_set_key(crypto_skcipher_tfm(tfm), key, keylen); -} - -extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input, - u64 *output); - -static void sparc_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); - const u64 *K = ctx->encrypt_expkey; - - des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); -} - -static void sparc_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); - const u64 *K = ctx->decrypt_expkey; - - des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); -} - -extern void des3_ede_sparc64_load_keys(const u64 *key); - -extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input, - u64 *output, unsigned int len); - -static int __ecb3_crypt(struct skcipher_request *req, bool encrypt) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - const u64 *K; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, true); - if (err) - return err; - - if (encrypt) - K = &ctx->encrypt_expkey[0]; - else - K = &ctx->decrypt_expkey[0]; - des3_ede_sparc64_load_keys(K); - while ((nbytes = walk.nbytes) != 0) { - des3_ede_sparc64_ecb_crypt(K, walk.src.virt.addr, - walk.dst.virt.addr, - round_down(nbytes, DES_BLOCK_SIZE)); - err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); - } - fprs_write(0); - return err; -} - -static int ecb3_encrypt(struct skcipher_request *req) -{ - return __ecb3_crypt(req, true); -} - 
-static int ecb3_decrypt(struct skcipher_request *req) -{ - return __ecb3_crypt(req, false); -} - -extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input, - u64 *output, unsigned int len, - u64 *iv); - -extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input, - u64 *output, unsigned int len, - u64 *iv); - -static int __cbc3_crypt(struct skcipher_request *req, bool encrypt) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - const u64 *K; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, true); - if (err) - return err; - - if (encrypt) - K = &ctx->encrypt_expkey[0]; - else - K = &ctx->decrypt_expkey[0]; - des3_ede_sparc64_load_keys(K); - while ((nbytes = walk.nbytes) != 0) { - if (encrypt) - des3_ede_sparc64_cbc_encrypt(K, walk.src.virt.addr, - walk.dst.virt.addr, - round_down(nbytes, - DES_BLOCK_SIZE), - walk.iv); - else - des3_ede_sparc64_cbc_decrypt(K, walk.src.virt.addr, - walk.dst.virt.addr, - round_down(nbytes, - DES_BLOCK_SIZE), - walk.iv); - err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); - } - fprs_write(0); - return err; -} - -static int cbc3_encrypt(struct skcipher_request *req) -{ - return __cbc3_crypt(req, true); -} - -static int cbc3_decrypt(struct skcipher_request *req) -{ - return __cbc3_crypt(req, false); -} - -static struct crypto_alg cipher_algs[] = { - { - .cra_name = "des", - .cra_driver_name = "des-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des_sparc64_ctx), - .cra_alignmask = 7, - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = DES_KEY_SIZE, - .cia_max_keysize = DES_KEY_SIZE, - .cia_setkey = des_set_key, - .cia_encrypt = sparc_des_encrypt, - .cia_decrypt = sparc_des_decrypt - } - } - }, { - .cra_name = "des3_ede", - 
.cra_driver_name = "des3_ede-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), - .cra_alignmask = 7, - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = DES3_EDE_KEY_SIZE, - .cia_max_keysize = DES3_EDE_KEY_SIZE, - .cia_setkey = des3_ede_set_key, - .cia_encrypt = sparc_des3_ede_encrypt, - .cia_decrypt = sparc_des3_ede_decrypt - } - } - } -}; - -static struct skcipher_alg skcipher_algs[] = { - { - .base.cra_name = "ecb(des)", - .base.cra_driver_name = "ecb-des-sparc64", - .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct des_sparc64_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = des_set_key_skcipher, - .encrypt = ecb_encrypt, - .decrypt = ecb_decrypt, - }, { - .base.cra_name = "cbc(des)", - .base.cra_driver_name = "cbc-des-sparc64", - .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct des_sparc64_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des_set_key_skcipher, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ecb(des3_ede)", - .base.cra_driver_name = "ecb-des3_ede-sparc64", - .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .setkey = des3_ede_set_key_skcipher, - .encrypt = ecb3_encrypt, - .decrypt = ecb3_decrypt, - }, { - .base.cra_name = "cbc(des3_ede)", - .base.cra_driver_name = 
"cbc-des3_ede-sparc64", - .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = des3_ede_set_key_skcipher, - .encrypt = cbc3_encrypt, - .decrypt = cbc3_decrypt, - } -}; - -static bool __init sparc64_has_des_opcode(void) -{ - unsigned long cfr; - - if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) - return false; - - __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); - if (!(cfr & CFR_DES)) - return false; - - return true; -} - -static int __init des_sparc64_mod_init(void) -{ - int err; - - if (!sparc64_has_des_opcode()) { - pr_info("sparc64 des opcodes not available.\n"); - return -ENODEV; - } - pr_info("Using sparc64 des opcodes optimized DES implementation\n"); - err = crypto_register_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); - if (err) - return err; - err = crypto_register_skciphers(skcipher_algs, - ARRAY_SIZE(skcipher_algs)); - if (err) - crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); - return err; -} - -static void __exit des_sparc64_mod_fini(void) -{ - crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); - crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); -} - -module_init(des_sparc64_mod_init); -module_exit(des_sparc64_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); - -MODULE_ALIAS_CRYPTO("des"); -MODULE_ALIAS_CRYPTO("des3_ede"); - -#include "crop_devid.c" From 9a73869cb55051a2cdd4b039d75298e32014b25f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 26 Mar 2026 13:12:45 -0700 Subject: [PATCH 104/129] crypto: x86 - Remove des and des3_ede code Since DES and Triple DES are obsolete, there is very little point in maintining architecture-optimized code for 
them. Remove it. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/Kconfig | 14 - arch/x86/crypto/Makefile | 3 - arch/x86/crypto/des3_ede-asm_64.S | 831 ------------------------------ arch/x86/crypto/des3_ede_glue.c | 391 -------------- 4 files changed, 1239 deletions(-) delete mode 100644 arch/x86/crypto/des3_ede-asm_64.S delete mode 100644 arch/x86/crypto/des3_ede_glue.c diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 7fb2319a0916..03c5cdfe8eb0 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -99,20 +99,6 @@ config CRYPTO_CAST6_AVX_X86_64 Processes eight blocks in parallel. -config CRYPTO_DES3_EDE_X86_64 - tristate "Ciphers: Triple DES EDE with modes: ECB, CBC" - depends on 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_LIB_DES - imply CRYPTO_CTR - help - Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm - Length-preserving ciphers: Triple DES EDE with ECB and CBC modes - - Architecture: x86_64 - - Processes one or three blocks in parallel. 
- config CRYPTO_SERPENT_SSE2_X86_64 tristate "Ciphers: Serpent with modes: ECB, CBC (SSE2)" depends on 64BIT diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index b21ad0978c52..cb07260f7f4f 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -20,9 +20,6 @@ serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o -obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o -des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o - obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S deleted file mode 100644 index cf21b998e77c..000000000000 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ /dev/null @@ -1,831 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher - * - * Copyright © 2014 Jussi Kivilinna - */ - -#include - -.file "des3_ede-asm_64.S" -.text - -#define s1 .L_s1 -#define s2 ((s1) + (64*8)) -#define s3 ((s2) + (64*8)) -#define s4 ((s3) + (64*8)) -#define s5 ((s4) + (64*8)) -#define s6 ((s5) + (64*8)) -#define s7 ((s6) + (64*8)) -#define s8 ((s7) + (64*8)) - -/* register macros */ -#define CTX %rdi - -#define RL0 %r8 -#define RL1 %r9 -#define RL2 %r10 - -#define RL0d %r8d -#define RL1d %r9d -#define RL2d %r10d - -#define RR0 %r11 -#define RR1 %r12 -#define RR2 %r13 - -#define RR0d %r11d -#define RR1d %r12d -#define RR2d %r13d - -#define RW0 %rax -#define RW1 %rbx -#define RW2 %rcx - -#define RW0d %eax -#define RW1d %ebx -#define RW2d %ecx - -#define RW0bl %al -#define RW1bl %bl -#define RW2bl %cl - -#define RW0bh %ah -#define RW1bh %bh -#define RW2bh %ch - -#define RT0 %r15 -#define RT1 %rsi -#define 
RT2 %r14 -#define RT3 %rdx - -#define RT0d %r15d -#define RT1d %esi -#define RT2d %r14d -#define RT3d %edx - -/*********************************************************************** - * 1-way 3DES - ***********************************************************************/ -#define do_permutation(a, b, offset, mask) \ - movl a, RT0d; \ - shrl $(offset), RT0d; \ - xorl b, RT0d; \ - andl $(mask), RT0d; \ - xorl RT0d, b; \ - shll $(offset), RT0d; \ - xorl RT0d, a; - -#define expand_to_64bits(val, mask) \ - movl val##d, RT0d; \ - rorl $4, RT0d; \ - shlq $32, RT0; \ - orq RT0, val; \ - andq mask, val; - -#define compress_to_64bits(val) \ - movq val, RT0; \ - shrq $32, RT0; \ - roll $4, RT0d; \ - orl RT0d, val##d; - -#define initial_permutation(left, right) \ - do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \ - do_permutation(left##d, right##d, 16, 0x0000ffff); \ - do_permutation(right##d, left##d, 2, 0x33333333); \ - do_permutation(right##d, left##d, 8, 0x00ff00ff); \ - movabs $0x3f3f3f3f3f3f3f3f, RT3; \ - movl left##d, RW0d; \ - roll $1, right##d; \ - xorl right##d, RW0d; \ - andl $0xaaaaaaaa, RW0d; \ - xorl RW0d, left##d; \ - xorl RW0d, right##d; \ - roll $1, left##d; \ - expand_to_64bits(right, RT3); \ - expand_to_64bits(left, RT3); - -#define final_permutation(left, right) \ - compress_to_64bits(right); \ - compress_to_64bits(left); \ - movl right##d, RW0d; \ - rorl $1, left##d; \ - xorl left##d, RW0d; \ - andl $0xaaaaaaaa, RW0d; \ - xorl RW0d, right##d; \ - xorl RW0d, left##d; \ - rorl $1, right##d; \ - do_permutation(right##d, left##d, 8, 0x00ff00ff); \ - do_permutation(right##d, left##d, 2, 0x33333333); \ - do_permutation(left##d, right##d, 16, 0x0000ffff); \ - do_permutation(left##d, right##d, 4, 0x0f0f0f0f); - -#define round1(n, from, to, load_next_key) \ - xorq from, RW0; \ - \ - movzbl RW0bl, RT0d; \ - movzbl RW0bh, RT1d; \ - shrq $16, RW0; \ - movzbl RW0bl, RT2d; \ - movzbl RW0bh, RT3d; \ - shrq $16, RW0; \ - leaq s8(%rip), RW1; \ - movq (RW1, RT0, 8), 
RT0; \ - leaq s6(%rip), RW1; \ - xorq (RW1, RT1, 8), to; \ - movzbl RW0bl, RL1d; \ - movzbl RW0bh, RT1d; \ - shrl $16, RW0d; \ - leaq s4(%rip), RW1; \ - xorq (RW1, RT2, 8), RT0; \ - leaq s2(%rip), RW1; \ - xorq (RW1, RT3, 8), to; \ - movzbl RW0bl, RT2d; \ - movzbl RW0bh, RT3d; \ - leaq s7(%rip), RW1; \ - xorq (RW1, RL1, 8), RT0; \ - leaq s5(%rip), RW1; \ - xorq (RW1, RT1, 8), to; \ - leaq s3(%rip), RW1; \ - xorq (RW1, RT2, 8), RT0; \ - load_next_key(n, RW0); \ - xorq RT0, to; \ - leaq s1(%rip), RW1; \ - xorq (RW1, RT3, 8), to; \ - -#define load_next_key(n, RWx) \ - movq (((n) + 1) * 8)(CTX), RWx; - -#define dummy2(a, b) /*_*/ - -#define read_block(io, left, right) \ - movl (io), left##d; \ - movl 4(io), right##d; \ - bswapl left##d; \ - bswapl right##d; - -#define write_block(io, left, right) \ - bswapl left##d; \ - bswapl right##d; \ - movl left##d, (io); \ - movl right##d, 4(io); - -SYM_FUNC_START(des3_ede_x86_64_crypt_blk) - /* input: - * %rdi: round keys, CTX - * %rsi: dst - * %rdx: src - */ - pushq %rbx; - pushq %r12; - pushq %r13; - pushq %r14; - pushq %r15; - - pushq %rsi; /* dst */ - - read_block(%rdx, RL0, RR0); - initial_permutation(RL0, RR0); - - movq (CTX), RW0; - - round1(0, RR0, RL0, load_next_key); - round1(1, RL0, RR0, load_next_key); - round1(2, RR0, RL0, load_next_key); - round1(3, RL0, RR0, load_next_key); - round1(4, RR0, RL0, load_next_key); - round1(5, RL0, RR0, load_next_key); - round1(6, RR0, RL0, load_next_key); - round1(7, RL0, RR0, load_next_key); - round1(8, RR0, RL0, load_next_key); - round1(9, RL0, RR0, load_next_key); - round1(10, RR0, RL0, load_next_key); - round1(11, RL0, RR0, load_next_key); - round1(12, RR0, RL0, load_next_key); - round1(13, RL0, RR0, load_next_key); - round1(14, RR0, RL0, load_next_key); - round1(15, RL0, RR0, load_next_key); - - round1(16+0, RL0, RR0, load_next_key); - round1(16+1, RR0, RL0, load_next_key); - round1(16+2, RL0, RR0, load_next_key); - round1(16+3, RR0, RL0, load_next_key); - round1(16+4, RL0, RR0, 
load_next_key); - round1(16+5, RR0, RL0, load_next_key); - round1(16+6, RL0, RR0, load_next_key); - round1(16+7, RR0, RL0, load_next_key); - round1(16+8, RL0, RR0, load_next_key); - round1(16+9, RR0, RL0, load_next_key); - round1(16+10, RL0, RR0, load_next_key); - round1(16+11, RR0, RL0, load_next_key); - round1(16+12, RL0, RR0, load_next_key); - round1(16+13, RR0, RL0, load_next_key); - round1(16+14, RL0, RR0, load_next_key); - round1(16+15, RR0, RL0, load_next_key); - - round1(32+0, RR0, RL0, load_next_key); - round1(32+1, RL0, RR0, load_next_key); - round1(32+2, RR0, RL0, load_next_key); - round1(32+3, RL0, RR0, load_next_key); - round1(32+4, RR0, RL0, load_next_key); - round1(32+5, RL0, RR0, load_next_key); - round1(32+6, RR0, RL0, load_next_key); - round1(32+7, RL0, RR0, load_next_key); - round1(32+8, RR0, RL0, load_next_key); - round1(32+9, RL0, RR0, load_next_key); - round1(32+10, RR0, RL0, load_next_key); - round1(32+11, RL0, RR0, load_next_key); - round1(32+12, RR0, RL0, load_next_key); - round1(32+13, RL0, RR0, load_next_key); - round1(32+14, RR0, RL0, load_next_key); - round1(32+15, RL0, RR0, dummy2); - - final_permutation(RR0, RL0); - - popq %rsi /* dst */ - write_block(%rsi, RR0, RL0); - - popq %r15; - popq %r14; - popq %r13; - popq %r12; - popq %rbx; - - RET; -SYM_FUNC_END(des3_ede_x86_64_crypt_blk) - -/*********************************************************************** - * 3-way 3DES - ***********************************************************************/ -#define expand_to_64bits(val, mask) \ - movl val##d, RT0d; \ - rorl $4, RT0d; \ - shlq $32, RT0; \ - orq RT0, val; \ - andq mask, val; - -#define compress_to_64bits(val) \ - movq val, RT0; \ - shrq $32, RT0; \ - roll $4, RT0d; \ - orl RT0d, val##d; - -#define initial_permutation3(left, right) \ - do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ - do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ - do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ - do_permutation(left##1d, 
right##1d, 16, 0x0000ffff); \ - do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \ - do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ - \ - do_permutation(right##0d, left##0d, 2, 0x33333333); \ - do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ - do_permutation(right##1d, left##1d, 2, 0x33333333); \ - do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ - do_permutation(right##2d, left##2d, 2, 0x33333333); \ - do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ - \ - movabs $0x3f3f3f3f3f3f3f3f, RT3; \ - \ - movl left##0d, RW0d; \ - roll $1, right##0d; \ - xorl right##0d, RW0d; \ - andl $0xaaaaaaaa, RW0d; \ - xorl RW0d, left##0d; \ - xorl RW0d, right##0d; \ - roll $1, left##0d; \ - expand_to_64bits(right##0, RT3); \ - expand_to_64bits(left##0, RT3); \ - movl left##1d, RW1d; \ - roll $1, right##1d; \ - xorl right##1d, RW1d; \ - andl $0xaaaaaaaa, RW1d; \ - xorl RW1d, left##1d; \ - xorl RW1d, right##1d; \ - roll $1, left##1d; \ - expand_to_64bits(right##1, RT3); \ - expand_to_64bits(left##1, RT3); \ - movl left##2d, RW2d; \ - roll $1, right##2d; \ - xorl right##2d, RW2d; \ - andl $0xaaaaaaaa, RW2d; \ - xorl RW2d, left##2d; \ - xorl RW2d, right##2d; \ - roll $1, left##2d; \ - expand_to_64bits(right##2, RT3); \ - expand_to_64bits(left##2, RT3); - -#define final_permutation3(left, right) \ - compress_to_64bits(right##0); \ - compress_to_64bits(left##0); \ - movl right##0d, RW0d; \ - rorl $1, left##0d; \ - xorl left##0d, RW0d; \ - andl $0xaaaaaaaa, RW0d; \ - xorl RW0d, right##0d; \ - xorl RW0d, left##0d; \ - rorl $1, right##0d; \ - compress_to_64bits(right##1); \ - compress_to_64bits(left##1); \ - movl right##1d, RW1d; \ - rorl $1, left##1d; \ - xorl left##1d, RW1d; \ - andl $0xaaaaaaaa, RW1d; \ - xorl RW1d, right##1d; \ - xorl RW1d, left##1d; \ - rorl $1, right##1d; \ - compress_to_64bits(right##2); \ - compress_to_64bits(left##2); \ - movl right##2d, RW2d; \ - rorl $1, left##2d; \ - xorl left##2d, RW2d; \ - andl $0xaaaaaaaa, RW2d; \ - xorl RW2d, 
right##2d; \ - xorl RW2d, left##2d; \ - rorl $1, right##2d; \ - \ - do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ - do_permutation(right##0d, left##0d, 2, 0x33333333); \ - do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ - do_permutation(right##1d, left##1d, 2, 0x33333333); \ - do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ - do_permutation(right##2d, left##2d, 2, 0x33333333); \ - \ - do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ - do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ - do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ - do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ - do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ - do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); - -#define round3(n, from, to, load_next_key, do_movq) \ - xorq from##0, RW0; \ - movzbl RW0bl, RT3d; \ - movzbl RW0bh, RT1d; \ - shrq $16, RW0; \ - leaq s8(%rip), RT2; \ - xorq (RT2, RT3, 8), to##0; \ - leaq s6(%rip), RT2; \ - xorq (RT2, RT1, 8), to##0; \ - movzbl RW0bl, RT3d; \ - movzbl RW0bh, RT1d; \ - shrq $16, RW0; \ - leaq s4(%rip), RT2; \ - xorq (RT2, RT3, 8), to##0; \ - leaq s2(%rip), RT2; \ - xorq (RT2, RT1, 8), to##0; \ - movzbl RW0bl, RT3d; \ - movzbl RW0bh, RT1d; \ - shrl $16, RW0d; \ - leaq s7(%rip), RT2; \ - xorq (RT2, RT3, 8), to##0; \ - leaq s5(%rip), RT2; \ - xorq (RT2, RT1, 8), to##0; \ - movzbl RW0bl, RT3d; \ - movzbl RW0bh, RT1d; \ - load_next_key(n, RW0); \ - leaq s3(%rip), RT2; \ - xorq (RT2, RT3, 8), to##0; \ - leaq s1(%rip), RT2; \ - xorq (RT2, RT1, 8), to##0; \ - xorq from##1, RW1; \ - movzbl RW1bl, RT3d; \ - movzbl RW1bh, RT1d; \ - shrq $16, RW1; \ - leaq s8(%rip), RT2; \ - xorq (RT2, RT3, 8), to##1; \ - leaq s6(%rip), RT2; \ - xorq (RT2, RT1, 8), to##1; \ - movzbl RW1bl, RT3d; \ - movzbl RW1bh, RT1d; \ - shrq $16, RW1; \ - leaq s4(%rip), RT2; \ - xorq (RT2, RT3, 8), to##1; \ - leaq s2(%rip), RT2; \ - xorq (RT2, RT1, 8), to##1; \ - movzbl RW1bl, RT3d; \ - movzbl RW1bh, RT1d; \ - shrl $16, RW1d; \ - leaq 
s7(%rip), RT2; \ - xorq (RT2, RT3, 8), to##1; \ - leaq s5(%rip), RT2; \ - xorq (RT2, RT1, 8), to##1; \ - movzbl RW1bl, RT3d; \ - movzbl RW1bh, RT1d; \ - do_movq(RW0, RW1); \ - leaq s3(%rip), RT2; \ - xorq (RT2, RT3, 8), to##1; \ - leaq s1(%rip), RT2; \ - xorq (RT2, RT1, 8), to##1; \ - xorq from##2, RW2; \ - movzbl RW2bl, RT3d; \ - movzbl RW2bh, RT1d; \ - shrq $16, RW2; \ - leaq s8(%rip), RT2; \ - xorq (RT2, RT3, 8), to##2; \ - leaq s6(%rip), RT2; \ - xorq (RT2, RT1, 8), to##2; \ - movzbl RW2bl, RT3d; \ - movzbl RW2bh, RT1d; \ - shrq $16, RW2; \ - leaq s4(%rip), RT2; \ - xorq (RT2, RT3, 8), to##2; \ - leaq s2(%rip), RT2; \ - xorq (RT2, RT1, 8), to##2; \ - movzbl RW2bl, RT3d; \ - movzbl RW2bh, RT1d; \ - shrl $16, RW2d; \ - leaq s7(%rip), RT2; \ - xorq (RT2, RT3, 8), to##2; \ - leaq s5(%rip), RT2; \ - xorq (RT2, RT1, 8), to##2; \ - movzbl RW2bl, RT3d; \ - movzbl RW2bh, RT1d; \ - do_movq(RW0, RW2); \ - leaq s3(%rip), RT2; \ - xorq (RT2, RT3, 8), to##2; \ - leaq s1(%rip), RT2; \ - xorq (RT2, RT1, 8), to##2; - -#define __movq(src, dst) \ - movq src, dst; - -SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way) - /* input: - * %rdi: ctx, round keys - * %rsi: dst (3 blocks) - * %rdx: src (3 blocks) - */ - - pushq %rbx; - pushq %r12; - pushq %r13; - pushq %r14; - pushq %r15; - - pushq %rsi /* dst */ - - /* load input */ - movl 0 * 4(%rdx), RL0d; - movl 1 * 4(%rdx), RR0d; - movl 2 * 4(%rdx), RL1d; - movl 3 * 4(%rdx), RR1d; - movl 4 * 4(%rdx), RL2d; - movl 5 * 4(%rdx), RR2d; - - bswapl RL0d; - bswapl RR0d; - bswapl RL1d; - bswapl RR1d; - bswapl RL2d; - bswapl RR2d; - - initial_permutation3(RL, RR); - - movq 0(CTX), RW0; - movq RW0, RW1; - movq RW0, RW2; - - round3(0, RR, RL, load_next_key, __movq); - round3(1, RL, RR, load_next_key, __movq); - round3(2, RR, RL, load_next_key, __movq); - round3(3, RL, RR, load_next_key, __movq); - round3(4, RR, RL, load_next_key, __movq); - round3(5, RL, RR, load_next_key, __movq); - round3(6, RR, RL, load_next_key, __movq); - round3(7, RL, RR, 
load_next_key, __movq); - round3(8, RR, RL, load_next_key, __movq); - round3(9, RL, RR, load_next_key, __movq); - round3(10, RR, RL, load_next_key, __movq); - round3(11, RL, RR, load_next_key, __movq); - round3(12, RR, RL, load_next_key, __movq); - round3(13, RL, RR, load_next_key, __movq); - round3(14, RR, RL, load_next_key, __movq); - round3(15, RL, RR, load_next_key, __movq); - - round3(16+0, RL, RR, load_next_key, __movq); - round3(16+1, RR, RL, load_next_key, __movq); - round3(16+2, RL, RR, load_next_key, __movq); - round3(16+3, RR, RL, load_next_key, __movq); - round3(16+4, RL, RR, load_next_key, __movq); - round3(16+5, RR, RL, load_next_key, __movq); - round3(16+6, RL, RR, load_next_key, __movq); - round3(16+7, RR, RL, load_next_key, __movq); - round3(16+8, RL, RR, load_next_key, __movq); - round3(16+9, RR, RL, load_next_key, __movq); - round3(16+10, RL, RR, load_next_key, __movq); - round3(16+11, RR, RL, load_next_key, __movq); - round3(16+12, RL, RR, load_next_key, __movq); - round3(16+13, RR, RL, load_next_key, __movq); - round3(16+14, RL, RR, load_next_key, __movq); - round3(16+15, RR, RL, load_next_key, __movq); - - round3(32+0, RR, RL, load_next_key, __movq); - round3(32+1, RL, RR, load_next_key, __movq); - round3(32+2, RR, RL, load_next_key, __movq); - round3(32+3, RL, RR, load_next_key, __movq); - round3(32+4, RR, RL, load_next_key, __movq); - round3(32+5, RL, RR, load_next_key, __movq); - round3(32+6, RR, RL, load_next_key, __movq); - round3(32+7, RL, RR, load_next_key, __movq); - round3(32+8, RR, RL, load_next_key, __movq); - round3(32+9, RL, RR, load_next_key, __movq); - round3(32+10, RR, RL, load_next_key, __movq); - round3(32+11, RL, RR, load_next_key, __movq); - round3(32+12, RR, RL, load_next_key, __movq); - round3(32+13, RL, RR, load_next_key, __movq); - round3(32+14, RR, RL, load_next_key, __movq); - round3(32+15, RL, RR, dummy2, dummy2); - - final_permutation3(RR, RL); - - bswapl RR0d; - bswapl RL0d; - bswapl RR1d; - bswapl RL1d; - bswapl 
RR2d; - bswapl RL2d; - - popq %rsi /* dst */ - movl RR0d, 0 * 4(%rsi); - movl RL0d, 1 * 4(%rsi); - movl RR1d, 2 * 4(%rsi); - movl RL1d, 3 * 4(%rsi); - movl RR2d, 4 * 4(%rsi); - movl RL2d, 5 * 4(%rsi); - - popq %r15; - popq %r14; - popq %r13; - popq %r12; - popq %rbx; - - RET; -SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way) - -.section .rodata, "a", @progbits -.align 16 -.L_s1: - .quad 0x0010100001010400, 0x0000000000000000 - .quad 0x0000100000010000, 0x0010100001010404 - .quad 0x0010100001010004, 0x0000100000010404 - .quad 0x0000000000000004, 0x0000100000010000 - .quad 0x0000000000000400, 0x0010100001010400 - .quad 0x0010100001010404, 0x0000000000000400 - .quad 0x0010000001000404, 0x0010100001010004 - .quad 0x0010000001000000, 0x0000000000000004 - .quad 0x0000000000000404, 0x0010000001000400 - .quad 0x0010000001000400, 0x0000100000010400 - .quad 0x0000100000010400, 0x0010100001010000 - .quad 0x0010100001010000, 0x0010000001000404 - .quad 0x0000100000010004, 0x0010000001000004 - .quad 0x0010000001000004, 0x0000100000010004 - .quad 0x0000000000000000, 0x0000000000000404 - .quad 0x0000100000010404, 0x0010000001000000 - .quad 0x0000100000010000, 0x0010100001010404 - .quad 0x0000000000000004, 0x0010100001010000 - .quad 0x0010100001010400, 0x0010000001000000 - .quad 0x0010000001000000, 0x0000000000000400 - .quad 0x0010100001010004, 0x0000100000010000 - .quad 0x0000100000010400, 0x0010000001000004 - .quad 0x0000000000000400, 0x0000000000000004 - .quad 0x0010000001000404, 0x0000100000010404 - .quad 0x0010100001010404, 0x0000100000010004 - .quad 0x0010100001010000, 0x0010000001000404 - .quad 0x0010000001000004, 0x0000000000000404 - .quad 0x0000100000010404, 0x0010100001010400 - .quad 0x0000000000000404, 0x0010000001000400 - .quad 0x0010000001000400, 0x0000000000000000 - .quad 0x0000100000010004, 0x0000100000010400 - .quad 0x0000000000000000, 0x0010100001010004 -.L_s2: - .quad 0x0801080200100020, 0x0800080000000000 - .quad 0x0000080000000000, 0x0001080200100020 - .quad 
0x0001000000100000, 0x0000000200000020 - .quad 0x0801000200100020, 0x0800080200000020 - .quad 0x0800000200000020, 0x0801080200100020 - .quad 0x0801080000100000, 0x0800000000000000 - .quad 0x0800080000000000, 0x0001000000100000 - .quad 0x0000000200000020, 0x0801000200100020 - .quad 0x0001080000100000, 0x0001000200100020 - .quad 0x0800080200000020, 0x0000000000000000 - .quad 0x0800000000000000, 0x0000080000000000 - .quad 0x0001080200100020, 0x0801000000100000 - .quad 0x0001000200100020, 0x0800000200000020 - .quad 0x0000000000000000, 0x0001080000100000 - .quad 0x0000080200000020, 0x0801080000100000 - .quad 0x0801000000100000, 0x0000080200000020 - .quad 0x0000000000000000, 0x0001080200100020 - .quad 0x0801000200100020, 0x0001000000100000 - .quad 0x0800080200000020, 0x0801000000100000 - .quad 0x0801080000100000, 0x0000080000000000 - .quad 0x0801000000100000, 0x0800080000000000 - .quad 0x0000000200000020, 0x0801080200100020 - .quad 0x0001080200100020, 0x0000000200000020 - .quad 0x0000080000000000, 0x0800000000000000 - .quad 0x0000080200000020, 0x0801080000100000 - .quad 0x0001000000100000, 0x0800000200000020 - .quad 0x0001000200100020, 0x0800080200000020 - .quad 0x0800000200000020, 0x0001000200100020 - .quad 0x0001080000100000, 0x0000000000000000 - .quad 0x0800080000000000, 0x0000080200000020 - .quad 0x0800000000000000, 0x0801000200100020 - .quad 0x0801080200100020, 0x0001080000100000 -.L_s3: - .quad 0x0000002000000208, 0x0000202008020200 - .quad 0x0000000000000000, 0x0000200008020008 - .quad 0x0000002008000200, 0x0000000000000000 - .quad 0x0000202000020208, 0x0000002008000200 - .quad 0x0000200000020008, 0x0000000008000008 - .quad 0x0000000008000008, 0x0000200000020000 - .quad 0x0000202008020208, 0x0000200000020008 - .quad 0x0000200008020000, 0x0000002000000208 - .quad 0x0000000008000000, 0x0000000000000008 - .quad 0x0000202008020200, 0x0000002000000200 - .quad 0x0000202000020200, 0x0000200008020000 - .quad 0x0000200008020008, 0x0000202000020208 - .quad 
0x0000002008000208, 0x0000202000020200 - .quad 0x0000200000020000, 0x0000002008000208 - .quad 0x0000000000000008, 0x0000202008020208 - .quad 0x0000002000000200, 0x0000000008000000 - .quad 0x0000202008020200, 0x0000000008000000 - .quad 0x0000200000020008, 0x0000002000000208 - .quad 0x0000200000020000, 0x0000202008020200 - .quad 0x0000002008000200, 0x0000000000000000 - .quad 0x0000002000000200, 0x0000200000020008 - .quad 0x0000202008020208, 0x0000002008000200 - .quad 0x0000000008000008, 0x0000002000000200 - .quad 0x0000000000000000, 0x0000200008020008 - .quad 0x0000002008000208, 0x0000200000020000 - .quad 0x0000000008000000, 0x0000202008020208 - .quad 0x0000000000000008, 0x0000202000020208 - .quad 0x0000202000020200, 0x0000000008000008 - .quad 0x0000200008020000, 0x0000002008000208 - .quad 0x0000002000000208, 0x0000200008020000 - .quad 0x0000202000020208, 0x0000000000000008 - .quad 0x0000200008020008, 0x0000202000020200 -.L_s4: - .quad 0x1008020000002001, 0x1000020800002001 - .quad 0x1000020800002001, 0x0000000800000000 - .quad 0x0008020800002000, 0x1008000800000001 - .quad 0x1008000000000001, 0x1000020000002001 - .quad 0x0000000000000000, 0x0008020000002000 - .quad 0x0008020000002000, 0x1008020800002001 - .quad 0x1000000800000001, 0x0000000000000000 - .quad 0x0008000800000000, 0x1008000000000001 - .quad 0x1000000000000001, 0x0000020000002000 - .quad 0x0008000000000000, 0x1008020000002001 - .quad 0x0000000800000000, 0x0008000000000000 - .quad 0x1000020000002001, 0x0000020800002000 - .quad 0x1008000800000001, 0x1000000000000001 - .quad 0x0000020800002000, 0x0008000800000000 - .quad 0x0000020000002000, 0x0008020800002000 - .quad 0x1008020800002001, 0x1000000800000001 - .quad 0x0008000800000000, 0x1008000000000001 - .quad 0x0008020000002000, 0x1008020800002001 - .quad 0x1000000800000001, 0x0000000000000000 - .quad 0x0000000000000000, 0x0008020000002000 - .quad 0x0000020800002000, 0x0008000800000000 - .quad 0x1008000800000001, 0x1000000000000001 - .quad 
0x1008020000002001, 0x1000020800002001 - .quad 0x1000020800002001, 0x0000000800000000 - .quad 0x1008020800002001, 0x1000000800000001 - .quad 0x1000000000000001, 0x0000020000002000 - .quad 0x1008000000000001, 0x1000020000002001 - .quad 0x0008020800002000, 0x1008000800000001 - .quad 0x1000020000002001, 0x0000020800002000 - .quad 0x0008000000000000, 0x1008020000002001 - .quad 0x0000000800000000, 0x0008000000000000 - .quad 0x0000020000002000, 0x0008020800002000 -.L_s5: - .quad 0x0000001000000100, 0x0020001002080100 - .quad 0x0020000002080000, 0x0420001002000100 - .quad 0x0000000000080000, 0x0000001000000100 - .quad 0x0400000000000000, 0x0020000002080000 - .quad 0x0400001000080100, 0x0000000000080000 - .quad 0x0020001002000100, 0x0400001000080100 - .quad 0x0420001002000100, 0x0420000002080000 - .quad 0x0000001000080100, 0x0400000000000000 - .quad 0x0020000002000000, 0x0400000000080000 - .quad 0x0400000000080000, 0x0000000000000000 - .quad 0x0400001000000100, 0x0420001002080100 - .quad 0x0420001002080100, 0x0020001002000100 - .quad 0x0420000002080000, 0x0400001000000100 - .quad 0x0000000000000000, 0x0420000002000000 - .quad 0x0020001002080100, 0x0020000002000000 - .quad 0x0420000002000000, 0x0000001000080100 - .quad 0x0000000000080000, 0x0420001002000100 - .quad 0x0000001000000100, 0x0020000002000000 - .quad 0x0400000000000000, 0x0020000002080000 - .quad 0x0420001002000100, 0x0400001000080100 - .quad 0x0020001002000100, 0x0400000000000000 - .quad 0x0420000002080000, 0x0020001002080100 - .quad 0x0400001000080100, 0x0000001000000100 - .quad 0x0020000002000000, 0x0420000002080000 - .quad 0x0420001002080100, 0x0000001000080100 - .quad 0x0420000002000000, 0x0420001002080100 - .quad 0x0020000002080000, 0x0000000000000000 - .quad 0x0400000000080000, 0x0420000002000000 - .quad 0x0000001000080100, 0x0020001002000100 - .quad 0x0400001000000100, 0x0000000000080000 - .quad 0x0000000000000000, 0x0400000000080000 - .quad 0x0020001002080100, 0x0400001000000100 -.L_s6: - .quad 
0x0200000120000010, 0x0204000020000000 - .quad 0x0000040000000000, 0x0204040120000010 - .quad 0x0204000020000000, 0x0000000100000010 - .quad 0x0204040120000010, 0x0004000000000000 - .quad 0x0200040020000000, 0x0004040100000010 - .quad 0x0004000000000000, 0x0200000120000010 - .quad 0x0004000100000010, 0x0200040020000000 - .quad 0x0200000020000000, 0x0000040100000010 - .quad 0x0000000000000000, 0x0004000100000010 - .quad 0x0200040120000010, 0x0000040000000000 - .quad 0x0004040000000000, 0x0200040120000010 - .quad 0x0000000100000010, 0x0204000120000010 - .quad 0x0204000120000010, 0x0000000000000000 - .quad 0x0004040100000010, 0x0204040020000000 - .quad 0x0000040100000010, 0x0004040000000000 - .quad 0x0204040020000000, 0x0200000020000000 - .quad 0x0200040020000000, 0x0000000100000010 - .quad 0x0204000120000010, 0x0004040000000000 - .quad 0x0204040120000010, 0x0004000000000000 - .quad 0x0000040100000010, 0x0200000120000010 - .quad 0x0004000000000000, 0x0200040020000000 - .quad 0x0200000020000000, 0x0000040100000010 - .quad 0x0200000120000010, 0x0204040120000010 - .quad 0x0004040000000000, 0x0204000020000000 - .quad 0x0004040100000010, 0x0204040020000000 - .quad 0x0000000000000000, 0x0204000120000010 - .quad 0x0000000100000010, 0x0000040000000000 - .quad 0x0204000020000000, 0x0004040100000010 - .quad 0x0000040000000000, 0x0004000100000010 - .quad 0x0200040120000010, 0x0000000000000000 - .quad 0x0204040020000000, 0x0200000020000000 - .quad 0x0004000100000010, 0x0200040120000010 -.L_s7: - .quad 0x0002000000200000, 0x2002000004200002 - .quad 0x2000000004000802, 0x0000000000000000 - .quad 0x0000000000000800, 0x2000000004000802 - .quad 0x2002000000200802, 0x0002000004200800 - .quad 0x2002000004200802, 0x0002000000200000 - .quad 0x0000000000000000, 0x2000000004000002 - .quad 0x2000000000000002, 0x0000000004000000 - .quad 0x2002000004200002, 0x2000000000000802 - .quad 0x0000000004000800, 0x2002000000200802 - .quad 0x2002000000200002, 0x0000000004000800 - .quad 
0x2000000004000002, 0x0002000004200000 - .quad 0x0002000004200800, 0x2002000000200002 - .quad 0x0002000004200000, 0x0000000000000800 - .quad 0x2000000000000802, 0x2002000004200802 - .quad 0x0002000000200800, 0x2000000000000002 - .quad 0x0000000004000000, 0x0002000000200800 - .quad 0x0000000004000000, 0x0002000000200800 - .quad 0x0002000000200000, 0x2000000004000802 - .quad 0x2000000004000802, 0x2002000004200002 - .quad 0x2002000004200002, 0x2000000000000002 - .quad 0x2002000000200002, 0x0000000004000000 - .quad 0x0000000004000800, 0x0002000000200000 - .quad 0x0002000004200800, 0x2000000000000802 - .quad 0x2002000000200802, 0x0002000004200800 - .quad 0x2000000000000802, 0x2000000004000002 - .quad 0x2002000004200802, 0x0002000004200000 - .quad 0x0002000000200800, 0x0000000000000000 - .quad 0x2000000000000002, 0x2002000004200802 - .quad 0x0000000000000000, 0x2002000000200802 - .quad 0x0002000004200000, 0x0000000000000800 - .quad 0x2000000004000002, 0x0000000004000800 - .quad 0x0000000000000800, 0x2002000000200002 -.L_s8: - .quad 0x0100010410001000, 0x0000010000001000 - .quad 0x0000000000040000, 0x0100010410041000 - .quad 0x0100000010000000, 0x0100010410001000 - .quad 0x0000000400000000, 0x0100000010000000 - .quad 0x0000000400040000, 0x0100000010040000 - .quad 0x0100010410041000, 0x0000010000041000 - .quad 0x0100010010041000, 0x0000010400041000 - .quad 0x0000010000001000, 0x0000000400000000 - .quad 0x0100000010040000, 0x0100000410000000 - .quad 0x0100010010001000, 0x0000010400001000 - .quad 0x0000010000041000, 0x0000000400040000 - .quad 0x0100000410040000, 0x0100010010041000 - .quad 0x0000010400001000, 0x0000000000000000 - .quad 0x0000000000000000, 0x0100000410040000 - .quad 0x0100000410000000, 0x0100010010001000 - .quad 0x0000010400041000, 0x0000000000040000 - .quad 0x0000010400041000, 0x0000000000040000 - .quad 0x0100010010041000, 0x0000010000001000 - .quad 0x0000000400000000, 0x0100000410040000 - .quad 0x0000010000001000, 0x0000010400041000 - .quad 
0x0100010010001000, 0x0000000400000000 - .quad 0x0100000410000000, 0x0100000010040000 - .quad 0x0100000410040000, 0x0100000010000000 - .quad 0x0000000000040000, 0x0100010410001000 - .quad 0x0000000000000000, 0x0100010410041000 - .quad 0x0000000400040000, 0x0100000410000000 - .quad 0x0100000010040000, 0x0100010010001000 - .quad 0x0100010410001000, 0x0000000000000000 - .quad 0x0100010410041000, 0x0000010000041000 - .quad 0x0000010000041000, 0x0000010400001000 - .quad 0x0000010400001000, 0x0000000400040000 - .quad 0x0100000010000000, 0x0100010010041000 diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c deleted file mode 100644 index 34600f90d8a6..000000000000 --- a/arch/x86/crypto/des3_ede_glue.c +++ /dev/null @@ -1,391 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Glue Code for assembler optimized version of 3DES - * - * Copyright © 2014 Jussi Kivilinna - * - * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: - * Copyright (c) 2006 Herbert Xu - */ - -#include -#include -#include -#include -#include -#include -#include - -struct des3_ede_x86_ctx { - struct des3_ede_ctx enc; - struct des3_ede_ctx dec; -}; - -/* regular block cipher functions */ -asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst, - const u8 *src); - -/* 3-way parallel cipher functions */ -asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst, - const u8 *src); - -static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, - const u8 *src) -{ - u32 *enc_ctx = ctx->enc.expkey; - - des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); -} - -static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, - const u8 *src) -{ - u32 *dec_ctx = ctx->dec.expkey; - - des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); -} - -static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, - const u8 *src) -{ - u32 *dec_ctx = ctx->dec.expkey; - - des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); -} - 
-static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src); -} - -static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); -} - -static int ecb_crypt(struct skcipher_request *req, const u32 *expkey) -{ - const unsigned int bsize = DES3_EDE_BLOCK_SIZE; - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - const u8 *wsrc = walk.src.virt.addr; - u8 *wdst = walk.dst.virt.addr; - - /* Process four block batch */ - if (nbytes >= bsize * 3) { - do { - des3_ede_x86_64_crypt_blk_3way(expkey, wdst, - wsrc); - - wsrc += bsize * 3; - wdst += bsize * 3; - nbytes -= bsize * 3; - } while (nbytes >= bsize * 3); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc); - - wsrc += bsize; - wdst += bsize; - nbytes -= bsize; - } while (nbytes >= bsize); - -done: - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int ecb_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); - - return ecb_crypt(req, ctx->enc.expkey); -} - -static int ecb_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); - - return ecb_crypt(req, ctx->dec.expkey); -} - -static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx, - struct skcipher_walk *walk) -{ - unsigned int bsize = DES3_EDE_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - u64 *iv = (u64 *)walk->iv; - - do { - *dst = *src ^ *iv; - des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst); - iv = dst; - - src += 1; - dst += 1; - 
nbytes -= bsize; - } while (nbytes >= bsize); - - *(u64 *)walk->iv = *iv; - return nbytes; -} - -static int cbc_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes) { - nbytes = __cbc_encrypt(ctx, &walk); - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx, - struct skcipher_walk *walk) -{ - unsigned int bsize = DES3_EDE_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - u64 ivs[3 - 1]; - u64 last_iv; - - /* Start of the last block. */ - src += nbytes / bsize - 1; - dst += nbytes / bsize - 1; - - last_iv = *src; - - /* Process four block batch */ - if (nbytes >= bsize * 3) { - do { - nbytes -= bsize * 3 - bsize; - src -= 3 - 1; - dst -= 3 - 1; - - ivs[0] = src[0]; - ivs[1] = src[1]; - - des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); - - dst[1] ^= ivs[0]; - dst[2] ^= ivs[1]; - - nbytes -= bsize; - if (nbytes < bsize) - goto done; - - *dst ^= *(src - 1); - src -= 1; - dst -= 1; - } while (nbytes >= bsize * 3); - } - - /* Handle leftovers */ - for (;;) { - des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src); - - nbytes -= bsize; - if (nbytes < bsize) - break; - - *dst ^= *(src - 1); - src -= 1; - dst -= 1; - } - -done: - *dst ^= *(u64 *)walk->iv; - *(u64 *)walk->iv = last_iv; - - return nbytes; -} - -static int cbc_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes) { - nbytes = __cbc_decrypt(ctx, &walk); - err = 
skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm); - u32 i, j, tmp; - int err; - - err = des3_ede_expand_key(&ctx->enc, key, keylen); - if (err == -ENOKEY) { - if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) - err = -EINVAL; - else - err = 0; - } - - if (err) { - memset(ctx, 0, sizeof(*ctx)); - return err; - } - - /* Fix encryption context for this implementation and form decryption - * context. */ - j = DES3_EDE_EXPKEY_WORDS - 2; - for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { - tmp = ror32(ctx->enc.expkey[i + 1], 4); - ctx->enc.expkey[i + 1] = tmp; - - ctx->dec.expkey[j + 0] = ctx->enc.expkey[i + 0]; - ctx->dec.expkey[j + 1] = tmp; - } - - return 0; -} - -static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm, - const u8 *key, - unsigned int keylen) -{ - return des3_ede_x86_setkey(&tfm->base, key, keylen); -} - -static struct crypto_alg des3_ede_cipher = { - .cra_name = "des3_ede", - .cra_driver_name = "des3_ede-asm", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = DES3_EDE_KEY_SIZE, - .cia_max_keysize = DES3_EDE_KEY_SIZE, - .cia_setkey = des3_ede_x86_setkey, - .cia_encrypt = des3_ede_x86_encrypt, - .cia_decrypt = des3_ede_x86_decrypt, - } - } -}; - -static struct skcipher_alg des3_ede_skciphers[] = { - { - .base.cra_name = "ecb(des3_ede)", - .base.cra_driver_name = "ecb-des3_ede-asm", - .base.cra_priority = 300, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .setkey = des3_ede_x86_setkey_skcipher, - .encrypt = ecb_encrypt, - 
.decrypt = ecb_decrypt, - }, { - .base.cra_name = "cbc(des3_ede)", - .base.cra_driver_name = "cbc-des3_ede-asm", - .base.cra_priority = 300, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = des3_ede_x86_setkey_skcipher, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, - } -}; - -static bool is_blacklisted_cpu(void) -{ - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) - return false; - - if (boot_cpu_data.x86 == 0x0f) { - /* - * On Pentium 4, des3_ede-x86_64 is slower than generic C - * implementation because use of 64bit rotates (which are really - * slow on P4). Therefore blacklist P4s. - */ - return true; - } - - return false; -} - -static int force; -module_param(force, int, 0); -MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); - -static int __init des3_ede_x86_init(void) -{ - int err; - - if (!force && is_blacklisted_cpu()) { - pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); - return -ENODEV; - } - - err = crypto_register_alg(&des3_ede_cipher); - if (err) - return err; - - err = crypto_register_skciphers(des3_ede_skciphers, - ARRAY_SIZE(des3_ede_skciphers)); - if (err) - crypto_unregister_alg(&des3_ede_cipher); - - return err; -} - -static void __exit des3_ede_x86_fini(void) -{ - crypto_unregister_alg(&des3_ede_cipher); - crypto_unregister_skciphers(des3_ede_skciphers, - ARRAY_SIZE(des3_ede_skciphers)); -} - -module_init(des3_ede_x86_init); -module_exit(des3_ede_x86_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); -MODULE_ALIAS_CRYPTO("des3_ede"); -MODULE_ALIAS_CRYPTO("des3_ede-asm"); -MODULE_AUTHOR("Jussi Kivilinna "); From 5469d16e71d65e2a71b68e0bb46eae63f9cb8965 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 27 Mar 2026 
01:17:27 +0100 Subject: [PATCH 105/129] crypto: kconfig - fix typos in atmel-ecc and atmel-sha204a help s/Microhip/Microchip/ Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 8d3b5d2890f8..16fa56898d35 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -490,7 +490,7 @@ config CRYPTO_DEV_ATMEL_ECC select CRYPTO_ECDH select CRC16 help - Microhip / Atmel ECC hw accelerator. + Microchip / Atmel ECC hw accelerator. Select this if you want to use the Microchip / Atmel module for ECDH algorithm. @@ -504,7 +504,7 @@ config CRYPTO_DEV_ATMEL_SHA204A select HW_RANDOM select CRC16 help - Microhip / Atmel SHA accelerator and RNG. + Microchip / Atmel SHA accelerator and RNG. Select this if you want to use the Microchip / Atmel SHA204A module as a random number generator. (Other functions of the chip are currently not exposed by this driver) From 51d0f5020deeef1c678b7e3e71420b2842215662 Mon Sep 17 00:00:00 2001 From: Paul Louvel Date: Fri, 27 Mar 2026 10:24:18 +0100 Subject: [PATCH 106/129] crypto: aspeed - Use memcpy_from_sglist() in aspeed_ahash_dma_prepare() Replace scatterwalk_map_and_copy() with memcpy_from_sglist() in aspeed_ahash_dma_prepare(). The latter provides a simpler interface without requiring a direction parameter, making the code easier to read and less error-prone. No functional change intended. 
Signed-off-by: Paul Louvel Reviewed-by: Neal Liu Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-hace-hash.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c index f8f37c9d5f3c..6f0d03cfbefc 100644 --- a/drivers/crypto/aspeed/aspeed-hace-hash.c +++ b/drivers/crypto/aspeed/aspeed-hace-hash.c @@ -182,8 +182,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev) final = true; } else length -= remain; - scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg, - rctx->offset, length, 0); + memcpy_from_sglist(hash_engine->ahash_src_addr, rctx->src_sg, rctx->offset, length); aspeed_ahash_update_counter(rctx, length); if (final) length += aspeed_ahash_fill_padding( From cdadc14359378c0cf02251c86ffbc606f55bfadd Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 27 Mar 2026 16:08:18 -0700 Subject: [PATCH 107/129] crypto: cryptomgr - Select algorithm types only when CRYPTO_SELFTESTS Enabling any template selects CRYPTO_MANAGER, which causes CRYPTO_MANAGER2 to enable itself, which selects every algorithm type option. However, pulling in all algorithm types is needed only when the self-tests are enabled. So condition the selections accordingly. To make this possible, also add the missing selections to various symbols that were relying on transitive selections via CRYPTO_MANAGER. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/Kconfig | 27 +++++++++++++++++++-------- drivers/crypto/Kconfig | 1 + drivers/crypto/allwinner/Kconfig | 2 ++ drivers/crypto/intel/qat/Kconfig | 1 + 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 13686f033413..0731ceab8d9b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -153,19 +153,20 @@ config CRYPTO_MANAGER config CRYPTO_MANAGER2 def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y) - select CRYPTO_ACOMP2 - select CRYPTO_AEAD2 - select CRYPTO_AKCIPHER2 - select CRYPTO_SIG2 - select CRYPTO_HASH2 - select CRYPTO_KPP2 - select CRYPTO_RNG2 - select CRYPTO_SKCIPHER2 + select CRYPTO_ACOMP2 if CRYPTO_SELFTESTS + select CRYPTO_AEAD2 if CRYPTO_SELFTESTS + select CRYPTO_AKCIPHER2 if CRYPTO_SELFTESTS + select CRYPTO_SIG2 if CRYPTO_SELFTESTS + select CRYPTO_HASH2 if CRYPTO_SELFTESTS + select CRYPTO_KPP2 if CRYPTO_SELFTESTS + select CRYPTO_RNG2 if CRYPTO_SELFTESTS + select CRYPTO_SKCIPHER2 if CRYPTO_SELFTESTS config CRYPTO_USER tristate "Userspace cryptographic algorithm configuration" depends on NET select CRYPTO_MANAGER + select CRYPTO_RNG help Userspace configuration for cryptographic instantiations such as cbc(aes). @@ -220,6 +221,7 @@ config CRYPTO_PCRYPT config CRYPTO_CRYPTD tristate "Software async crypto daemon" + select CRYPTO_AEAD select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_MANAGER @@ -253,7 +255,10 @@ config CRYPTO_KRB5ENC config CRYPTO_BENCHMARK tristate "Crypto benchmarking module" depends on m || EXPERT + select CRYPTO_AEAD + select CRYPTO_HASH select CRYPTO_MANAGER + select CRYPTO_SKCIPHER help Quick & dirty crypto benchmarking module. 
@@ -263,10 +268,16 @@ config CRYPTO_BENCHMARK config CRYPTO_SIMD tristate + select CRYPTO_AEAD select CRYPTO_CRYPTD config CRYPTO_ENGINE tristate + select CRYPTO_AEAD + select CRYPTO_AKCIPHER + select CRYPTO_HASH + select CRYPTO_KPP + select CRYPTO_SKCIPHER endmenu diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 16fa56898d35..189f5beb8bfa 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -301,6 +301,7 @@ config CRYPTO_DEV_PPC4XX select CRYPTO_CCM select CRYPTO_CTR select CRYPTO_GCM + select CRYPTO_RNG select CRYPTO_SKCIPHER help This option allows you to have support for AMCC crypto acceleration. diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig index b8e75210a0e3..7270e5fbc573 100644 --- a/drivers/crypto/allwinner/Kconfig +++ b/drivers/crypto/allwinner/Kconfig @@ -14,6 +14,7 @@ config CRYPTO_DEV_SUN4I_SS select CRYPTO_SHA1 select CRYPTO_AES select CRYPTO_LIB_DES + select CRYPTO_RNG select CRYPTO_SKCIPHER help Some Allwinner SoC have a crypto accelerator named @@ -49,6 +50,7 @@ config CRYPTO_DEV_SUN8I_CE select CRYPTO_CBC select CRYPTO_AES select CRYPTO_DES + select CRYPTO_RNG depends on CRYPTO_DEV_ALLWINNER depends on PM help diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 4b4861460dd4..6734b746ea70 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config CRYPTO_DEV_QAT tristate + select CRYPTO_ACOMP select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_SKCIPHER From 3a31b7fda695da81a2e2fe6812af94e8bf61624a Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 28 Mar 2026 11:20:44 +0100 Subject: [PATCH 108/129] crypto: img-hash - use list_first_entry_or_null to simplify digest Use list_first_entry_or_null() to simplify img_hash_digest() and remove the now-unused local 'struct img_hash_dev *' variables. 
Use 'ctx->hdev' when calling img_hash_handle_queue() instead of 'tctx->hdev'. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 7195c37dd102..f2d00b1d6b24 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -629,24 +629,15 @@ static int img_hash_digest(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm); struct img_hash_request_ctx *ctx = ahash_request_ctx(req); - struct img_hash_dev *hdev = NULL; - struct img_hash_dev *tmp; int err; spin_lock(&img_hash.lock); - if (!tctx->hdev) { - list_for_each_entry(tmp, &img_hash.dev_list, list) { - hdev = tmp; - break; - } - tctx->hdev = hdev; - - } else { - hdev = tctx->hdev; - } - + if (!tctx->hdev) + tctx->hdev = list_first_entry_or_null(&img_hash.dev_list, + struct img_hash_dev, list); + ctx->hdev = tctx->hdev; spin_unlock(&img_hash.lock); - ctx->hdev = hdev; + ctx->flags = 0; ctx->digsize = crypto_ahash_digestsize(tfm); @@ -675,7 +666,7 @@ static int img_hash_digest(struct ahash_request *req) ctx->sgfirst = req->src; ctx->nents = sg_nents(ctx->sg); - err = img_hash_handle_queue(tctx->hdev, req); + err = img_hash_handle_queue(ctx->hdev, req); return err; } From f2e25a4cf57d1d24a6920d9cb0f2bcf2b434bd8c Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 28 Mar 2026 11:20:46 +0100 Subject: [PATCH 109/129] crypto: img-hash - drop redundant return variable In img_hash_digest(), remove the redundant return variable 'err' and return img_hash_handle_queue() directly. 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index f2d00b1d6b24..c0467185ee42 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -629,7 +629,6 @@ static int img_hash_digest(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm); struct img_hash_request_ctx *ctx = ahash_request_ctx(req); - int err; spin_lock(&img_hash.lock); if (!tctx->hdev) @@ -666,9 +665,7 @@ static int img_hash_digest(struct ahash_request *req) ctx->sgfirst = req->src; ctx->nents = sg_nents(ctx->sg); - err = img_hash_handle_queue(ctx->hdev, req); - - return err; + return img_hash_handle_queue(ctx->hdev, req); } static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name) From 35ecb77ae0749a2f1b04872c9978d9d7ddbbeb79 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Sat, 28 Mar 2026 22:29:46 +0000 Subject: [PATCH 110/129] crypto: qat - use swab32 macro Replace __builtin_bswap32() with swab32 in icp_qat_hw_20_comp.h to fix the following build errors on architectures without native byte-swap support: alpha-linux-ld: drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.o: in function `adf_gen4_build_decomp_block': drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h:141:(.text+0xeec): undefined reference to `__bswapsi2' alpha-linux-ld: drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h:141:(.text+0xef8): undefined reference to `__bswapsi2' alpha-linux-ld: drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.o: in function `adf_gen4_build_comp_block': drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h:57:(.text+0xf64): undefined reference to `__bswapsi2' alpha-linux-ld: drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h:57:(.text+0xf7c): undefined reference to `__bswapsi2' Fixes: 5b14b2b307e4 ("crypto: 
qat - enable deflate for QAT GEN4") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202603290259.Ig9kDOmI-lkp@intel.com/ Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h index 7ea8962272f2..d28732225c9e 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h @@ -3,6 +3,8 @@ #ifndef _ICP_QAT_HW_20_COMP_H_ #define _ICP_QAT_HW_20_COMP_H_ +#include + #include "icp_qat_hw_20_comp_defs.h" #include "icp_qat_fw.h" @@ -54,7 +56,7 @@ ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK); - return __builtin_bswap32(val32); + return swab32(val32); } struct icp_qat_hw_comp_20_config_csr_upper { @@ -106,7 +108,7 @@ ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK); - return __builtin_bswap32(val32); + return swab32(val32); } struct icp_qat_hw_decomp_20_config_csr_lower { @@ -138,7 +140,7 @@ ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_l ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK); - return __builtin_bswap32(val32); + return swab32(val32); } struct icp_qat_hw_decomp_20_config_csr_upper { @@ -158,7 +160,7 @@ ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_u ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK); - return __builtin_bswap32(val32); + return 
swab32(val32); } #endif From 879a4f78ea3f8ce113fb0e99b8ead85542133ceb Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Sat, 28 Mar 2026 22:29:47 +0000 Subject: [PATCH 111/129] crypto: qat - add support for zstd Add support for the ZSTD algorithm for QAT GEN4, GEN5 and GEN6 via the acomp API. For GEN4 and GEN5, compression is performed in hardware using LZ4s, a QAT-specific variant of LZ4. The compressed output is post-processed to generate ZSTD sequences, and the ZSTD library is then used to produce the final ZSTD stream via zstd_compress_sequences_and_literals(). Only inputs between 8 KB and 512 KB are offloaded to the device. The minimum size restriction will be relaxed once polling support is added. The maximum size is limited by the use of pre-allocated per-CPU scratch buffers. On these generations, only compression is offloaded to hardware; decompression always falls back to software. For GEN6, both compression and decompression are offloaded to the accelerator, which natively supports the ZSTD algorithm. There is no limit on the input buffer size supported. However, since GEN6 is limited to a history size of 64 KB, decompression of frames compressed with a larger history falls back to software. Since GEN2 devices do not support ZSTD or LZ4s, add a mechanism that prevents selecting GEN2 compression instances for ZSTD or LZ4s when a GEN2 plug-in card is present on a system with an embedded GEN4, GEN5 or GEN6 device. In addition, modify the algorithm registration logic to allow registering the correct implementation, i.e. LZ4s based for GEN4 and GEN5 or native ZSTD for GEN6. 
Co-developed-by: Suman Kumar Chakraborty Signed-off-by: Suman Kumar Chakraborty Signed-off-by: Giovanni Cabiddu Reviewed-by: Laurent M Coquerel Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/Kconfig | 1 + .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 1 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + .../intel/qat/qat_6xxx/adf_6xxx_hw_data.c | 17 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 6 + .../intel/qat/qat_common/adf_common_drv.h | 6 +- .../intel/qat/qat_common/adf_gen4_hw_data.c | 18 +- .../crypto/intel/qat/qat_common/adf_init.c | 6 +- .../crypto/intel/qat/qat_common/icp_qat_fw.h | 7 + .../intel/qat/qat_common/icp_qat_fw_comp.h | 2 + .../crypto/intel/qat/qat_common/icp_qat_hw.h | 3 +- .../intel/qat/qat_common/qat_comp_algs.c | 524 +++++++++++++++++- .../intel/qat/qat_common/qat_comp_req.h | 9 + .../qat/qat_common/qat_comp_zstd_utils.c | 165 ++++++ .../qat/qat_common/qat_comp_zstd_utils.h | 13 + .../intel/qat/qat_common/qat_compression.c | 23 +- 17 files changed, 773 insertions(+), 30 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c create mode 100644 drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 6734b746ea70..9d6e6f52d2dc 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -12,6 +12,7 @@ config CRYPTO_DEV_QAT select CRYPTO_LIB_SHA1 select CRYPTO_LIB_SHA256 select CRYPTO_LIB_SHA512 + select CRYPTO_ZSTD select FW_LOADER select CRC8 diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 0002122219bc..19f9f738630b 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -488,6 +488,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) 
hw_data->clock_frequency = ADF_420XX_AE_FREQ; hw_data->services_supported = adf_gen4_services_supported; hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt; + hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 900f19b90b2d..49b425be34c8 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -473,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->clock_frequency = ADF_4XXX_AE_FREQ; hw_data->services_supported = adf_gen4_services_supported; hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt; + hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c index 177bc4eb3c24..205680797e2c 100644 --- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c @@ -33,6 +33,8 @@ #define ADF_AE_GROUP_1 GENMASK(7, 4) #define ADF_AE_GROUP_2 BIT(8) +#define ASB_MULTIPLIER 9 + struct adf_ring_config { u32 ring_mask; enum adf_cfg_service_type ring_type; @@ -509,6 +511,9 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo) case QAT_DEFLATE: header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; break; + case QAT_ZSTD: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_COMPRESS; + break; default: return -EINVAL; } @@ -519,6 +524,13 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo) cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + /* + * Store Auto Select Best (ASB) 
multiplier in the request template. + * This will be used in the data path to set the actual threshold + * value based on the input data size. + */ + req_tmpl->u3.asb_threshold.asb_value = ASB_MULTIPLIER; + return 0; } @@ -532,12 +544,16 @@ static int build_decomp_block(void *ctx, enum adf_dc_algo algo) case QAT_DEFLATE: header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; break; + case QAT_ZSTD: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_DECOMPRESS; + break; default: return -EINVAL; } cd_pars->u.sl.comp_slice_cfg_word[0] = 0; cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + req_tmpl->u3.asb_threshold.asb_value = 0; return 0; } @@ -1030,6 +1046,7 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data) hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS; hw_data->clock_frequency = ADF_6XXX_AE_FREQ; hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt; + hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD; adf_gen6_init_services_supported(hw_data); adf_gen6_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 016b81e60cfb..9478111c8437 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -41,6 +41,7 @@ intel_qat-y := adf_accel_engine.o \ qat_bl.o \ qat_comp_algs.o \ qat_compression.o \ + qat_comp_zstd_utils.o \ qat_crypto.o \ qat_hal.o \ qat_mig_dev.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index cac110215c5e..03a4e9690208 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -59,6 +59,11 @@ enum adf_accel_capabilities { ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 }; +enum adf_accel_capabilities_ext { + ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S = BIT(0), + ADF_ACCEL_CAPABILITIES_EXT_ZSTD = BIT(1), +}; + enum adf_fuses { 
ADF_FUSECTL0, ADF_FUSECTL1, @@ -336,6 +341,7 @@ struct adf_hw_device_data { u32 fuses[ADF_MAX_FUSES]; u32 straps; u32 accel_capabilities_mask; + u32 accel_capabilities_ext_mask; u32 extended_dc_capabilities; u16 fw_capabilities; u32 clock_frequency; diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 6cf3a95489e8..7b8b295ac459 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -111,12 +111,12 @@ void qat_algs_unregister(void); int qat_asym_algs_register(void); void qat_asym_algs_unregister(void); -struct qat_compression_instance *qat_compression_get_instance_node(int node); +struct qat_compression_instance *qat_compression_get_instance_node(int node, int alg); void qat_compression_put_instance(struct qat_compression_instance *inst); int qat_compression_register(void); int qat_compression_unregister(void); -int qat_comp_algs_register(void); -void qat_comp_algs_unregister(void); +int qat_comp_algs_register(u32 caps); +void qat_comp_algs_unregister(u32 caps); void qat_comp_alg_callback(void *resp); int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 349fdb323763..f4a58f04071a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -504,14 +504,20 @@ static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo) switch (algo) { case QAT_DEFLATE: header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; + hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; + hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; + hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; + break; + case QAT_LZ4S: + header->service_cmd_id = ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS; 
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S; + hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED; + hw_comp_lower_csr.abd = ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED; break; default: return -EINVAL; } - hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; - hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; - hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1; hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW; hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED; @@ -538,12 +544,16 @@ static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo) switch (algo) { case QAT_DEFLATE: header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; + break; + case QAT_LZ4S: + header->service_cmd_id = ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS; + hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S; break; default: return -EINVAL; } - hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr); cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index ec376583b3ae..f8088388cf12 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -180,6 +180,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; + u32 caps; int ret; set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -253,7 +254,8 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) } set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status); - if 
(!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) { + caps = hw_data->accel_capabilities_ext_mask; + if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register(caps)) { dev_err(&GET_DEV(accel_dev), "Failed to register compression algs\n"); set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -308,7 +310,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) if (!list_empty(&accel_dev->compression_list) && test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status)) - qat_comp_algs_unregister(); + qat_comp_algs_unregister(hw_data->accel_capabilities_ext_mask); clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); list_for_each_entry(service, &service_table, list) { diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h index c141160421e1..2fea30a78340 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h @@ -151,6 +151,13 @@ struct icp_qat_fw_comn_resp { ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ ICP_QAT_FW_COMN_CNV_FLAG_MASK) +#define ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS 4 +#define ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK) + #define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h index 81969c515a17..2526053ee630 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h @@ -8,6 +8,8 @@ enum icp_qat_fw_comp_cmd_id { ICP_QAT_FW_COMP_CMD_STATIC = 0, ICP_QAT_FW_COMP_CMD_DYNAMIC = 1, ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2, + ICP_QAT_FW_COMP_CMD_ZSTD_COMPRESS = 10, + 
ICP_QAT_FW_COMP_CMD_ZSTD_DECOMPRESS = 11, ICP_QAT_FW_COMP_CMD_DELIMITER }; diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index 0223bd541f1f..16ef6d98fa42 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -336,7 +336,8 @@ enum icp_qat_hw_compression_delayed_match { enum icp_qat_hw_compression_algo { ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0, ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1, - ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2 + ICP_QAT_HW_COMPRESSION_ALGO_ZSTD = 2, + ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER }; enum icp_qat_hw_compression_depth { diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index bfc820a08ada..e0d003b50358 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_dc.h" @@ -13,9 +14,104 @@ #include "qat_comp_req.h" #include "qat_compression.h" #include "qat_algs_send.h" +#include "qat_comp_zstd_utils.h" + +#define QAT_ZSTD_SCRATCH_SIZE 524288 +#define QAT_ZSTD_MAX_BLOCK_SIZE 65535 +#define QAT_ZSTD_MAX_CONTENT_SIZE 4096 +#define QAT_LZ4S_MIN_INPUT_SIZE 8192 +#define QAT_LZ4S_MAX_OUTPUT_SIZE QAT_ZSTD_SCRATCH_SIZE +#define QAT_MAX_SEQUENCES (128 * 1024) static DEFINE_MUTEX(algs_lock); -static unsigned int active_devs; +static unsigned int active_devs_deflate; +static unsigned int active_devs_lz4s; +static unsigned int active_devs_zstd; + +struct qat_zstd_scratch { + size_t cctx_buffer_size; + void *lz4s; + void *literals; + void *out_seqs; + void *workspace; + ZSTD_CCtx *ctx; +}; + +static void *qat_zstd_alloc_scratch(void) +{ + struct qat_zstd_scratch *scratch; + ZSTD_parameters params; + size_t cctx_size; + ZSTD_CCtx *ctx; + size_t zret; + int ret; + + ret = -ENOMEM; 
+ scratch = kzalloc_obj(*scratch); + if (!scratch) + return ERR_PTR(ret); + + scratch->lz4s = kvmalloc(QAT_ZSTD_SCRATCH_SIZE, GFP_KERNEL); + if (!scratch->lz4s) + goto error; + + scratch->literals = kvmalloc(QAT_ZSTD_SCRATCH_SIZE, GFP_KERNEL); + if (!scratch->literals) + goto error; + + scratch->out_seqs = kvcalloc(QAT_MAX_SEQUENCES, sizeof(ZSTD_Sequence), + GFP_KERNEL); + if (!scratch->out_seqs) + goto error; + + params = zstd_get_params(zstd_max_clevel(), QAT_ZSTD_SCRATCH_SIZE); + cctx_size = zstd_cctx_workspace_bound(¶ms.cParams); + + scratch->workspace = kvmalloc(cctx_size, GFP_KERNEL | __GFP_ZERO); + if (!scratch->workspace) + goto error; + + ret = -EINVAL; + ctx = zstd_init_cctx(scratch->workspace, cctx_size); + if (!ctx) + goto error; + + scratch->ctx = ctx; + scratch->cctx_buffer_size = cctx_size; + + zret = zstd_cctx_set_param(ctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters); + if (zstd_is_error(zret)) + goto error; + + return scratch; + +error: + kvfree(scratch->lz4s); + kvfree(scratch->literals); + kvfree(scratch->out_seqs); + kvfree(scratch->workspace); + kfree(scratch); + return ERR_PTR(ret); +} + +static void qat_zstd_free_scratch(void *ctx) +{ + struct qat_zstd_scratch *scratch = ctx; + + if (!scratch) + return; + + kvfree(scratch->lz4s); + kvfree(scratch->literals); + kvfree(scratch->out_seqs); + kvfree(scratch->workspace); + kfree(scratch); +} + +static struct crypto_acomp_streams qat_zstd_streams = { + .alloc_ctx = qat_zstd_alloc_scratch, + .free_ctx = qat_zstd_free_scratch, +}; enum direction { DECOMPRESSION = 0, @@ -24,10 +120,18 @@ enum direction { struct qat_compression_req; +struct qat_callback_params { + unsigned int produced; + unsigned int dlen; + bool plain; +}; + struct qat_compression_ctx { u8 comp_ctx[QAT_COMP_CTX_SIZE]; struct qat_compression_instance *inst; - int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp); + int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp, + struct 
qat_callback_params *params); + struct crypto_acomp *ftfm; }; struct qat_compression_req { @@ -62,6 +166,7 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq); struct qat_compression_instance *inst = ctx->inst; + struct qat_callback_params params = { }; int consumed, produced; s8 cmp_err, xlt_err; int res = -EBADMSG; @@ -76,6 +181,10 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, consumed = qat_comp_get_consumed_ctr(resp); produced = qat_comp_get_produced_ctr(resp); + /* Cache parameters for algorithm specific callback */ + params.produced = produced; + params.dlen = areq->dlen; + dev_dbg(&GET_DEV(accel_dev), "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d", crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)), @@ -83,16 +192,20 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, status ? 
"ERR" : "OK ", areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err); - areq->dlen = 0; + if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) { + if (cmp_err == ERR_CODE_OVERFLOW_ERROR || xlt_err == ERR_CODE_OVERFLOW_ERROR) + res = -E2BIG; - if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) + areq->dlen = 0; goto end; + } if (qat_req->dir == COMPRESSION) { cnv = qat_comp_get_cmp_cnv_flag(resp); if (unlikely(!cnv)) { dev_err(&GET_DEV(accel_dev), "Verified compression not supported\n"); + areq->dlen = 0; goto end; } @@ -102,33 +215,36 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, dev_dbg(&GET_DEV(accel_dev), "Actual buffer overflow: produced=%d, dlen=%d\n", produced, qat_req->actual_dlen); + + res = -E2BIG; + areq->dlen = 0; goto end; } + + params.plain = !!qat_comp_get_cmp_uncomp_flag(resp); } res = 0; areq->dlen = produced; if (ctx->qat_comp_callback) - res = ctx->qat_comp_callback(qat_req, resp); + res = ctx->qat_comp_callback(qat_req, resp, ¶ms); end: qat_bl_free_bufl(accel_dev, &qat_req->buf); acomp_request_complete(areq, res); + qat_alg_send_backlog(qat_req->alg_req.backlog); } void qat_comp_alg_callback(void *resp) { struct qat_compression_req *qat_req = (void *)(__force long)qat_comp_get_opaque(resp); - struct qat_instance_backlog *backlog = qat_req->alg_req.backlog; qat_comp_generic_callback(qat_req, resp); - - qat_alg_send_backlog(backlog); } -static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) +static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm, int alg) { struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); @@ -141,12 +257,12 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) node = tfm->node; memset(ctx, 0, sizeof(*ctx)); - inst = qat_compression_get_instance_node(node); + inst = qat_compression_get_instance_node(node, alg); if (!inst) return -EINVAL; ctx->inst = inst; - ret = 
qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE); + ret = qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, alg); if (ret) { qat_compression_put_instance(inst); memset(ctx, 0, sizeof(*ctx)); @@ -155,6 +271,11 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) return ret; } +static int qat_comp_alg_deflate_init_tfm(struct crypto_acomp *acomp_tfm) +{ + return qat_comp_alg_init_tfm(acomp_tfm, QAT_DEFLATE); +} + static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) { struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); @@ -237,7 +358,234 @@ static int qat_comp_alg_decompress(struct acomp_req *req) return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0); } -static struct acomp_alg qat_acomp[] = { { +static int qat_comp_alg_zstd_decompress(struct acomp_req *req) +{ + struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req); + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); + struct acomp_req *nreq = acomp_request_ctx(req); + zstd_frame_header header; + void *buffer; + size_t zret; + int ret; + + buffer = kmap_local_page(sg_page(req->src)) + req->src->offset; + zret = zstd_get_frame_header(&header, buffer, req->src->length); + kunmap_local(buffer); + if (zret) { + dev_err(&GET_DEV(ctx->inst->accel_dev), + "ZSTD-compressed data has an incomplete frame header\n"); + return -EINVAL; + } + + if (header.windowSize > QAT_ZSTD_MAX_BLOCK_SIZE || + header.frameContentSize >= QAT_ZSTD_MAX_CONTENT_SIZE) { + dev_dbg(&GET_DEV(ctx->inst->accel_dev), "Window size=0x%llx\n", + header.windowSize); + + memcpy(nreq, req, sizeof(*req)); + acomp_request_set_tfm(nreq, ctx->ftfm); + + ret = crypto_acomp_decompress(nreq); + req->dlen = nreq->dlen; + + return ret; + } + + return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0); +} + +static int qat_comp_lz4s_zstd_callback(struct qat_compression_req *qat_req, void *resp, + struct qat_callback_params *params) +{ + struct qat_compression_ctx *qat_ctx = 
qat_req->qat_compression_ctx; + struct acomp_req *areq = qat_req->acompress_req; + struct qat_zstd_scratch *scratch; + struct crypto_acomp_stream *s; + unsigned int lit_len = 0; + ZSTD_Sequence *out_seqs; + void *lz4s, *zstd; + size_t comp_size; + ZSTD_CCtx *ctx; + void *literals; + int seq_count; + int ret = 0; + + if (params->produced + QAT_ZSTD_LIT_COPY_LEN > QAT_ZSTD_SCRATCH_SIZE) { + dev_dbg(&GET_DEV(qat_ctx->inst->accel_dev), + "LZ4s-ZSTD: produced size (%u) + COPY_SIZE > QAT_ZSTD_SCRATCH_SIZE (%u)\n", + params->produced, QAT_ZSTD_SCRATCH_SIZE); + areq->dlen = 0; + return -E2BIG; + } + + s = crypto_acomp_lock_stream_bh(&qat_zstd_streams); + scratch = s->ctx; + + lz4s = scratch->lz4s; + zstd = lz4s; /* Output buffer is same as lz4s */ + out_seqs = scratch->out_seqs; + ctx = scratch->ctx; + literals = scratch->literals; + + if (likely(!params->plain)) { + if (likely(sg_nents(areq->dst) == 1)) { + zstd = sg_virt(areq->dst); + lz4s = zstd; + } else { + memcpy_from_sglist(lz4s, areq->dst, 0, params->produced); + } + + seq_count = qat_alg_dec_lz4s(out_seqs, QAT_MAX_SEQUENCES, lz4s, + params->produced, literals, &lit_len); + if (seq_count < 0) { + ret = seq_count; + comp_size = 0; + goto out; + } + } else { + out_seqs[0].litLength = areq->slen; + out_seqs[0].offset = 0; + out_seqs[0].matchLength = 0; + + seq_count = 1; + } + + comp_size = zstd_compress_sequences_and_literals(ctx, zstd, params->dlen, + out_seqs, seq_count, + literals, lit_len, + QAT_ZSTD_SCRATCH_SIZE, + areq->slen); + if (zstd_is_error(comp_size)) { + if (comp_size == ZSTD_error_cannotProduce_uncompressedBlock) + ret = -E2BIG; + else + ret = -EOPNOTSUPP; + + comp_size = 0; + goto out; + } + + if (comp_size > params->dlen) { + dev_dbg(&GET_DEV(qat_ctx->inst->accel_dev), + "LZ4s-ZSTD: compressed_size (%u) > output buffer size (%u)\n", + (unsigned int)comp_size, params->dlen); + ret = -EOVERFLOW; + goto out; + } + + if (unlikely(sg_nents(areq->dst) != 1)) + memcpy_to_sglist(areq->dst, 0, zstd, 
comp_size); + +out: + areq->dlen = comp_size; + crypto_acomp_unlock_stream_bh(s); + + return ret; +} + +static int qat_comp_alg_lz4s_zstd_init_tfm(struct crypto_acomp *acomp_tfm) +{ + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + int reqsize; + int ret; + + /* qat_comp_alg_init_tfm() wipes out the ctx */ + ret = qat_comp_alg_init_tfm(acomp_tfm, QAT_LZ4S); + if (ret) + return ret; + + ctx->ftfm = crypto_alloc_acomp_node("zstd", 0, CRYPTO_ALG_NEED_FALLBACK, + tfm->node); + if (IS_ERR(ctx->ftfm)) { + qat_comp_alg_exit_tfm(acomp_tfm); + return PTR_ERR(ctx->ftfm); + } + + reqsize = max(sizeof(struct qat_compression_req), + sizeof(struct acomp_req) + crypto_acomp_reqsize(ctx->ftfm)); + + acomp_tfm->reqsize = reqsize; + + ctx->qat_comp_callback = qat_comp_lz4s_zstd_callback; + + return 0; +} + +static int qat_comp_alg_zstd_init_tfm(struct crypto_acomp *acomp_tfm) +{ + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + int reqsize; + int ret; + + /* qat_comp_alg_init_tfm() wipes out the ctx */ + ret = qat_comp_alg_init_tfm(acomp_tfm, QAT_ZSTD); + if (ret) + return ret; + + ctx->ftfm = crypto_alloc_acomp_node("zstd", 0, CRYPTO_ALG_NEED_FALLBACK, + tfm->node); + if (IS_ERR(ctx->ftfm)) { + qat_comp_alg_exit_tfm(acomp_tfm); + return PTR_ERR(ctx->ftfm); + } + + reqsize = max(sizeof(struct qat_compression_req), + sizeof(struct acomp_req) + crypto_acomp_reqsize(ctx->ftfm)); + + acomp_tfm->reqsize = reqsize; + + return 0; +} + +static void qat_comp_alg_zstd_exit_tfm(struct crypto_acomp *acomp_tfm) +{ + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); + + if (ctx->ftfm) + crypto_free_acomp(ctx->ftfm); + + qat_comp_alg_exit_tfm(acomp_tfm); +} + +static int qat_comp_alg_lz4s_zstd_compress(struct acomp_req *req) +{ + struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req); + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); + 
struct acomp_req *nreq = acomp_request_ctx(req); + int ret; + + if (req->slen >= QAT_LZ4S_MIN_INPUT_SIZE && req->dlen >= QAT_LZ4S_MIN_INPUT_SIZE && + req->slen <= QAT_LZ4S_MAX_OUTPUT_SIZE && req->dlen <= QAT_LZ4S_MAX_OUTPUT_SIZE) + return qat_comp_alg_compress(req); + + memcpy(nreq, req, sizeof(*req)); + acomp_request_set_tfm(nreq, ctx->ftfm); + + ret = crypto_acomp_compress(nreq); + req->dlen = nreq->dlen; + + return ret; +} + +static int qat_comp_alg_sw_decompress(struct acomp_req *req) +{ + struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req); + struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm); + struct acomp_req *nreq = acomp_request_ctx(req); + int ret; + + memcpy(nreq, req, sizeof(*req)); + acomp_request_set_tfm(nreq, ctx->ftfm); + + ret = crypto_acomp_decompress(nreq); + req->dlen = nreq->dlen; + + return ret; +} + +static struct acomp_alg qat_acomp_deflate[] = { { .base = { .cra_name = "deflate", .cra_driver_name = "qat_deflate", @@ -247,27 +595,165 @@ static struct acomp_alg qat_acomp[] = { { .cra_reqsize = sizeof(struct qat_compression_req), .cra_module = THIS_MODULE, }, - .init = qat_comp_alg_init_tfm, + .init = qat_comp_alg_deflate_init_tfm, .exit = qat_comp_alg_exit_tfm, .compress = qat_comp_alg_compress, .decompress = qat_comp_alg_decompress, }}; -int qat_comp_algs_register(void) +static struct acomp_alg qat_acomp_zstd_lz4s = { + .base = { + .cra_name = "zstd", + .cra_driver_name = "qat_zstd", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_reqsize = sizeof(struct qat_compression_req), + .cra_ctxsize = sizeof(struct qat_compression_ctx), + .cra_module = THIS_MODULE, + }, + .init = qat_comp_alg_lz4s_zstd_init_tfm, + .exit = qat_comp_alg_zstd_exit_tfm, + .compress = qat_comp_alg_lz4s_zstd_compress, + .decompress = qat_comp_alg_sw_decompress, +}; + +static struct acomp_alg qat_acomp_zstd_native = { + .base = { + .cra_name = "zstd", + .cra_driver_name = "qat_zstd", + 
.cra_priority = 4001, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_reqsize = sizeof(struct qat_compression_req), + .cra_ctxsize = sizeof(struct qat_compression_ctx), + .cra_module = THIS_MODULE, + }, + .init = qat_comp_alg_zstd_init_tfm, + .exit = qat_comp_alg_zstd_exit_tfm, + .compress = qat_comp_alg_compress, + .decompress = qat_comp_alg_zstd_decompress, +}; + +static int qat_comp_algs_register_deflate(void) { int ret = 0; mutex_lock(&algs_lock); - if (++active_devs == 1) - ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp)); + if (++active_devs_deflate == 1) { + ret = crypto_register_acomps(qat_acomp_deflate, + ARRAY_SIZE(qat_acomp_deflate)); + if (ret) + active_devs_deflate--; + } mutex_unlock(&algs_lock); + return ret; } -void qat_comp_algs_unregister(void) +static void qat_comp_algs_unregister_deflate(void) { mutex_lock(&algs_lock); - if (--active_devs == 0) - crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp)); + if (--active_devs_deflate == 0) + crypto_unregister_acomps(qat_acomp_deflate, ARRAY_SIZE(qat_acomp_deflate)); mutex_unlock(&algs_lock); } + +static int qat_comp_algs_register_lz4s(void) +{ + int ret = 0; + + mutex_lock(&algs_lock); + if (++active_devs_lz4s == 1) { + ret = crypto_acomp_alloc_streams(&qat_zstd_streams); + if (ret) { + active_devs_lz4s--; + goto unlock; + } + + ret = crypto_register_acomp(&qat_acomp_zstd_lz4s); + if (ret) { + crypto_acomp_free_streams(&qat_zstd_streams); + active_devs_lz4s--; + } + } +unlock: + mutex_unlock(&algs_lock); + + return ret; +} + +static void qat_comp_algs_unregister_lz4s(void) +{ + mutex_lock(&algs_lock); + if (--active_devs_lz4s == 0) { + crypto_unregister_acomp(&qat_acomp_zstd_lz4s); + crypto_acomp_free_streams(&qat_zstd_streams); + } + mutex_unlock(&algs_lock); +} + +static int qat_comp_algs_register_zstd(void) +{ + int ret = 0; + + mutex_lock(&algs_lock); + if (++active_devs_zstd == 1) { + ret = 
crypto_register_acomp(&qat_acomp_zstd_native); + if (ret) + active_devs_zstd--; + } + mutex_unlock(&algs_lock); + + return ret; +} + +static void qat_comp_algs_unregister_zstd(void) +{ + mutex_lock(&algs_lock); + if (--active_devs_zstd == 0) + crypto_unregister_acomp(&qat_acomp_zstd_native); + mutex_unlock(&algs_lock); +} + +int qat_comp_algs_register(u32 caps) +{ + int ret; + + ret = qat_comp_algs_register_deflate(); + if (ret) + return ret; + + if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S) { + ret = qat_comp_algs_register_lz4s(); + if (ret) + goto err_unregister_deflate; + } + + if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD) { + ret = qat_comp_algs_register_zstd(); + if (ret) + goto err_unregister_lz4s; + } + + return ret; + +err_unregister_lz4s: + if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S) + qat_comp_algs_unregister_lz4s(); +err_unregister_deflate: + qat_comp_algs_unregister_deflate(); + + return ret; +} + +void qat_comp_algs_unregister(u32 caps) +{ + qat_comp_algs_unregister_deflate(); + + if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S) + qat_comp_algs_unregister_lz4s(); + + if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD) + qat_comp_algs_unregister_zstd(); +} diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h index 18a1f33a6db9..f165d28aaaf4 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h @@ -23,6 +23,7 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen, fw_req->comn_mid.opaque_data = opaque; req_pars->comp_len = slen; req_pars->out_buffer_sz = dlen; + fw_req->u3.asb_threshold.asb_value *= slen >> 4; } static inline void qat_comp_create_compression_req(void *ctx, void *req, @@ -110,4 +111,12 @@ static inline u8 qat_comp_get_cmp_cnv_flag(void *resp) return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags); } +static inline u8 qat_comp_get_cmp_uncomp_flag(void *resp) +{ + struct icp_qat_fw_comp_resp 
*qat_resp = resp; + u8 flags = qat_resp->comn_resp.hdr_flags; + + return ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(flags); +} + #endif diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c new file mode 100644 index 000000000000..62ec2d5c3ab8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2026 Intel Corporation */ +#include +#include +#include +#include +#include + +#include "qat_comp_zstd_utils.h" + +#define ML_BITS 4 +#define ML_MASK ((1U << ML_BITS) - 1) +#define RUN_BITS (8 - ML_BITS) +#define RUN_MASK ((1U << RUN_BITS) - 1) +#define LZ4S_MINMATCH 2 + +/* + * ZSTD blocks can decompress to at most min(windowSize, 128KB) bytes. + * Insert explicit block delimiters to keep blocks within this limit. + */ +#define QAT_ZSTD_BLOCK_MAX ZSTD_BLOCKSIZE_MAX + +static int emit_delimiter(ZSTD_Sequence *out_seqs, size_t *seqs_idx, + size_t out_seqs_capacity, unsigned int lz4s_buff_size) +{ + if (*seqs_idx >= out_seqs_capacity - 1) { + pr_debug("QAT ZSTD: sequence overflow (seqs_idx:%zu, capacity:%zu, lz4s_size:%u)\n", + *seqs_idx, out_seqs_capacity, lz4s_buff_size); + return -EOVERFLOW; + } + + out_seqs[*seqs_idx].offset = 0; + out_seqs[*seqs_idx].litLength = 0; + out_seqs[*seqs_idx].matchLength = 0; + (*seqs_idx)++; + + return 0; +} + +int qat_alg_dec_lz4s(ZSTD_Sequence *out_seqs, size_t out_seqs_capacity, + unsigned char *lz4s_buff, unsigned int lz4s_buff_size, + unsigned char *literals, unsigned int *lit_len) +{ + unsigned char *end_ip = lz4s_buff + lz4s_buff_size; + unsigned char *start, *dest, *dest_end; + unsigned int hist_literal_len = 0; + unsigned char *ip = lz4s_buff; + size_t block_decomp_size = 0; + size_t seqs_idx = 0; + int ret; + + *lit_len = 0; + + if (!lz4s_buff_size) + return 0; + + while (ip < end_ip) { + size_t literal_len = 0, match_len = 0; + const unsigned int token = *ip++; + 
size_t length = 0; + size_t offset = 0; + + /* Get literal length */ + length = token >> ML_BITS; + if (length == RUN_MASK) { + unsigned int s; + + do { + s = *ip++; + length += s; + } while (s == 255); + } + + literal_len = length; + + start = ip; + dest = literals; + dest_end = literals + length; + + do { + memcpy(dest, start, QAT_ZSTD_LIT_COPY_LEN); + dest += QAT_ZSTD_LIT_COPY_LEN; + start += QAT_ZSTD_LIT_COPY_LEN; + } while (dest < dest_end); + + literals += length; + *lit_len += length; + + ip += length; + if (ip == end_ip) { + literal_len += hist_literal_len; + /* + * If adding trailing literals would overflow the + * current block, close it first. + */ + if (block_decomp_size + literal_len > QAT_ZSTD_BLOCK_MAX) { + ret = emit_delimiter(out_seqs, &seqs_idx, + out_seqs_capacity, + lz4s_buff_size); + if (ret) + return ret; + } + out_seqs[seqs_idx].litLength = literal_len; + out_seqs[seqs_idx].offset = offset; + out_seqs[seqs_idx].matchLength = match_len; + break; + } + + offset = get_unaligned_le16(ip); + ip += 2; + + length = token & ML_MASK; + if (length == ML_MASK) { + unsigned int s; + + do { + s = *ip++; + length += s; + } while (s == 255); + } + if (length != 0) { + length += LZ4S_MINMATCH; + match_len = (unsigned short)length; + literal_len += hist_literal_len; + + /* + * If this sequence would push the current block past + * the ZSTD maximum, close the block first. 
+ */ + if (block_decomp_size + literal_len + match_len > QAT_ZSTD_BLOCK_MAX) { + ret = emit_delimiter(out_seqs, &seqs_idx, + out_seqs_capacity, + lz4s_buff_size); + if (ret) + return ret; + + block_decomp_size = 0; + } + + out_seqs[seqs_idx].offset = offset; + out_seqs[seqs_idx].litLength = literal_len; + out_seqs[seqs_idx].matchLength = match_len; + hist_literal_len = 0; + seqs_idx++; + if (seqs_idx >= out_seqs_capacity - 1) { + pr_debug("QAT ZSTD: sequence overflow (seqs_idx:%zu, capacity:%zu, lz4s_size:%u)\n", + seqs_idx, out_seqs_capacity, lz4s_buff_size); + return -EOVERFLOW; + } + + block_decomp_size += literal_len + match_len; + } else { + if (literal_len > 0) { + /* + * When match length is 0, the literal length needs + * to be temporarily stored and processed together + * with the next data block. + */ + hist_literal_len += literal_len; + } + } + } + + return seqs_idx + 1; +} diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h new file mode 100644 index 000000000000..55c7a1b9b848 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2026 Intel Corporation */ +#ifndef QAT_COMP_ZSTD_UTILS_H_ +#define QAT_COMP_ZSTD_UTILS_H_ +#include + +#define QAT_ZSTD_LIT_COPY_LEN 8 + +int qat_alg_dec_lz4s(ZSTD_Sequence *out_seqs, size_t out_seqs_capacity, + unsigned char *lz4s_buff, unsigned int lz4s_buff_size, + unsigned char *literals, unsigned int *lit_len); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c index 53a4db5507ec..1424d7a9bcd3 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_compression.c +++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c @@ -46,12 +46,14 @@ static int qat_compression_free_instances(struct adf_accel_dev *accel_dev) return 0; } -struct qat_compression_instance 
*qat_compression_get_instance_node(int node) +struct qat_compression_instance *qat_compression_get_instance_node(int node, int alg) { struct qat_compression_instance *inst = NULL; + struct adf_hw_device_data *hw_data = NULL; struct adf_accel_dev *accel_dev = NULL; unsigned long best = ~0; struct list_head *itr; + u32 caps, mask; list_for_each(itr, adf_devmgr_get_head()) { struct adf_accel_dev *tmp_dev; @@ -61,6 +63,15 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node) tmp_dev = list_entry(itr, struct adf_accel_dev, list); tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev)); + if (alg == QAT_ZSTD || alg == QAT_LZ4S) { + hw_data = tmp_dev->hw_device; + caps = hw_data->accel_capabilities_ext_mask; + mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD | + ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S; + if (!(caps & mask)) + continue; + } + if ((node == tmp_dev_node || tmp_dev_node < 0) && adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) { ctr = atomic_read(&tmp_dev->ref_count); @@ -78,6 +89,16 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node) struct adf_accel_dev *tmp_dev; tmp_dev = list_entry(itr, struct adf_accel_dev, list); + + if (alg == QAT_ZSTD || alg == QAT_LZ4S) { + hw_data = tmp_dev->hw_device; + caps = hw_data->accel_capabilities_ext_mask; + mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD | + ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S; + if (!(caps & mask)) + continue; + } + if (adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) { accel_dev = tmp_dev; From 02c64052fad03699b9c6d1df2f9b444d17e4ac50 Mon Sep 17 00:00:00 2001 From: Haoxiang Li Date: Mon, 30 Mar 2026 11:34:02 +0800 Subject: [PATCH 112/129] crypto: ccree - fix a memory leak in cc_mac_digest() Add cc_unmap_result() if cc_map_hash_request_final() fails to prevent potential memory leak. 
Fixes: 63893811b0fc ("crypto: ccree - add ahash support") Cc: stable@vger.kernel.org Signed-off-by: Haoxiang Li Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_hash.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index c6d085c8ff79..73179bf725a7 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -1448,6 +1448,7 @@ static int cc_mac_digest(struct ahash_request *req) if (cc_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1, flags)) { dev_err(dev, "map_ahash_request_final() failed\n"); + cc_unmap_result(dev, state, digestsize, req->result); cc_unmap_req(dev, state, ctx); return -ENOMEM; } From f94f6cff1dcf9296879c7242dda8171320188ed7 Mon Sep 17 00:00:00 2001 From: Zhushuai Yin Date: Mon, 30 Mar 2026 14:25:27 +0800 Subject: [PATCH 113/129] crypto: hisilicon - fix the format string type error 1. The return value val of sec_debugfs_atomic64_get is of the u64 type, but %lld instead of %llu is used in DEFINE_DEBUGFS_ATTRIBUTE. Fix it. 2. In debugfs.c, since the types of q_depth and xeq_depth are u16, the results of q_depth - 1 and xeq_depth - 1 are int rather than u16. Use %d for int. 
Signed-off-by: Zhushuai Yin Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/debugfs.c | 4 ++-- drivers/crypto/hisilicon/sec2/sec_main.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 5d8b4112c543..e5878558dc64 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -305,7 +305,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s, ret = kstrtou32(presult, 0, e_id); if (ret || *e_id >= q_depth) { - dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1); + dev_err(dev, "Please input sqe num (0-%d)", q_depth - 1); return -EINVAL; } @@ -388,7 +388,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name) } if (xeqe_id >= xeq_depth) { - dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1); + dev_err(dev, "Please input eqe or aeqe num (0-%d)", xeq_depth - 1); return -EINVAL; } diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 6647b7340827..056bd8f4da5a 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -909,7 +909,7 @@ static int sec_debugfs_atomic64_set(void *data, u64 val) } DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, - sec_debugfs_atomic64_set, "%lld\n"); + sec_debugfs_atomic64_set, "%llu\n"); static int sec_regs_show(struct seq_file *s, void *unused) { From ff34953026dd38dd62d28847c34670f20cbea37a Mon Sep 17 00:00:00 2001 From: Chenghai Huang Date: Mon, 30 Mar 2026 14:25:28 +0800 Subject: [PATCH 114/129] crypto: hisilicon/qm - add const qualifier to info_name in struct qm_cmd_dump_item The "info_name" is never changed in struct qm_cmd_dump_item, make it const. 
Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/debugfs.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index e5878558dc64..3ee6de16e3f1 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -45,8 +45,8 @@ struct qm_dfx_item { struct qm_cmd_dump_item { const char *cmd; - char *info_name; - int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name); + const char *info_name; + int (*dump_fn)(struct hisi_qm *qm, char *cmd, const char *info_name); }; static struct qm_dfx_item qm_dfx_files[] = { @@ -151,7 +151,7 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, } static void dump_show(struct hisi_qm *qm, void *info, - unsigned int info_size, char *info_name) + unsigned int info_size, const char *info_name) { struct device *dev = &qm->pdev->dev; u8 *info_curr = info; @@ -165,7 +165,7 @@ static void dump_show(struct hisi_qm *qm, void *info, } } -static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) +static int qm_sqc_dump(struct hisi_qm *qm, char *s, const char *name) { struct device *dev = &qm->pdev->dev; struct qm_sqc sqc; @@ -202,7 +202,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) return 0; } -static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) +static int qm_cqc_dump(struct hisi_qm *qm, char *s, const char *name) { struct device *dev = &qm->pdev->dev; struct qm_cqc cqc; @@ -239,7 +239,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) return 0; } -static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) +static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, const char *name) { struct device *dev = &qm->pdev->dev; struct qm_aeqc aeqc; @@ -317,7 +317,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s, return 0; } -static int qm_sq_dump(struct hisi_qm *qm, char *s, char 
*name) +static int qm_sq_dump(struct hisi_qm *qm, char *s, const char *name) { u16 sq_depth = qm->qp_array->sq_depth; struct hisi_qp *qp; @@ -345,7 +345,7 @@ static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name) return 0; } -static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name) +static int qm_cq_dump(struct hisi_qm *qm, char *s, const char *name) { struct qm_cqe *cqe_curr; struct hisi_qp *qp; @@ -363,7 +363,7 @@ static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name) return 0; } -static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name) +static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, const char *name) { struct device *dev = &qm->pdev->dev; u16 xeq_depth; From 6e7619dc6a243f972aabc22c349e1f6b371fbd24 Mon Sep 17 00:00:00 2001 From: Chenghai Huang Date: Mon, 30 Mar 2026 14:25:29 +0800 Subject: [PATCH 115/129] crypto: hisilicon/qm - remove else after return Else condition is not needed after a return, remove it. Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d1626685ed9f..0588355920dd 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4278,8 +4278,8 @@ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) { if (num_vfs == 0) return hisi_qm_sriov_disable(pdev, false); - else - return hisi_qm_sriov_enable(pdev, num_vfs); + + return hisi_qm_sriov_enable(pdev, num_vfs); } EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); From d2b5e8d3193df4e2795f405e1757d6384124330a Mon Sep 17 00:00:00 2001 From: Chenghai Huang Date: Mon, 30 Mar 2026 14:25:30 +0800 Subject: [PATCH 116/129] crypto: hisilicon/qm - drop redundant variable initialization Variables are assigned before used. Initialization is not required. 
Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 0588355920dd..2bb51d4d88a6 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3381,7 +3381,7 @@ static int __hisi_qm_start(struct hisi_qm *qm) int hisi_qm_start(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; - int ret = 0; + int ret; down_write(&qm->qps_lock); @@ -3917,8 +3917,8 @@ back_func_qos: static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) { - u64 cir_u = 0, cir_b = 0, cir_s = 0; u64 shaper_vft, ir_calc, ir; + u64 cir_u, cir_b, cir_s; unsigned int val; u32 error_rate; int ret; From 06c42142cf8aaeba3fa3c4336717b87ca4eebf8a Mon Sep 17 00:00:00 2001 From: Chenghai Huang Date: Mon, 30 Mar 2026 14:25:31 +0800 Subject: [PATCH 117/129] crypto: hisilicon - remove unused and non-public APIs for qm and sec - sec_register_to_crypto() and sec_unregister_from_crypto() have been removed, the function declarations have not been removed. Remove them. - hisi_qm_start_qp and hisi_qm_stop_qp are called internally by the QM. Therefore, the EXPORT_SYMBOL_GPL declaration of these non-public interfaces is deleted. 
Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 8 ++++---- drivers/crypto/hisilicon/sec2/sec.h | 2 -- include/linux/hisi_acc_qm.h | 2 -- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 2bb51d4d88a6..3ca47e2a9719 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -472,6 +472,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { static void qm_irqs_unregister(struct hisi_qm *qm); static int qm_reset_device(struct hisi_qm *qm); +static void hisi_qm_stop_qp(struct hisi_qp *qp); + int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp, unsigned int device) { @@ -2262,7 +2264,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) * After this function, qp can receive request from user. Return 0 if * successful, negative error code if failed. */ -int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) +static int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) { struct hisi_qm *qm = qp->qm; int ret; @@ -2273,7 +2275,6 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) return ret; } -EXPORT_SYMBOL_GPL(hisi_qm_start_qp); /** * qp_stop_fail_cb() - call request cb. @@ -2418,13 +2419,12 @@ static void qm_stop_qp_nolock(struct hisi_qp *qp) * * This function is reverse of hisi_qm_start_qp. */ -void hisi_qm_stop_qp(struct hisi_qp *qp) +static void hisi_qm_stop_qp(struct hisi_qp *qp) { down_write(&qp->qm->qps_lock); qm_stop_qp_nolock(qp); up_write(&qp->qm->qps_lock); } -EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); /** * hisi_qp_send() - Queue up a task in the hardware queue. 
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 0710977861f3..adf95795dffe 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -285,7 +285,5 @@ enum sec_cap_table_type { void sec_destroy_qps(struct hisi_qp **qps, int qp_num); struct hisi_qp **sec_create_qps(void); -int sec_register_to_crypto(struct hisi_qm *qm); -void sec_unregister_from_crypto(struct hisi_qm *qm); u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low); #endif diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 8a581b5bbbcd..a6268dc4f7cb 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -558,8 +558,6 @@ int hisi_qm_init(struct hisi_qm *qm); void hisi_qm_uninit(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm); int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r); -int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); -void hisi_qm_stop_qp(struct hisi_qp *qp); int hisi_qp_send(struct hisi_qp *qp, const void *msg); void hisi_qm_debug_init(struct hisi_qm *qm); void hisi_qm_debug_regs_clear(struct hisi_qm *qm); From 01d798e9feb30212952d4e992801ba6bd6a82351 Mon Sep 17 00:00:00 2001 From: Haixin Xu Date: Mon, 30 Mar 2026 15:23:46 +0800 Subject: [PATCH 118/129] crypto: jitterentropy - replace long-held spinlock with mutex jent_kcapi_random() serializes the shared jitterentropy state, but it currently holds a spinlock across the jent_read_entropy() call. That path performs expensive jitter collection and SHA3 conditioning, so parallel readers can trigger stalls as contending waiters spin for the same lock. To prevent non-preemptible lock hold, replace rng->jent_lock with a mutex so contended readers sleep instead of spinning on a shared lock held across expensive entropy generation. 
Fixes: bb5530e40824 ("crypto: jitterentropy - add jitterentropy RNG") Reported-by: Yifan Wu Reported-by: Juefei Pu Reported-by: Yuan Tan Suggested-by: Xin Liu Signed-off-by: Haixin Xu Reviewed-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy-kcapi.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 7c880cf34c52..5edc6d285aa1 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -193,7 +194,7 @@ int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len) ***************************************************************************/ struct jitterentropy { - spinlock_t jent_lock; + struct mutex jent_lock; struct rand_data *entropy_collector; struct crypto_shash *tfm; struct shash_desc *sdesc; @@ -203,7 +204,7 @@ static void jent_kcapi_cleanup(struct crypto_tfm *tfm) { struct jitterentropy *rng = crypto_tfm_ctx(tfm); - spin_lock(&rng->jent_lock); + mutex_lock(&rng->jent_lock); if (rng->sdesc) { shash_desc_zero(rng->sdesc); @@ -218,7 +219,7 @@ static void jent_kcapi_cleanup(struct crypto_tfm *tfm) if (rng->entropy_collector) jent_entropy_collector_free(rng->entropy_collector); rng->entropy_collector = NULL; - spin_unlock(&rng->jent_lock); + mutex_unlock(&rng->jent_lock); } static int jent_kcapi_init(struct crypto_tfm *tfm) @@ -228,7 +229,7 @@ static int jent_kcapi_init(struct crypto_tfm *tfm) struct shash_desc *sdesc; int size, ret = 0; - spin_lock_init(&rng->jent_lock); + mutex_init(&rng->jent_lock); /* Use SHA3-256 as conditioner */ hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); @@ -257,7 +258,6 @@ static int jent_kcapi_init(struct crypto_tfm *tfm) goto err; } - spin_lock_init(&rng->jent_lock); return 0; err: @@ -272,7 +272,7 @@ static int jent_kcapi_random(struct crypto_rng *tfm, struct jitterentropy *rng = 
crypto_rng_ctx(tfm); int ret = 0; - spin_lock(&rng->jent_lock); + mutex_lock(&rng->jent_lock); ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); @@ -298,7 +298,7 @@ static int jent_kcapi_random(struct crypto_rng *tfm, ret = -EINVAL; } - spin_unlock(&rng->jent_lock); + mutex_unlock(&rng->jent_lock); return ret; } From 655ef638a2bc3cd0a9eff99a02f83cab94a3a917 Mon Sep 17 00:00:00 2001 From: Paul Louvel Date: Mon, 30 Mar 2026 12:28:18 +0200 Subject: [PATCH 119/129] crypto: talitos - fix SEC1 32k ahash request limitation Since commit c662b043cdca ("crypto: af_alg/hash: Support MSG_SPLICE_PAGES"), the crypto core may pass large scatterlists spanning multiple pages to drivers supporting ahash operations. As a result, a driver can now receive large ahash requests. The SEC1 engine has a limitation where a single descriptor cannot process more than 32k of data. The current implementation attempts to handle the entire request within a single descriptor, which leads to failures raised by the driver: "length exceeds h/w max limit" Address this limitation by splitting large ahash requests into multiple descriptors, each respecting the 32k hardware limit. This allows processing arbitrarily large requests. Cc: stable@vger.kernel.org Fixes: c662b043cdca ("crypto: af_alg/hash: Support MSG_SPLICE_PAGES") Signed-off-by: Paul Louvel Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 310 ++++++++++++++++++++++++--------------- 1 file changed, 194 insertions(+), 116 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index e8c0db687c57..4c325fa0eac1 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -12,6 +12,7 @@ * All rights reserved. 
*/ +#include #include #include #include @@ -870,10 +871,18 @@ struct talitos_ahash_req_ctx { unsigned int swinit; unsigned int first; unsigned int last; + unsigned int last_request; unsigned int to_hash_later; unsigned int nbuf; struct scatterlist bufsl[2]; struct scatterlist *psrc; + + struct scatterlist request_bufsl[2]; + struct ahash_request *areq; + struct scatterlist *request_sl; + unsigned int remaining_ahash_request_bytes; + unsigned int current_ahash_request_bytes; + struct work_struct sec1_ahash_process_remaining; }; struct talitos_export_state { @@ -1759,7 +1768,20 @@ static void ahash_done(struct device *dev, kfree(edesc); - ahash_request_complete(areq, err); + if (err) { + ahash_request_complete(areq, err); + return; + } + + req_ctx->remaining_ahash_request_bytes -= + req_ctx->current_ahash_request_bytes; + + if (!req_ctx->remaining_ahash_request_bytes) { + ahash_request_complete(areq, 0); + return; + } + + schedule_work(&req_ctx->sec1_ahash_process_remaining); } /* @@ -1925,6 +1947,171 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, nbytes, 0, 0, 0, areq->base.flags, false); } +static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct talitos_edesc *edesc; + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + unsigned int nbytes_to_hash; + unsigned int to_hash_later; + unsigned int nsg; + int nents; + struct device *dev = ctx->dev; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; + + if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { + /* Buffer up to one whole block */ + nents = sg_nents_for_len(req_ctx->request_sl, nbytes); + if (nents < 0) { + dev_err(dev, "Invalid number of src 
SG.\n"); + return nents; + } + sg_copy_to_buffer(req_ctx->request_sl, nents, + ctx_buf + req_ctx->nbuf, nbytes); + req_ctx->nbuf += nbytes; + return 0; + } + + /* At least (blocksize + 1) bytes are available to hash */ + nbytes_to_hash = nbytes + req_ctx->nbuf; + to_hash_later = nbytes_to_hash & (blocksize - 1); + + if (req_ctx->last) + to_hash_later = 0; + else if (to_hash_later) + /* There is a partial block. Hash the full block(s) now */ + nbytes_to_hash -= to_hash_later; + else { + /* Keep one block buffered */ + nbytes_to_hash -= blocksize; + to_hash_later = blocksize; + } + + /* Chain in any previously buffered data */ + if (!is_sec1 && req_ctx->nbuf) { + nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; + sg_init_table(req_ctx->bufsl, nsg); + sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf); + if (nsg > 1) + sg_chain(req_ctx->bufsl, 2, req_ctx->request_sl); + req_ctx->psrc = req_ctx->bufsl; + } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { + int offset; + + if (nbytes_to_hash > blocksize) + offset = blocksize - req_ctx->nbuf; + else + offset = nbytes_to_hash - req_ctx->nbuf; + nents = sg_nents_for_len(req_ctx->request_sl, offset); + if (nents < 0) { + dev_err(dev, "Invalid number of src SG.\n"); + return nents; + } + sg_copy_to_buffer(req_ctx->request_sl, nents, + ctx_buf + req_ctx->nbuf, offset); + req_ctx->nbuf += offset; + req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, req_ctx->request_sl, + offset); + } else + req_ctx->psrc = req_ctx->request_sl; + + if (to_hash_later) { + nents = sg_nents_for_len(req_ctx->request_sl, nbytes); + if (nents < 0) { + dev_err(dev, "Invalid number of src SG.\n"); + return nents; + } + sg_pcopy_to_buffer(req_ctx->request_sl, nents, + req_ctx->buf[(req_ctx->buf_idx + 1) & 1], + to_hash_later, + nbytes - to_hash_later); + } + req_ctx->to_hash_later = to_hash_later; + + /* Allocate extended descriptor */ + edesc = ahash_edesc_alloc(req_ctx->areq, nbytes_to_hash); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); 
+ + edesc->desc.hdr = ctx->desc_hdr_template; + + /* On last one, request SEC to pad; otherwise continue */ + if (req_ctx->last) + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; + else + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; + + /* request SEC to INIT hash. */ + if (req_ctx->first && !req_ctx->swinit) + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; + + /* When the tfm context has a keylen, it's an HMAC. + * A first or last (ie. not middle) descriptor must request HMAC. + */ + if (ctx->keylen && (req_ctx->first || req_ctx->last)) + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; + + return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done); +} + +static void sec1_ahash_process_remaining(struct work_struct *work) +{ + struct talitos_ahash_req_ctx *req_ctx = + container_of(work, struct talitos_ahash_req_ctx, + sec1_ahash_process_remaining); + int err = 0; + + req_ctx->request_sl = scatterwalk_ffwd(req_ctx->request_bufsl, + req_ctx->request_sl, TALITOS1_MAX_DATA_LEN); + + if (req_ctx->remaining_ahash_request_bytes > TALITOS1_MAX_DATA_LEN) + req_ctx->current_ahash_request_bytes = TALITOS1_MAX_DATA_LEN; + else { + req_ctx->current_ahash_request_bytes = + req_ctx->remaining_ahash_request_bytes; + + if (req_ctx->last_request) + req_ctx->last = 1; + } + + err = ahash_process_req_one(req_ctx->areq, + req_ctx->current_ahash_request_bytes); + + if (err != -EINPROGRESS) + ahash_request_complete(req_ctx->areq, err); +} + +static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); + struct device *dev = ctx->dev; + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + req_ctx->areq = areq; + req_ctx->request_sl = areq->src; + req_ctx->remaining_ahash_request_bytes = nbytes; + + if (is_sec1) { + if (nbytes > 
TALITOS1_MAX_DATA_LEN) + nbytes = TALITOS1_MAX_DATA_LEN; + else if (req_ctx->last_request) + req_ctx->last = 1; + } + + req_ctx->current_ahash_request_bytes = nbytes; + + return ahash_process_req_one(req_ctx->areq, + req_ctx->current_ahash_request_bytes); +} + static int ahash_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); @@ -1943,6 +2130,9 @@ static int ahash_init(struct ahash_request *areq) ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; req_ctx->hw_context_size = size; + req_ctx->last_request = 0; + req_ctx->last = 0; + INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining); dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size, DMA_TO_DEVICE); @@ -1978,123 +2168,11 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq) return 0; } -static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); - struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); - struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); - struct talitos_edesc *edesc; - unsigned int blocksize = - crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); - unsigned int nbytes_to_hash; - unsigned int to_hash_later; - unsigned int nsg; - int nents; - struct device *dev = ctx->dev; - struct talitos_private *priv = dev_get_drvdata(dev); - bool is_sec1 = has_ftr_sec1(priv); - u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; - - if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { - /* Buffer up to one whole block */ - nents = sg_nents_for_len(areq->src, nbytes); - if (nents < 0) { - dev_err(dev, "Invalid number of src SG.\n"); - return nents; - } - sg_copy_to_buffer(areq->src, nents, - ctx_buf + req_ctx->nbuf, nbytes); - req_ctx->nbuf += nbytes; - return 0; - } - - /* At least (blocksize + 1) bytes are available to hash */ - nbytes_to_hash = nbytes + req_ctx->nbuf; - to_hash_later = 
nbytes_to_hash & (blocksize - 1); - - if (req_ctx->last) - to_hash_later = 0; - else if (to_hash_later) - /* There is a partial block. Hash the full block(s) now */ - nbytes_to_hash -= to_hash_later; - else { - /* Keep one block buffered */ - nbytes_to_hash -= blocksize; - to_hash_later = blocksize; - } - - /* Chain in any previously buffered data */ - if (!is_sec1 && req_ctx->nbuf) { - nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; - sg_init_table(req_ctx->bufsl, nsg); - sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf); - if (nsg > 1) - sg_chain(req_ctx->bufsl, 2, areq->src); - req_ctx->psrc = req_ctx->bufsl; - } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { - int offset; - - if (nbytes_to_hash > blocksize) - offset = blocksize - req_ctx->nbuf; - else - offset = nbytes_to_hash - req_ctx->nbuf; - nents = sg_nents_for_len(areq->src, offset); - if (nents < 0) { - dev_err(dev, "Invalid number of src SG.\n"); - return nents; - } - sg_copy_to_buffer(areq->src, nents, - ctx_buf + req_ctx->nbuf, offset); - req_ctx->nbuf += offset; - req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src, - offset); - } else - req_ctx->psrc = areq->src; - - if (to_hash_later) { - nents = sg_nents_for_len(areq->src, nbytes); - if (nents < 0) { - dev_err(dev, "Invalid number of src SG.\n"); - return nents; - } - sg_pcopy_to_buffer(areq->src, nents, - req_ctx->buf[(req_ctx->buf_idx + 1) & 1], - to_hash_later, - nbytes - to_hash_later); - } - req_ctx->to_hash_later = to_hash_later; - - /* Allocate extended descriptor */ - edesc = ahash_edesc_alloc(areq, nbytes_to_hash); - if (IS_ERR(edesc)) - return PTR_ERR(edesc); - - edesc->desc.hdr = ctx->desc_hdr_template; - - /* On last one, request SEC to pad; otherwise continue */ - if (req_ctx->last) - edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; - else - edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; - - /* request SEC to INIT hash. 
*/ - if (req_ctx->first && !req_ctx->swinit) - edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; - - /* When the tfm context has a keylen, it's an HMAC. - * A first or last (ie. not middle) descriptor must request HMAC. - */ - if (ctx->keylen && (req_ctx->first || req_ctx->last)) - edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; - - return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done); -} - static int ahash_update(struct ahash_request *areq) { struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); - req_ctx->last = 0; + req_ctx->last_request = 0; return ahash_process_req(areq, areq->nbytes); } @@ -2103,7 +2181,7 @@ static int ahash_final(struct ahash_request *areq) { struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); - req_ctx->last = 1; + req_ctx->last_request = 1; return ahash_process_req(areq, 0); } @@ -2112,7 +2190,7 @@ static int ahash_finup(struct ahash_request *areq) { struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); - req_ctx->last = 1; + req_ctx->last_request = 1; return ahash_process_req(areq, areq->nbytes); } From a1b80018b8cec27fc06a8b04a7f8b5f6cfe86eae Mon Sep 17 00:00:00 2001 From: Paul Louvel Date: Mon, 30 Mar 2026 12:28:19 +0200 Subject: [PATCH 120/129] crypto: talitos - rename first/last to first_desc/last_desc Previous commit introduces a new last_request variable in the context structure. Renaming the first/last existing member variable in the context structure to improve readability. 
Cc: stable@vger.kernel.org Signed-off-by: Paul Louvel Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 46 ++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 4c325fa0eac1..bc61d0fe3514 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -869,8 +869,8 @@ struct talitos_ahash_req_ctx { u8 buf[2][HASH_MAX_BLOCK_SIZE]; int buf_idx; unsigned int swinit; - unsigned int first; - unsigned int last; + unsigned int first_desc; + unsigned int last_desc; unsigned int last_request; unsigned int to_hash_later; unsigned int nbuf; @@ -889,8 +889,8 @@ struct talitos_export_state { u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; u8 buf[HASH_MAX_BLOCK_SIZE]; unsigned int swinit; - unsigned int first; - unsigned int last; + unsigned int first_desc; + unsigned int last_desc; unsigned int to_hash_later; unsigned int nbuf; }; @@ -1722,7 +1722,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev, if (desc->next_desc && desc->ptr[5].ptr != desc2->ptr[5].ptr) unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); - if (req_ctx->last) + if (req_ctx->last_desc) memcpy(areq->result, req_ctx->hw_context, crypto_ahash_digestsize(tfm)); @@ -1759,7 +1759,7 @@ static void ahash_done(struct device *dev, container_of(desc, struct talitos_edesc, desc); struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); - if (!req_ctx->last && req_ctx->to_hash_later) { + if (!req_ctx->last_desc && req_ctx->to_hash_later) { /* Position any partial block for next update/final/finup */ req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1; req_ctx->nbuf = req_ctx->to_hash_later; @@ -1825,7 +1825,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* first DWORD empty */ /* hash context in */ - if (!req_ctx->first || req_ctx->swinit) { + if (!req_ctx->first_desc || req_ctx->swinit) { map_single_talitos_ptr_nosync(dev, &desc->ptr[1], 
req_ctx->hw_context_size, req_ctx->hw_context, @@ -1833,7 +1833,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, req_ctx->swinit = 0; } /* Indicate next op is not the first. */ - req_ctx->first = 0; + req_ctx->first_desc = 0; /* HMAC key */ if (ctx->keylen) @@ -1866,7 +1866,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* fifth DWORD empty */ /* hash/HMAC out -or- hash context out */ - if (req_ctx->last) + if (req_ctx->last_desc) map_single_talitos_ptr(dev, &desc->ptr[5], crypto_ahash_digestsize(tfm), req_ctx->hw_context, DMA_FROM_DEVICE); @@ -1908,7 +1908,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (sg_count > 1) sync_needed = true; copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); - if (req_ctx->last) + if (req_ctx->last_desc) map_single_talitos_ptr_nosync(dev, &desc->ptr[5], req_ctx->hw_context_size, req_ctx->hw_context, @@ -1964,7 +1964,7 @@ static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes bool is_sec1 = has_ftr_sec1(priv); u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; - if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { + if (!req_ctx->last_desc && (nbytes + req_ctx->nbuf <= blocksize)) { /* Buffer up to one whole block */ nents = sg_nents_for_len(req_ctx->request_sl, nbytes); if (nents < 0) { @@ -1981,7 +1981,7 @@ static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes nbytes_to_hash = nbytes + req_ctx->nbuf; to_hash_later = nbytes_to_hash & (blocksize - 1); - if (req_ctx->last) + if (req_ctx->last_desc) to_hash_later = 0; else if (to_hash_later) /* There is a partial block. 
Hash the full block(s) now */ @@ -2041,19 +2041,19 @@ static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes edesc->desc.hdr = ctx->desc_hdr_template; /* On last one, request SEC to pad; otherwise continue */ - if (req_ctx->last) + if (req_ctx->last_desc) edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; else edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; /* request SEC to INIT hash. */ - if (req_ctx->first && !req_ctx->swinit) + if (req_ctx->first_desc && !req_ctx->swinit) edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; /* When the tfm context has a keylen, it's an HMAC. * A first or last (ie. not middle) descriptor must request HMAC. */ - if (ctx->keylen && (req_ctx->first || req_ctx->last)) + if (ctx->keylen && (req_ctx->first_desc || req_ctx->last_desc)) edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done); @@ -2076,7 +2076,7 @@ static void sec1_ahash_process_remaining(struct work_struct *work) req_ctx->remaining_ahash_request_bytes; if (req_ctx->last_request) - req_ctx->last = 1; + req_ctx->last_desc = 1; } err = ahash_process_req_one(req_ctx->areq, @@ -2103,7 +2103,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) if (nbytes > TALITOS1_MAX_DATA_LEN) nbytes = TALITOS1_MAX_DATA_LEN; else if (req_ctx->last_request) - req_ctx->last = 1; + req_ctx->last_desc = 1; } req_ctx->current_ahash_request_bytes = nbytes; @@ -2124,14 +2124,14 @@ static int ahash_init(struct ahash_request *areq) /* Initialize the context */ req_ctx->buf_idx = 0; req_ctx->nbuf = 0; - req_ctx->first = 1; /* first indicates h/w must init its context */ + req_ctx->first_desc = 1; /* first_desc indicates h/w must init its context */ req_ctx->swinit = 0; /* assume h/w init of context */ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) ? 
TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; req_ctx->hw_context_size = size; req_ctx->last_request = 0; - req_ctx->last = 0; + req_ctx->last_desc = 0; INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining); dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size, @@ -2224,8 +2224,8 @@ static int ahash_export(struct ahash_request *areq, void *out) req_ctx->hw_context_size); memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf); export->swinit = req_ctx->swinit; - export->first = req_ctx->first; - export->last = req_ctx->last; + export->first_desc = req_ctx->first_desc; + export->last_desc = req_ctx->last_desc; export->to_hash_later = req_ctx->to_hash_later; export->nbuf = req_ctx->nbuf; @@ -2250,8 +2250,8 @@ static int ahash_import(struct ahash_request *areq, const void *in) memcpy(req_ctx->hw_context, export->hw_context, size); memcpy(req_ctx->buf[0], export->buf, export->nbuf); req_ctx->swinit = export->swinit; - req_ctx->first = export->first; - req_ctx->last = export->last; + req_ctx->first_desc = export->first_desc; + req_ctx->last_desc = export->last_desc; req_ctx->to_hash_later = export->to_hash_later; req_ctx->nbuf = export->nbuf; From 1ee57ab93b75eb59f426aef37b5498a7ffc28278 Mon Sep 17 00:00:00 2001 From: Thomas Fourier Date: Mon, 30 Mar 2026 17:19:32 +0200 Subject: [PATCH 121/129] crypto: hisilicon - Fix dma_unmap_single() direction The direction used to map the buffer skreq->iv is DMA_TO_DEVICE but it is unmapped with direction DMA_BIDIRECTIONAL in the error path. Change the unmap to match the mapping. 
Fixes: 915e4e8413da ("crypto: hisilicon - SEC security accelerator driver") Cc: Signed-off-by: Thomas Fourier Reviewed-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec/sec_algs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 54e24fd7b9be..85eecbb40e7e 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -844,7 +844,7 @@ err_free_elements: if (crypto_skcipher_ivsize(atfm)) dma_unmap_single(info->dev, sec_req->dma_iv, crypto_skcipher_ivsize(atfm), - DMA_BIDIRECTIONAL); + DMA_TO_DEVICE); err_unmap_out_sg: if (split) sec_unmap_sg_on_err(skreq->dst, steps, splits_out, From 3787fb7697a942baa25361bfc3390575e5659db8 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 30 Mar 2026 19:39:25 +0200 Subject: [PATCH 122/129] crypto: qce - simplify qce_xts_swapiv() Declare 'swap' as zero-initialized and use a single index variable to simplify the byte-swapping loop in qce_xts_swapiv(). Add a comment for clarity. 
Signed-off-by: Thorsten Blum Reviewed-by: Bjorn Andersson Signed-off-by: Herbert Xu --- drivers/crypto/qce/common.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c index 04253a8d3340..54a78a57f630 100644 --- a/drivers/crypto/qce/common.c +++ b/drivers/crypto/qce/common.c @@ -280,17 +280,17 @@ static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) { - u8 swap[QCE_AES_IV_LENGTH]; - u32 i, j; + u8 swap[QCE_AES_IV_LENGTH] = {0}; + unsigned int i, offset; if (ivsize > QCE_AES_IV_LENGTH) return; - memset(swap, 0, QCE_AES_IV_LENGTH); + offset = QCE_AES_IV_LENGTH - ivsize; - for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; - i < QCE_AES_IV_LENGTH; i++, j--) - swap[i] = src[j]; + /* Reverse and right-align IV bytes. */ + for (i = 0; i < ivsize; i++) + swap[offset + i] = src[ivsize - 1 - i]; qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); } From 2418431211d5d348245a79b41cf0cb89bcadc27b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 31 Mar 2026 17:36:29 +0900 Subject: [PATCH 123/129] crypto: geniv - Remove unused spinlock from struct aead_geniv_ctx The spin lock in geniv hasn't been used in over 10 years. Remove it. 
Signed-off-by: Herbert Xu --- crypto/geniv.c | 2 -- include/crypto/internal/geniv.h | 2 -- 2 files changed, 4 deletions(-) diff --git a/crypto/geniv.c b/crypto/geniv.c index c619a5ad2fc1..04befe3a7f44 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -112,8 +112,6 @@ int aead_init_geniv(struct crypto_aead *aead) struct crypto_aead *child; int err; - spin_lock_init(&ctx->lock); - err = crypto_stdrng_get_bytes(ctx->salt, crypto_aead_ivsize(aead)); if (err) goto out; diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 012f5fb22d43..e38d9f0487ec 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -9,11 +9,9 @@ #define _CRYPTO_INTERNAL_GENIV_H #include -#include #include struct aead_geniv_ctx { - spinlock_t lock; struct crypto_aead *child; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; From 95aed2af87ec43fa7624cc81dd13d37824ad4972 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Wed, 1 Apr 2026 10:31:11 +0100 Subject: [PATCH 124/129] crypto: qat - fix IRQ cleanup on 6xxx probe failure When adf_dev_up() partially completes and then fails, the IRQ handlers registered during adf_isr_resource_alloc() are not detached before the MSI-X vectors are released. Since the device is enabled with pcim_enable_device(), calling pci_alloc_irq_vectors() internally registers pcim_msi_release() as a devres action. On probe failure, devres runs pcim_msi_release() which calls pci_free_irq_vectors(), tearing down the MSI-X vectors while IRQ handlers (for example 'qat0-bundle0') are still attached. This causes remove_proc_entry() warnings: [ 22.163964] remove_proc_entry: removing non-empty directory 'irq/143', leaking at least 'qat0-bundle0' Moving the devm_add_action_or_reset() before adf_dev_up() does not solve the problem since devres runs in LIFO order and pcim_msi_release(), registered later inside adf_dev_up(), would still fire before adf_device_down(). 
Fix by calling adf_dev_down() explicitly when adf_dev_up() fails, to properly free IRQ handlers before devres releases the MSI-X vectors. Fixes: 17fd7514ae68 ("crypto: qat - add qat_6xxx driver") Cc: stable@vger.kernel.org Signed-off-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Reviewed-by: Laurent M Coquerel Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_6xxx/adf_drv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c index 0684ea9be2ac..c52462a48c34 100644 --- a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c @@ -209,8 +209,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; ret = adf_dev_up(accel_dev, true); - if (ret) + if (ret) { + adf_dev_down(accel_dev); return ret; + } ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev); if (ret) From 93d93d91d3f2c9c54fdbccc69596bd1f20ae2ca8 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 3 Apr 2026 13:21:37 +0200 Subject: [PATCH 125/129] crypto: atmel-ecc - add Thorsten Blum as maintainer Add Thorsten Blum as maintainer of the atmel-ecc driver. 
Signed-off-by: Thorsten Blum Acked-by: Nicolas Ferre Signed-off-by: Herbert Xu --- MAINTAINERS | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 106f8264ea8d..b7f19bbc0b6b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17207,9 +17207,10 @@ F: Documentation/devicetree/bindings/media/microchip,csi2dc.yaml F: drivers/media/platform/microchip/microchip-csi2dc.c MICROCHIP ECC DRIVER +M: Thorsten Blum L: linux-crypto@vger.kernel.org -S: Orphan -F: drivers/crypto/atmel-ecc.* +S: Maintained +F: drivers/crypto/atmel-ecc.c MICROCHIP EIC DRIVER M: Claudiu Beznea From a883b38a6f616a84d75213705c64d96c37536d74 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Fri, 3 Apr 2026 13:21:39 +0200 Subject: [PATCH 126/129] crypto: atmel-sha204a - add Thorsten Blum as maintainer Add a MAINTAINERS entry for the atmel-sha204a driver and Thorsten Blum as maintainer. Signed-off-by: Thorsten Blum Acked-by: Nicolas Ferre Signed-off-by: Herbert Xu --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index b7f19bbc0b6b..f42d0f64b8cd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17188,6 +17188,12 @@ S: Supported F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml F: drivers/spi/spi-at91-usart.c +MICROCHIP ATSHA204A DRIVER +M: Thorsten Blum +L: linux-crypto@vger.kernel.org +S: Maintained +F: drivers/crypto/atmel-sha204a.c + MICROCHIP AUDIO ASOC DRIVERS M: Claudiu Beznea M: Andrei Simion From 809c9b60cf03e083b1ae0c6aa4a369b2eeda9900 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 4 Apr 2026 12:10:17 +0200 Subject: [PATCH 127/129] crypto: omap - convert reqctx buffer to fixed-size array The flexible array member 'buffer' in 'omap_sham_reqctx' is always allocated with BUFLEN bytes. Replace the flexible array with a fixed-size array and remove the now-redundant 'buflen' field. 
Since 'struct omap_sham_reqctx' now includes the buffer, simplify 'reqsize' and 'statesize' and use an offsetof-based memcpy() in omap_sham_export() and omap_sham_import(). Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 6a3c7f9277cf..b8c416c5ee70 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -147,7 +147,6 @@ struct omap_sham_reqctx { u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED; size_t digcnt; size_t bufcnt; - size_t buflen; /* walk state */ struct scatterlist *sg; @@ -156,7 +155,7 @@ struct omap_sham_reqctx { int sg_len; unsigned int total; /* total request */ - u8 buffer[] OMAP_ALIGNED; + u8 buffer[BUFLEN] OMAP_ALIGNED; }; struct omap_sham_hmac_ctx { @@ -891,7 +890,7 @@ static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq) if (hash_later < 0) hash_later = 0; - if (hash_later && hash_later <= rctx->buflen) { + if (hash_later && hash_later <= sizeof(rctx->buffer)) { scatterwalk_map_and_copy(rctx->buffer, req->src, req->nbytes - hash_later, @@ -902,7 +901,7 @@ static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq) rctx->bufcnt = 0; } - if (hash_later > rctx->buflen) + if (hash_later > sizeof(rctx->buffer)) set_bit(FLAGS_HUGE, &rctx->dd->flags); rctx->total = min(nbytes, rctx->total); @@ -987,7 +986,6 @@ static int omap_sham_init(struct ahash_request *req) ctx->digcnt = 0; ctx->total = 0; ctx->offset = 0; - ctx->buflen = BUFLEN; if (tctx->flags & BIT(FLAGS_HMAC)) { if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { @@ -1200,7 +1198,7 @@ static int omap_sham_update(struct ahash_request *req) if (!req->nbytes) return 0; - if (ctx->bufcnt + req->nbytes <= ctx->buflen) { + if (ctx->bufcnt + req->nbytes <= sizeof(ctx->buffer)) { scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, 0, req->nbytes, 0); 
ctx->bufcnt += req->nbytes; @@ -1333,7 +1331,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct omap_sham_reqctx) + BUFLEN); + sizeof(struct omap_sham_reqctx)); if (alg_base) { struct omap_sham_hmac_ctx *bctx = tctx->base; @@ -1404,7 +1402,8 @@ static int omap_sham_export(struct ahash_request *req, void *out) { struct omap_sham_reqctx *rctx = ahash_request_ctx(req); - memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt); + memcpy(out, rctx, offsetof(struct omap_sham_reqctx, buffer) + + rctx->bufcnt); return 0; } @@ -1414,7 +1413,8 @@ static int omap_sham_import(struct ahash_request *req, const void *in) struct omap_sham_reqctx *rctx = ahash_request_ctx(req); const struct omap_sham_reqctx *ctx_in = in; - memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt); + memcpy(rctx, in, offsetof(struct omap_sham_reqctx, buffer) + + ctx_in->bufcnt); return 0; } @@ -2146,8 +2146,7 @@ static int omap_sham_probe(struct platform_device *pdev) alg = &ealg->base; alg->export = omap_sham_export; alg->import = omap_sham_import; - alg->halg.statesize = sizeof(struct omap_sham_reqctx) + - BUFLEN; + alg->halg.statesize = sizeof(struct omap_sham_reqctx); err = crypto_engine_register_ahash(ealg); if (err) goto err_algs; From c697c5fcfb5e73c723ca7d9f003e37b2b9534520 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 4 Apr 2026 15:52:03 +0200 Subject: [PATCH 128/129] crypto: vmx - remove CRYPTO_DEV_VMX from Kconfig CRYPTO_DEV_VMX has been moved to arch/powerpc/crypto/Kconfig, remove it. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 189f5beb8bfa..971f17a15543 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -668,14 +668,6 @@ config CRYPTO_DEV_QCOM_RNG To compile this driver as a module, choose M here. 
The module will be called qcom-rng. If unsure, say N. -#config CRYPTO_DEV_VMX -# bool "Support for VMX cryptographic acceleration instructions" -# depends on PPC64 && VSX -# help -# Support for VMX cryptographic acceleration instructions. -# -#source "drivers/crypto/vmx/Kconfig" - config CRYPTO_DEV_IMGTEC_HASH tristate "Imagination Technologies hardware hash accelerator" depends on MIPS || COMPILE_TEST From 8879a3c110cb8ca5a69c937643f226697aa551d9 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sun, 5 Apr 2026 21:49:41 +0200 Subject: [PATCH 129/129] crypto: af_alg - use sock_kmemdup in alg_setkey_by_key_serial Replace sock_kmalloc() followed by memcpy() with sock_kmemdup() to simplify alg_setkey_by_key_serial(). Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- crypto/af_alg.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 0bb609fbec7d..b491e5477c70 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -324,15 +324,13 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval, return PTR_ERR(ret); } - key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL); + key_data = sock_kmemdup(&ask->sk, ret, key_datalen, GFP_KERNEL); if (!key_data) { up_read(&key->sem); key_put(key); return -ENOMEM; } - memcpy(key_data, ret, key_datalen); - up_read(&key->sem); key_put(key);