Merge tag 'libcrypto-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux

Pull crypto library updates from Eric Biggers:

 - Migrate more hash algorithms from the traditional crypto subsystem to
   lib/crypto/

   Like the algorithms migrated earlier (e.g. SHA-*), this simplifies
   the implementations, improves performance, enables further
   simplifications in calling code, and solves various other issues:

     - AES CBC-based MACs (AES-CMAC, AES-XCBC-MAC, and AES-CBC-MAC)

         - Support these algorithms in lib/crypto/ using the AES library
           and the existing arm64 assembly code

         - Reimplement the traditional crypto API's "cmac(aes)",
           "xcbc(aes)", and "cbcmac(aes)" on top of the library

         - Convert mac80211 to use the AES-CMAC library. Note: several
           other subsystems can use it too and will be converted later

         - Drop the broken, nonstandard, and likely unused support for
           "xcbc(aes)" with key lengths other than 128 bits

         - Enable optimizations by default

     - GHASH

         - Migrate the standalone GHASH code into lib/crypto/

         - Integrate the GHASH code more closely with the very similar
           POLYVAL code, and improve the generic GHASH implementation to
           resist cache-timing attacks and use much less memory

         - Reimplement the AES-GCM library and the "gcm" crypto_aead
           template on top of the GHASH library. Remove "ghash" from the
           crypto_shash API, as it's no longer needed

         - Enable optimizations by default

     - SM3

         - Migrate the kernel's existing SM3 code into lib/crypto/, and
           reimplement the traditional crypto API's "sm3" on top of it

         - I don't recommend using SM3, but this cleanup is worthwhile
           to organize the code the same way as other algorithms

 - Testing improvements:

     - Add a KUnit test suite for each of the new library APIs

     - Migrate the existing ChaCha20Poly1305 test to KUnit

     - Make the KUnit all_tests.config enable all crypto library tests

     - Move the test kconfig options to the Runtime Testing menu

 - Other updates to arch-optimized crypto code:

     - Optimize SHA-256 for Zhaoxin CPUs using the Padlock Hash Engine

     - Remove some MD5 implementations that are no longer worth keeping

     - Drop big endian and voluntary preemption support from the arm64
       code, as those configurations are no longer supported on arm64

 - Make jitterentropy and samples/tsm-mr use the crypto library APIs

* tag 'libcrypto-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux: (66 commits)
  lib/crypto: arm64: Assume a little-endian kernel
  arm64: fpsimd: Remove obsolete cond_yield macro
  lib/crypto: arm64/sha3: Remove obsolete chunking logic
  lib/crypto: arm64/sha512: Remove obsolete chunking logic
  lib/crypto: arm64/sha256: Remove obsolete chunking logic
  lib/crypto: arm64/sha1: Remove obsolete chunking logic
  lib/crypto: arm64/poly1305: Remove obsolete chunking logic
  lib/crypto: arm64/gf128hash: Remove obsolete chunking logic
  lib/crypto: arm64/chacha: Remove obsolete chunking logic
  lib/crypto: arm64/aes: Remove obsolete chunking logic
  lib/crypto: Include <crypto/utils.h> instead of <crypto/algapi.h>
  lib/crypto: aesgcm: Don't disable IRQs during AES block encryption
  lib/crypto: aescfb: Don't disable IRQs during AES block encryption
  lib/crypto: tests: Migrate ChaCha20Poly1305 self-test to KUnit
  lib/crypto: sparc: Drop optimized MD5 code
  lib/crypto: mips: Drop optimized MD5 code
  lib: Move crypto library tests to Runtime Testing menu
  crypto: sm3 - Remove 'struct sm3_state'
  crypto: sm3 - Remove the original "sm3_block_generic()"
  crypto: sm3 - Remove sm3_base.h
  ...
This commit is contained in:
Linus Torvalds
2026-04-13 17:31:39 -07:00
154 changed files with 4888 additions and 4884 deletions

View File

@@ -355,6 +355,8 @@ config CRYPTO_AES
tristate "AES (Advanced Encryption Standard)"
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
select CRYPTO_LIB_AES_CBC_MACS if CRYPTO_CMAC || CRYPTO_XCBC || CRYPTO_CCM
select CRYPTO_HASH if CRYPTO_CMAC || CRYPTO_XCBC || CRYPTO_CCM
help
AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3)
@@ -678,7 +680,7 @@ config CRYPTO_ECB
config CRYPTO_HCTR2
tristate "HCTR2"
select CRYPTO_XCTR
select CRYPTO_LIB_POLYVAL
select CRYPTO_LIB_GF128HASH
select CRYPTO_MANAGER
help
HCTR2 length-preserving encryption mode
@@ -786,7 +788,7 @@ config CRYPTO_GCM
tristate "GCM (Galois/Counter Mode) and GMAC (GCM MAC)"
select CRYPTO_CTR
select CRYPTO_AEAD
select CRYPTO_GHASH
select CRYPTO_LIB_GF128HASH
select CRYPTO_MANAGER
help
GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
@@ -880,13 +882,6 @@ config CRYPTO_CMAC
CMAC (Cipher-based Message Authentication Code) authentication
mode (NIST SP800-38B and IETF RFC4493)
config CRYPTO_GHASH
tristate "GHASH"
select CRYPTO_HASH
select CRYPTO_LIB_GF128MUL
help
GCM GHASH function (NIST SP800-38D)
config CRYPTO_HMAC
tristate "HMAC (Keyed-Hash MAC)"
select CRYPTO_HASH
@@ -973,7 +968,7 @@ config CRYPTO_SHA3
help
SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
config CRYPTO_SM3_GENERIC
config CRYPTO_SM3
tristate "SM3 (ShangMi 3)"
select CRYPTO_HASH
select CRYPTO_LIB_SM3
@@ -1169,8 +1164,8 @@ endif # if CRYPTO_DRBG_MENU
config CRYPTO_JITTERENTROPY
tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)"
select CRYPTO_LIB_SHA3
select CRYPTO_RNG
select CRYPTO_SHA3
help
CPU Jitter RNG (Random Number Generator) from the Jitterentropy library

View File

@@ -82,7 +82,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3.o
obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
obj-$(CONFIG_CRYPTO_SM3) += sm3.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
@@ -170,7 +170,6 @@ UBSAN_SANITIZE_jitterentropy.o = n
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o

View File

@@ -5,8 +5,10 @@
* Copyright 2026 Google LLC
*/
#include <crypto/aes-cbc-macs.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <linux/module.h>
static_assert(__alignof__(struct aes_key) <= CRYPTO_MINALIGN);
@@ -33,6 +35,98 @@ static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
aes_decrypt(key, out, in);
}
static_assert(__alignof__(struct aes_cmac_key) <= CRYPTO_MINALIGN);
#define AES_CMAC_KEY(tfm) ((struct aes_cmac_key *)crypto_shash_ctx(tfm))
#define AES_CMAC_CTX(desc) ((struct aes_cmac_ctx *)shash_desc_ctx(desc))
static int __maybe_unused crypto_aes_cmac_setkey(struct crypto_shash *tfm,
const u8 *in_key,
unsigned int key_len)
{
return aes_cmac_preparekey(AES_CMAC_KEY(tfm), in_key, key_len);
}
static int __maybe_unused crypto_aes_xcbc_setkey(struct crypto_shash *tfm,
const u8 *in_key,
unsigned int key_len)
{
if (key_len != AES_KEYSIZE_128)
return -EINVAL;
aes_xcbcmac_preparekey(AES_CMAC_KEY(tfm), in_key);
return 0;
}
static int __maybe_unused crypto_aes_cmac_init(struct shash_desc *desc)
{
aes_cmac_init(AES_CMAC_CTX(desc), AES_CMAC_KEY(desc->tfm));
return 0;
}
static int __maybe_unused crypto_aes_cmac_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
aes_cmac_update(AES_CMAC_CTX(desc), data, len);
return 0;
}
static int __maybe_unused crypto_aes_cmac_final(struct shash_desc *desc,
u8 *out)
{
aes_cmac_final(AES_CMAC_CTX(desc), out);
return 0;
}
static int __maybe_unused crypto_aes_cmac_digest(struct shash_desc *desc,
const u8 *data,
unsigned int len, u8 *out)
{
aes_cmac(AES_CMAC_KEY(desc->tfm), data, len, out);
return 0;
}
static_assert(__alignof__(struct aes_enckey) <= CRYPTO_MINALIGN);
#define AES_CBCMAC_KEY(tfm) ((struct aes_enckey *)crypto_shash_ctx(tfm))
#define AES_CBCMAC_CTX(desc) ((struct aes_cbcmac_ctx *)shash_desc_ctx(desc))
static int __maybe_unused crypto_aes_cbcmac_setkey(struct crypto_shash *tfm,
const u8 *in_key,
unsigned int key_len)
{
return aes_prepareenckey(AES_CBCMAC_KEY(tfm), in_key, key_len);
}
static int __maybe_unused crypto_aes_cbcmac_init(struct shash_desc *desc)
{
aes_cbcmac_init(AES_CBCMAC_CTX(desc), AES_CBCMAC_KEY(desc->tfm));
return 0;
}
static int __maybe_unused crypto_aes_cbcmac_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
aes_cbcmac_update(AES_CBCMAC_CTX(desc), data, len);
return 0;
}
static int __maybe_unused crypto_aes_cbcmac_final(struct shash_desc *desc,
u8 *out)
{
aes_cbcmac_final(AES_CBCMAC_CTX(desc), out);
return 0;
}
static int __maybe_unused crypto_aes_cbcmac_digest(struct shash_desc *desc,
const u8 *data,
unsigned int len, u8 *out)
{
aes_cbcmac_init(AES_CBCMAC_CTX(desc), AES_CBCMAC_KEY(desc->tfm));
aes_cbcmac_update(AES_CBCMAC_CTX(desc), data, len);
aes_cbcmac_final(AES_CBCMAC_CTX(desc), out);
return 0;
}
static struct crypto_alg alg = {
.cra_name = "aes",
.cra_driver_name = "aes-lib",
@@ -48,19 +142,106 @@ static struct crypto_alg alg = {
.cia_decrypt = crypto_aes_decrypt } }
};
static struct shash_alg mac_algs[] = {
#if IS_ENABLED(CONFIG_CRYPTO_CMAC)
{
.base.cra_name = "cmac(aes)",
.base.cra_driver_name = "cmac-aes-lib",
.base.cra_priority = 300,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aes_cmac_key),
.base.cra_module = THIS_MODULE,
.digestsize = AES_BLOCK_SIZE,
.setkey = crypto_aes_cmac_setkey,
.init = crypto_aes_cmac_init,
.update = crypto_aes_cmac_update,
.final = crypto_aes_cmac_final,
.digest = crypto_aes_cmac_digest,
.descsize = sizeof(struct aes_cmac_ctx),
},
#endif
#if IS_ENABLED(CONFIG_CRYPTO_XCBC)
{
/*
* Note that the only difference between xcbc(aes) and cmac(aes)
* is the preparekey function.
*/
.base.cra_name = "xcbc(aes)",
.base.cra_driver_name = "xcbc-aes-lib",
.base.cra_priority = 300,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aes_cmac_key),
.base.cra_module = THIS_MODULE,
.digestsize = AES_BLOCK_SIZE,
.setkey = crypto_aes_xcbc_setkey,
.init = crypto_aes_cmac_init,
.update = crypto_aes_cmac_update,
.final = crypto_aes_cmac_final,
.digest = crypto_aes_cmac_digest,
.descsize = sizeof(struct aes_cmac_ctx),
},
#endif
#if IS_ENABLED(CONFIG_CRYPTO_CCM)
{
.base.cra_name = "cbcmac(aes)",
.base.cra_driver_name = "cbcmac-aes-lib",
.base.cra_priority = 300,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aes_enckey),
.base.cra_module = THIS_MODULE,
.digestsize = AES_BLOCK_SIZE,
.setkey = crypto_aes_cbcmac_setkey,
.init = crypto_aes_cbcmac_init,
.update = crypto_aes_cbcmac_update,
.final = crypto_aes_cbcmac_final,
.digest = crypto_aes_cbcmac_digest,
.descsize = sizeof(struct aes_cbcmac_ctx),
},
#endif
};
static int __init crypto_aes_mod_init(void)
{
return crypto_register_alg(&alg);
int err = crypto_register_alg(&alg);
if (err)
return err;
if (ARRAY_SIZE(mac_algs) > 0) {
err = crypto_register_shashes(mac_algs, ARRAY_SIZE(mac_algs));
if (err)
goto err_unregister_alg;
} /* Else, CONFIG_CRYPTO_HASH might not be enabled. */
return 0;
err_unregister_alg:
crypto_unregister_alg(&alg);
return err;
}
module_init(crypto_aes_mod_init);
static void __exit crypto_aes_mod_exit(void)
{
if (ARRAY_SIZE(mac_algs) > 0)
crypto_unregister_shashes(mac_algs, ARRAY_SIZE(mac_algs));
crypto_unregister_alg(&alg);
}
module_exit(crypto_aes_mod_exit);
MODULE_DESCRIPTION("Crypto API support for AES block cipher");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("aes-lib");
#if IS_ENABLED(CONFIG_CRYPTO_CMAC)
MODULE_ALIAS_CRYPTO("cmac(aes)");
MODULE_ALIAS_CRYPTO("cmac-aes-lib");
#endif
#if IS_ENABLED(CONFIG_CRYPTO_XCBC)
MODULE_ALIAS_CRYPTO("xcbc(aes)");
MODULE_ALIAS_CRYPTO("xcbc-aes-lib");
#endif
#if IS_ENABLED(CONFIG_CRYPTO_CCM)
MODULE_ALIAS_CRYPTO("cbcmac(aes)");
MODULE_ALIAS_CRYPTO("cbcmac-aes-lib");
#endif

View File

@@ -5,13 +5,11 @@
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
*/
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
#include <crypto/gf128hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -20,12 +18,11 @@
struct gcm_instance_ctx {
struct crypto_skcipher_spawn ctr;
struct crypto_ahash_spawn ghash;
};
struct crypto_gcm_ctx {
struct crypto_skcipher *ctr;
struct crypto_ahash *ghash;
struct ghash_key ghash;
};
struct crypto_rfc4106_ctx {
@@ -52,31 +49,15 @@ struct crypto_rfc4543_req_ctx {
struct aead_request subreq;
};
struct crypto_gcm_ghash_ctx {
unsigned int cryptlen;
struct scatterlist *src;
int (*complete)(struct aead_request *req, u32 flags);
};
struct crypto_gcm_req_priv_ctx {
u8 iv[16];
u8 auth_tag[16];
u8 iauth_tag[16];
struct scatterlist src[3];
struct scatterlist dst[3];
struct scatterlist sg;
struct crypto_gcm_ghash_ctx ghash_ctx;
union {
struct ahash_request ahreq;
struct skcipher_request skreq;
} u;
struct skcipher_request skreq; /* Must be last */
};
static struct {
u8 buf[16];
struct scatterlist sg;
} *gcm_zeroes;
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -89,10 +70,9 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ahash *ghash = ctx->ghash;
struct crypto_skcipher *ctr = ctx->ctr;
struct {
be128 hash;
u8 h[GHASH_BLOCK_SIZE];
u8 iv[16];
struct crypto_wait wait;
@@ -115,14 +95,14 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
return -ENOMEM;
crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
sg_init_one(data->sg, data->h, sizeof(data->h));
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done,
&data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
sizeof(data->hash), data->iv);
sizeof(data->h), data->iv);
err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
&data->wait);
@@ -130,10 +110,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
goto out;
crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
ghash_preparekey(&ctx->ghash, data->h);
out:
kfree_sensitive(data);
return err;
@@ -176,7 +153,7 @@ static void crypto_gcm_init_crypt(struct aead_request *req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct skcipher_request *skreq = &pctx->u.skreq;
struct skcipher_request *skreq = &pctx->skreq;
struct scatterlist *dst;
dst = req->src == req->dst ? pctx->src : pctx->dst;
@@ -187,244 +164,65 @@ static void crypto_gcm_init_crypt(struct aead_request *req,
pctx->iv);
}
static inline unsigned int gcm_remain(unsigned int len)
static void ghash_update_sg_and_pad(struct ghash_ctx *ghash,
struct scatterlist *sg, unsigned int len)
{
len &= 0xfU;
return len ? 16 - len : 0;
static const u8 zeroes[GHASH_BLOCK_SIZE];
if (len) {
unsigned int pad_len = -len % GHASH_BLOCK_SIZE;
struct scatter_walk walk;
scatterwalk_start(&walk, sg);
do {
unsigned int n = scatterwalk_next(&walk, len);
ghash_update(ghash, walk.addr, n);
scatterwalk_done_src(&walk, n);
len -= n;
} while (len);
if (pad_len)
ghash_update(ghash, zeroes, pad_len);
}
}
static void gcm_hash_len_done(void *data, int err);
static int gcm_hash_update(struct aead_request *req,
crypto_completion_t compl,
struct scatterlist *src,
unsigned int len, u32 flags)
static void gcm_hash(struct aead_request *req, struct scatterlist *ctext,
unsigned int datalen, u8 out[GHASH_BLOCK_SIZE])
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
const struct crypto_gcm_ctx *ctx =
crypto_aead_ctx(crypto_aead_reqtfm(req));
__be64 lengths[2] = {
cpu_to_be64(8 * (u64)req->assoclen),
cpu_to_be64(8 * (u64)datalen),
};
struct ghash_ctx ghash;
ahash_request_set_callback(ahreq, flags, compl, req);
ahash_request_set_crypt(ahreq, src, NULL, len);
ghash_init(&ghash, &ctx->ghash);
return crypto_ahash_update(ahreq);
/* Associated data, then zero-padding to the next 16-byte boundary */
ghash_update_sg_and_pad(&ghash, req->src, req->assoclen);
/* Ciphertext, then zero-padding to the next 16-byte boundary */
ghash_update_sg_and_pad(&ghash, ctext, datalen);
/* Lengths block */
ghash_update(&ghash, (const u8 *)lengths, sizeof(lengths));
ghash_final(&ghash, out);
}
static int gcm_hash_remain(struct aead_request *req,
unsigned int remain,
crypto_completion_t compl, u32 flags)
static int gcm_add_auth_tag(struct aead_request *req)
{
return gcm_hash_update(req, compl, &gcm_zeroes->sg, remain, flags);
}
static int gcm_hash_len(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
be128 lengths;
lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(gctx->cryptlen * 8);
memcpy(pctx->iauth_tag, &lengths, 16);
sg_init_one(&pctx->sg, pctx->iauth_tag, 16);
ahash_request_set_callback(ahreq, flags, gcm_hash_len_done, req);
ahash_request_set_crypt(ahreq, &pctx->sg,
pctx->iauth_tag, sizeof(lengths));
return crypto_ahash_finup(ahreq);
}
static int gcm_hash_len_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
return gctx->complete(req, flags);
}
static void gcm_hash_len_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_len_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags)
{
return gcm_hash_len(req, flags) ?:
gcm_hash_len_continue(req, flags);
}
static void gcm_hash_crypt_remain_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_crypt_remain_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int remain;
remain = gcm_remain(gctx->cryptlen);
if (remain)
return gcm_hash_remain(req, remain,
gcm_hash_crypt_remain_done, flags) ?:
gcm_hash_crypt_remain_continue(req, flags);
return gcm_hash_crypt_remain_continue(req, flags);
}
static void gcm_hash_crypt_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_crypt_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
if (gctx->cryptlen)
return gcm_hash_update(req, gcm_hash_crypt_done,
gctx->src, gctx->cryptlen, flags) ?:
gcm_hash_crypt_continue(req, flags);
return gcm_hash_crypt_remain_continue(req, flags);
}
static void gcm_hash_assoc_remain_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_assoc_remain_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags)
{
unsigned int remain;
remain = gcm_remain(req->assoclen);
if (remain)
return gcm_hash_remain(req, remain,
gcm_hash_assoc_remain_done, flags) ?:
gcm_hash_assoc_remain_continue(req, flags);
return gcm_hash_assoc_remain_continue(req, flags);
}
static void gcm_hash_assoc_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_assoc_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_init_continue(struct aead_request *req, u32 flags)
{
if (req->assoclen)
return gcm_hash_update(req, gcm_hash_assoc_done,
req->src, req->assoclen, flags) ?:
gcm_hash_assoc_continue(req, flags);
return gcm_hash_assoc_remain_continue(req, flags);
}
static void gcm_hash_init_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_init_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
ahash_request_set_tfm(ahreq, ctx->ghash);
ahash_request_set_callback(ahreq, flags, gcm_hash_init_done, req);
return crypto_ahash_init(ahreq) ?:
gcm_hash_init_continue(req, flags);
}
static int gcm_enc_copy_hash(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
u8 *auth_tag = pctx->auth_tag;
crypto_xor(auth_tag, pctx->iauth_tag, 16);
scatterwalk_map_and_copy(auth_tag, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
return 0;
}
static int gcm_encrypt_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
gctx->src = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
gctx->cryptlen = req->cryptlen;
gctx->complete = gcm_enc_copy_hash;
return gcm_hash(req, flags);
gcm_hash(req, sg_next(req->src == req->dst ? pctx->src : pctx->dst),
req->cryptlen, pctx->iauth_tag);
crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
memcpy_to_sglist(req->dst, req->assoclen + req->cryptlen,
pctx->auth_tag, crypto_aead_authsize(aead));
return 0;
}
static void gcm_encrypt_done(void *data, int err)
@@ -434,9 +232,7 @@ static void gcm_encrypt_done(void *data, int err)
if (err)
goto out;
err = gcm_encrypt_continue(req, 0);
if (err == -EINPROGRESS)
return;
err = gcm_add_auth_tag(req);
out:
aead_request_complete(req, err);
@@ -445,15 +241,14 @@ out:
static int crypto_gcm_encrypt(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct skcipher_request *skreq = &pctx->u.skreq;
struct skcipher_request *skreq = &pctx->skreq;
u32 flags = aead_request_flags(req);
crypto_gcm_init_common(req);
crypto_gcm_init_crypt(req, req->cryptlen);
skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req);
return crypto_skcipher_encrypt(skreq) ?:
gcm_encrypt_continue(req, flags);
return crypto_skcipher_encrypt(skreq) ?: gcm_add_auth_tag(req);
}
static int crypto_gcm_verify(struct aead_request *req)
@@ -481,35 +276,21 @@ static void gcm_decrypt_done(void *data, int err)
aead_request_complete(req, err);
}
static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct skcipher_request *skreq = &pctx->u.skreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
crypto_gcm_init_crypt(req, gctx->cryptlen);
skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req);
return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
}
static int crypto_gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen;
u32 flags = aead_request_flags(req);
cryptlen -= authsize;
struct skcipher_request *skreq = &pctx->skreq;
unsigned int datalen = req->cryptlen - crypto_aead_authsize(aead);
crypto_gcm_init_common(req);
gctx->src = sg_next(pctx->src);
gctx->cryptlen = cryptlen;
gctx->complete = gcm_dec_hash_continue;
gcm_hash(req, sg_next(pctx->src), datalen, pctx->iauth_tag);
return gcm_hash(req, flags);
crypto_gcm_init_crypt(req, datalen);
skcipher_request_set_callback(skreq, aead_request_flags(req),
gcm_decrypt_done, req);
return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
}
static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
@@ -518,43 +299,26 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
struct gcm_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_skcipher *ctr;
struct crypto_ahash *ghash;
unsigned long align;
int err;
ghash = crypto_spawn_ahash(&ictx->ghash);
if (IS_ERR(ghash))
return PTR_ERR(ghash);
ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_hash;
return PTR_ERR(ctr);
ctx->ctr = ctr;
ctx->ghash = ghash;
align = crypto_aead_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(tfm,
align + offsetof(struct crypto_gcm_req_priv_ctx, u) +
max(sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(ctr),
sizeof(struct ahash_request) +
crypto_ahash_reqsize(ghash)));
align + sizeof(struct crypto_gcm_req_priv_ctx) +
crypto_skcipher_reqsize(ctr));
return 0;
err_free_hash:
crypto_free_ahash(ghash);
return err;
}
static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->ghash);
crypto_free_skcipher(ctx->ctr);
}
@@ -563,20 +327,16 @@ static void crypto_gcm_free(struct aead_instance *inst)
struct gcm_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->ctr);
crypto_drop_ahash(&ctx->ghash);
kfree(inst);
}
static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
const char *ctr_name,
const char *ghash_name)
struct rtattr **tb, const char *ctr_name)
{
struct skcipher_alg_common *ctr;
u32 mask;
struct aead_instance *inst;
struct gcm_instance_ctx *ctx;
struct hash_alg_common *ghash;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
@@ -588,17 +348,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
return -ENOMEM;
ctx = aead_instance_ctx(inst);
err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst),
ghash_name, 0, mask);
if (err)
goto err_free_inst;
ghash = crypto_spawn_ahash_alg(&ctx->ghash);
err = -EINVAL;
if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
ghash->digestsize != 16)
goto err_free_inst;
err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst),
ctr_name, 0, mask);
if (err)
@@ -617,13 +366,11 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->base.cra_driver_name,
ghash->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
"gcm_base(%s,ghash-lib)",
ctr->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_priority = ctr->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = ctr->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
@@ -660,7 +407,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
return crypto_gcm_create_common(tmpl, tb, ctr_name);
}
static int crypto_gcm_base_create(struct crypto_template *tmpl,
@@ -677,7 +424,16 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
if (IS_ERR(ghash_name))
return PTR_ERR(ghash_name);
return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
/*
* Originally this parameter allowed requesting a specific
* implementation of GHASH. This is no longer supported. Now the best
* implementation of GHASH is just always used.
*/
if (strcmp(ghash_name, "ghash") != 0 &&
strcmp(ghash_name, "ghash-lib") != 0)
return -EINVAL;
return crypto_gcm_create_common(tmpl, tb, ctr_name);
}
static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1096,25 +852,12 @@ static struct crypto_template crypto_gcm_tmpls[] = {
static int __init crypto_gcm_module_init(void)
{
int err;
gcm_zeroes = kzalloc_obj(*gcm_zeroes);
if (!gcm_zeroes)
return -ENOMEM;
sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf));
err = crypto_register_templates(crypto_gcm_tmpls,
ARRAY_SIZE(crypto_gcm_tmpls));
if (err)
kfree(gcm_zeroes);
return err;
return crypto_register_templates(crypto_gcm_tmpls,
ARRAY_SIZE(crypto_gcm_tmpls));
}
static void __exit crypto_gcm_module_exit(void)
{
kfree(gcm_zeroes);
crypto_unregister_templates(crypto_gcm_tmpls,
ARRAY_SIZE(crypto_gcm_tmpls));
}

View File

@@ -1,162 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* GHASH: hash function for GCM (Galois/Counter Mode).
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*/
/*
* GHASH is a keyed hash function used in GCM authentication tag generation.
*
* The original GCM paper [1] presents GHASH as a function GHASH(H, A, C) which
* takes a 16-byte hash key H, additional authenticated data A, and a ciphertext
* C. It formats A and C into a single byte string X, interprets X as a
* polynomial over GF(2^128), and evaluates this polynomial at the point H.
*
* However, the NIST standard for GCM [2] presents GHASH as GHASH(H, X) where X
* is the already-formatted byte string containing both A and C.
*
* "ghash" in the Linux crypto API uses the 'X' (pre-formatted) convention,
* since the API supports only a single data stream per hash. Thus, the
* formatting of 'A' and 'C' is done in the "gcm" template, not in "ghash".
*
* The reason "ghash" is separate from "gcm" is to allow "gcm" to use an
* accelerated "ghash" when a standalone accelerated "gcm(aes)" is unavailable.
* It is generally inappropriate to use "ghash" for other purposes, since it is
* an "ε-almost-XOR-universal hash function", not a cryptographic hash function.
* It can only be used securely in crypto modes specially designed to use it.
*
* [1] The Galois/Counter Mode of Operation (GCM)
* (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.694.695&rep=rep1&type=pdf)
* [2] Recommendation for Block Cipher Modes of Operation: Galois/Counter Mode (GCM) and GMAC
* (https://csrc.nist.gov/publications/detail/sp/800-38d/final)
*/
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
be128 k;
if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
ctx->gf128 = gf128mul_init_4k_lle(&k);
memzero_explicit(&k, GHASH_BLOCK_SIZE);
if (!ctx->gf128)
return -ENOMEM;
return 0;
}
/*
 * shash .update: absorb full blocks of input into the accumulator.
 *
 * For each 16-byte block: XOR the block into the running state, then
 * multiply the state by the hash key H in GF(2^128) using the 4k table.
 *
 * Returns the number of unprocessed tail bytes (< GHASH_BLOCK_SIZE); the
 * shash core buffers that tail for the final call.  The do/while relies on
 * the caller always supplying at least one full block, which the
 * CRYPTO_AHASH_ALG_BLOCK_ONLY flag (set in ghash_alg below) is expected to
 * guarantee — NOTE(review): confirm against the shash core's contract.
 */
static int ghash_update(struct shash_desc *desc,
			const u8 *src, unsigned int srclen)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *dst = dctx->buffer;

	do {
		crypto_xor(dst, src, GHASH_BLOCK_SIZE);
		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
		src += GHASH_BLOCK_SIZE;
		srclen -= GHASH_BLOCK_SIZE;
	} while (srclen >= GHASH_BLOCK_SIZE);

	return srclen;
}
/*
 * Absorb a final partial block, if present.  XORing only @len bytes into
 * the accumulator is equivalent to processing the tail zero-padded to a
 * full block, after which the state is multiplied by H once more.  A zero
 * @len means there is no tail and the state is left untouched.
 */
static void ghash_flush(struct shash_desc *desc, const u8 *src,
			unsigned int len)
{
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	if (!len)
		return;

	crypto_xor(dctx->buffer, src, len);
	gf128mul_4k_lle((be128 *)dctx->buffer, ctx->gf128);
}
/*
 * shash .finup: fold in any remaining partial block, then copy the
 * 16-byte accumulator out as the digest.
 */
static int ghash_finup(struct shash_desc *desc, const u8 *src,
		       unsigned int len, u8 *dst)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	ghash_flush(desc, src, len);
	memcpy(dst, dctx->buffer, GHASH_BLOCK_SIZE);
	return 0;
}
/*
 * Tfm destructor: free the 4 KiB key table.  ctx->gf128 is NULL when no
 * key was ever successfully set (or the last setkey's allocation failed),
 * hence the guard before gf128mul_free_4k().
 */
static void ghash_exit_tfm(struct crypto_tfm *tfm)
{
	struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->gf128)
		gf128mul_free_4k(ctx->gf128);
}
/*
 * The generic "ghash" shash algorithm.  CRYPTO_AHASH_ALG_BLOCK_ONLY makes
 * the core deliver data to .update in whole blocks and route the leftover
 * tail to .finup, matching the contract ghash_update/ghash_finup rely on.
 * Priority 100 is the conventional baseline so accelerated drivers win.
 */
static struct shash_alg ghash_alg = {
	.digestsize	= GHASH_DIGEST_SIZE,
	.init		= ghash_init,
	.update		= ghash_update,
	.finup		= ghash_finup,
	.setkey		= ghash_setkey,
	.descsize	= sizeof(struct ghash_desc_ctx),
	.base		= {
		.cra_name		= "ghash",
		.cra_driver_name	= "ghash-generic",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY,
		.cra_blocksize		= GHASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct ghash_ctx),
		.cra_module		= THIS_MODULE,
		.cra_exit		= ghash_exit_tfm,
	},
};
/* Module load: register the generic "ghash" implementation. */
static int __init ghash_mod_init(void)
{
	return crypto_register_shash(&ghash_alg);
}

/* Module unload: unregister it again. */
static void __exit ghash_mod_exit(void)
{
	crypto_unregister_shash(&ghash_alg);
}

module_init(ghash_mod_init);
module_exit(ghash_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function");
MODULE_ALIAS_CRYPTO("ghash");
MODULE_ALIAS_CRYPTO("ghash-generic");

View File

@@ -16,9 +16,9 @@
* (https://eprint.iacr.org/2021/1441.pdf)
*/
#include <crypto/gf128hash.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/polyval.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>

View File

@@ -37,7 +37,6 @@
* DAMAGE.
*/
#include <crypto/hash.h>
#include <crypto/sha3.h>
#include <linux/fips.h>
#include <linux/kernel.h>
@@ -48,8 +47,6 @@
#include "jitterentropy.h"
#define JENT_CONDITIONING_HASH "sha3-256"
/***************************************************************************
* Helper function
***************************************************************************/
@@ -101,22 +98,14 @@ void jent_get_nstime(__u64 *out)
jent_raw_hires_entropy_store(tmp);
}
int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
unsigned int addtl_len, __u64 hash_loop_cnt,
unsigned int stuck)
void jent_hash_time(struct sha3_ctx *hash_state, __u64 time, u8 *addtl,
unsigned int addtl_len, __u64 hash_loop_cnt,
unsigned int stuck)
{
struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm);
struct sha3_ctx tmp_state; /* zeroized by sha3_final() */
u8 intermediary[SHA3_256_DIGEST_SIZE];
__u64 j = 0;
int ret;
desc->tfm = hash_state_desc->tfm;
if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) {
pr_warn_ratelimited("Unexpected digest size\n");
return -EINVAL;
}
kmsan_unpoison_memory(intermediary, sizeof(intermediary));
/*
@@ -130,24 +119,20 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
*
* Note, it does not matter which or how much data you inject, we are
* interested in one Keccack1600 compression operation performed with
* the crypto_shash_final.
* the sha3_final.
*/
for (j = 0; j < hash_loop_cnt; j++) {
ret = crypto_shash_init(desc) ?:
crypto_shash_update(desc, intermediary,
sizeof(intermediary)) ?:
crypto_shash_finup(desc, addtl, addtl_len, intermediary);
if (ret)
goto err;
sha3_256_init(&tmp_state);
sha3_update(&tmp_state, intermediary, sizeof(intermediary));
sha3_update(&tmp_state, addtl, addtl_len);
sha3_final(&tmp_state, intermediary);
}
/*
* Inject the data from the previous loop into the pool. This data is
* not considered to contain any entropy, but it stirs the pool a bit.
*/
ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary));
if (ret)
goto err;
sha3_update(hash_state, intermediary, sizeof(intermediary));
/*
* Insert the time stamp into the hash context representing the pool.
@@ -162,30 +147,24 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
time = 0;
}
ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64));
err:
shash_desc_zero(desc);
sha3_update(hash_state, (u8 *)&time, sizeof(__u64));
memzero_explicit(intermediary, sizeof(intermediary));
return ret;
}
int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len)
void jent_read_random_block(struct sha3_ctx *hash_state, char *dst,
unsigned int dst_len)
{
struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
u8 jent_block[SHA3_256_DIGEST_SIZE];
/* Obtain data from entropy pool and re-initialize it */
int ret = crypto_shash_final(hash_state_desc, jent_block) ?:
crypto_shash_init(hash_state_desc) ?:
crypto_shash_update(hash_state_desc, jent_block,
sizeof(jent_block));
if (!ret && dst_len)
/* Obtain data from entropy pool and re-initialize it */
sha3_final(hash_state, jent_block);
sha3_256_init(hash_state);
sha3_update(hash_state, jent_block, sizeof(jent_block));
if (dst_len)
memcpy(dst, jent_block, dst_len);
memzero_explicit(jent_block, sizeof(jent_block));
return ret;
}
/***************************************************************************
@@ -195,8 +174,7 @@ int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
struct crypto_shash *tfm;
struct shash_desc *sdesc;
struct sha3_ctx hash_state;
};
static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
@@ -205,15 +183,7 @@ static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
spin_lock(&rng->jent_lock);
if (rng->sdesc) {
shash_desc_zero(rng->sdesc);
kfree(rng->sdesc);
}
rng->sdesc = NULL;
if (rng->tfm)
crypto_free_shash(rng->tfm);
rng->tfm = NULL;
memzero_explicit(&rng->hash_state, sizeof(rng->hash_state));
if (rng->entropy_collector)
jent_entropy_collector_free(rng->entropy_collector);
@@ -224,34 +194,15 @@ static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
static int jent_kcapi_init(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
struct crypto_shash *hash;
struct shash_desc *sdesc;
int size, ret = 0;
int ret = 0;
spin_lock_init(&rng->jent_lock);
/* Use SHA3-256 as conditioner */
hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
if (IS_ERR(hash)) {
pr_err("Cannot allocate conditioning digest\n");
return PTR_ERR(hash);
}
rng->tfm = hash;
sha3_256_init(&rng->hash_state);
size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
ret = -ENOMEM;
goto err;
}
sdesc->tfm = hash;
crypto_shash_init(sdesc);
rng->sdesc = sdesc;
rng->entropy_collector =
jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0,
sdesc);
rng->entropy_collector = jent_entropy_collector_alloc(
CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, &rng->hash_state);
if (!rng->entropy_collector) {
ret = -ENOMEM;
goto err;
@@ -326,23 +277,16 @@ static struct rng_alg jent_alg = {
static int __init jent_mod_init(void)
{
SHASH_DESC_ON_STACK(desc, tfm);
struct crypto_shash *tfm;
struct sha3_ctx hash_state;
int ret = 0;
jent_testing_init();
tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
if (IS_ERR(tfm)) {
jent_testing_exit();
return PTR_ERR(tfm);
}
sha3_256_init(&hash_state);
desc->tfm = tfm;
crypto_shash_init(desc);
ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL);
shash_desc_zero(desc);
crypto_free_shash(tfm);
ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, &hash_state,
NULL);
memzero_explicit(&hash_state, sizeof(hash_state));
if (ret) {
/* Handle permanent health test error */
if (fips_enabled)

View File

@@ -68,7 +68,7 @@ struct rand_data {
* of the RNG are marked as SENSITIVE. A user must not
* access that information while the RNG executes its loops to
* calculate the next random value. */
void *hash_state; /* SENSITIVE hash state entropy pool */
struct sha3_ctx *hash_state; /* SENSITIVE hash state entropy pool */
__u64 prev_time; /* SENSITIVE Previous time stamp */
__u64 last_delta; /* SENSITIVE stuck test */
__s64 last_delta2; /* SENSITIVE stuck test */
@@ -417,10 +417,9 @@ static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min)
* time [in] time stamp to be injected
* stuck [in] Is the time stamp identified as stuck?
*
* Output:
* updated hash context in the entropy collector or error code
* Output: updated hash context in the entropy collector
*/
static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
static void jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
{
#define SHA3_HASH_LOOP (1<<3)
struct {
@@ -435,8 +434,8 @@ static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
ec->apt_base
};
return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl),
SHA3_HASH_LOOP, stuck);
jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl),
SHA3_HASH_LOOP, stuck);
}
/*
@@ -538,8 +537,7 @@ static int jent_measure_jitter(struct rand_data *ec, __u64 *ret_current_delta)
stuck = jent_stuck(ec, current_delta);
/* Now call the next noise sources which also injects the data */
if (jent_condition_data(ec, current_delta, stuck))
stuck = 1;
jent_condition_data(ec, current_delta, stuck);
/* return the raw entropy value */
if (ret_current_delta)
@@ -597,7 +595,7 @@ static void jent_gen_entropy(struct rand_data *ec)
* @return 0 when request is fulfilled or an error
*
* The following error codes can occur:
* -1 entropy_collector is NULL or the generation failed
* -1 entropy_collector is NULL
* -2 Intermittent health failure
* -3 Permanent health failure
*/
@@ -640,8 +638,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
}
tocopy = min(DATA_SIZE_BITS / 8, len);
if (jent_read_random_block(ec->hash_state, p, tocopy))
return -1;
jent_read_random_block(ec->hash_state, p, tocopy);
len -= tocopy;
p += tocopy;
@@ -656,7 +653,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
unsigned int flags,
void *hash_state)
struct sha3_ctx *hash_state)
{
struct rand_data *entropy_collector;
@@ -704,8 +701,8 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector)
jent_zfree(entropy_collector);
}
int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state,
struct rand_data *p_ec)
int jent_entropy_init(unsigned int osr, unsigned int flags,
struct sha3_ctx *hash_state, struct rand_data *p_ec)
{
/*
* If caller provides an allocated ec, reuse it which implies that the

View File

@@ -1,24 +1,27 @@
// SPDX-License-Identifier: GPL-2.0-or-later
struct sha3_ctx;
extern void *jent_kvzalloc(unsigned int len);
extern void jent_kvzfree(void *ptr, unsigned int len);
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
extern void jent_get_nstime(__u64 *out);
extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
unsigned int addtl_len, __u64 hash_loop_cnt,
unsigned int stuck);
int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len);
void jent_hash_time(struct sha3_ctx *hash_state, __u64 time, u8 *addtl,
unsigned int addtl_len, __u64 hash_loop_cnt,
unsigned int stuck);
void jent_read_random_block(struct sha3_ctx *hash_state, char *dst,
unsigned int dst_len);
struct rand_data;
extern int jent_entropy_init(unsigned int osr, unsigned int flags,
void *hash_state, struct rand_data *p_ec);
struct sha3_ctx *hash_state,
struct rand_data *p_ec);
extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len);
extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
unsigned int flags,
void *hash_state);
extern struct rand_data *
jent_entropy_collector_alloc(unsigned int osr, unsigned int flags,
struct sha3_ctx *hash_state);
extern void jent_entropy_collector_free(struct rand_data *entropy_collector);
#ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE

89
crypto/sm3.c Normal file
View File

@@ -0,0 +1,89 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
* described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
*
* Copyright (C) 2017 ARM Limited or its affiliates.
* Written by Gilad Ben-Yossef <gilad@benyossef.com>
* Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
* Copyright 2026 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/sm3.h>
#include <linux/kernel.h>
#include <linux/module.h>
#define SM3_CTX(desc) ((struct sm3_ctx *)shash_desc_ctx(desc))
/* shash .init: start a fresh SM3 computation in the descriptor context. */
static int crypto_sm3_init(struct shash_desc *desc)
{
	struct sm3_ctx *ctx = SM3_CTX(desc);

	sm3_init(ctx);
	return 0;
}
/* shash .update: feed @len bytes of @data into the SM3 library context. */
static int crypto_sm3_update(struct shash_desc *desc,
			     const u8 *data, unsigned int len)
{
	struct sm3_ctx *ctx = SM3_CTX(desc);

	sm3_update(ctx, data, len);
	return 0;
}
/* shash .final: produce the SM3 digest into @out via the library. */
static int crypto_sm3_final(struct shash_desc *desc, u8 *out)
{
	struct sm3_ctx *ctx = SM3_CTX(desc);

	sm3_final(ctx, out);
	return 0;
}
/*
 * shash .digest: one-shot hash.  Bypasses the descriptor state entirely
 * and uses the sm3() library convenience function.
 */
static int crypto_sm3_digest(struct shash_desc *desc,
			     const u8 *data, unsigned int len, u8 *out)
{
	sm3(data, len, out);
	return 0;
}
static int crypto_sm3_export_core(struct shash_desc *desc, void *out)
{
memcpy(out, SM3_CTX(desc), sizeof(struct sm3_ctx));
return 0;
}
/* shash .import_core: restore a previously exported library context. */
static int crypto_sm3_import_core(struct shash_desc *desc, const void *in)
{
	struct sm3_ctx *ctx = SM3_CTX(desc);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}
/*
 * "sm3" shash backed by the lib/crypto SM3 implementation.  Priority 300
 * so it is preferred over lower-priority generic drivers; export/import
 * simply memcpy the library context, which serves as the partial-hash
 * state format.
 */
static struct shash_alg sm3_alg = {
	.base.cra_name		= "sm3",
	.base.cra_driver_name	= "sm3-lib",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= SM3_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
	.digestsize		= SM3_DIGEST_SIZE,
	.init			= crypto_sm3_init,
	.update			= crypto_sm3_update,
	.final			= crypto_sm3_final,
	.digest			= crypto_sm3_digest,
	.export_core		= crypto_sm3_export_core,
	.import_core		= crypto_sm3_import_core,
	.descsize		= sizeof(struct sm3_ctx),
};
/* Module load: register the library-backed "sm3" shash. */
static int __init crypto_sm3_mod_init(void)
{
	return crypto_register_shash(&sm3_alg);
}
module_init(crypto_sm3_mod_init);

/* Module unload: unregister it again. */
static void __exit crypto_sm3_mod_exit(void)
{
	crypto_unregister_shash(&sm3_alg);
}
module_exit(crypto_sm3_mod_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Crypto API support for SM3");
MODULE_ALIAS_CRYPTO("sm3");
MODULE_ALIAS_CRYPTO("sm3-lib");

View File

@@ -1,72 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
* described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
*
* Copyright (C) 2017 ARM Limited or its affiliates.
* Written by Gilad Ben-Yossef <gilad@benyossef.com>
* Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#include <crypto/internal/hash.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/kernel.h>
#include <linux/module.h>
const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
};
EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
static int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic);
}
static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
sm3_base_do_finup(desc, data, len, sm3_block_generic);
return sm3_base_finish(desc, hash);
}
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
.finup = crypto_sm3_finup,
.descsize = SM3_STATE_SIZE,
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sm3_generic_mod_init(void)
{
return crypto_register_shash(&sm3_alg);
}
static void __exit sm3_generic_mod_fini(void)
{
crypto_unregister_shash(&sm3_alg);
}
module_init(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sm3");
MODULE_ALIAS_CRYPTO("sm3-generic");

View File

@@ -1650,10 +1650,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("rfc4309(ccm(aes))"));
break;
case 46:
ret = min(ret, tcrypt_test("ghash"));
break;
case 48:
ret = min(ret, tcrypt_test("sha3-224"));
break;
@@ -2251,11 +2247,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("blake2b-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 318:
klen = 16;
test_hash_speed("ghash", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 319:
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;

View File

@@ -4388,7 +4388,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
#endif
.alg = "cbcmac(aes)",
.generic_driver = "cbcmac(aes-lib)",
.generic_driver = "cbcmac-aes-lib",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_cbcmac_tv_template)
@@ -4401,7 +4401,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ccm(aes)",
.generic_driver = "ccm_base(ctr(aes-lib),cbcmac(aes-lib))",
.generic_driver = "ccm_base(ctr(aes-lib),cbcmac-aes-lib)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4429,7 +4429,7 @@ static const struct alg_test_desc alg_test_descs[] = {
},
}, {
.alg = "cmac(aes)",
.generic_driver = "cmac(aes-lib)",
.generic_driver = "cmac-aes-lib",
.fips_allowed = 1,
.test = alg_test_hash,
.suite = {
@@ -4965,7 +4965,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
.alg = "gcm(aes)",
.generic_driver = "gcm_base(ctr(aes-lib),ghash-generic)",
.generic_driver = "gcm_base(ctr(aes-lib),ghash-lib)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4973,24 +4973,18 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "gcm(aria)",
.generic_driver = "gcm_base(ctr(aria-generic),ghash-generic)",
.generic_driver = "gcm_base(ctr(aria-generic),ghash-lib)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(aria_gcm_tv_template)
}
}, {
.alg = "gcm(sm4)",
.generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)",
.generic_driver = "gcm_base(ctr(sm4-generic),ghash-lib)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(sm4_gcm_tv_template)
}
}, {
.alg = "ghash",
.test = alg_test_hash,
.suite = {
.hash = __VECS(ghash_tv_template)
}
}, {
.alg = "hctr2(aes)",
.generic_driver = "hctr2_base(xctr(aes-lib),polyval-lib)",
@@ -5085,6 +5079,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sm3)",
.generic_driver = "hmac(sm3-lib)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_sm3_tv_template)
@@ -5314,7 +5309,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4106(gcm(aes))",
.generic_driver = "rfc4106(gcm_base(ctr(aes-lib),ghash-generic))",
.generic_driver = "rfc4106(gcm_base(ctr(aes-lib),ghash-lib))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -5326,7 +5321,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4309(ccm(aes))",
.generic_driver = "rfc4309(ccm_base(ctr(aes-lib),cbcmac(aes-lib)))",
.generic_driver = "rfc4309(ccm_base(ctr(aes-lib),cbcmac-aes-lib))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -5338,7 +5333,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4543(gcm(aes))",
.generic_driver = "rfc4543(gcm_base(ctr(aes-lib),ghash-generic))",
.generic_driver = "rfc4543(gcm_base(ctr(aes-lib),ghash-lib))",
.test = alg_test_aead,
.suite = {
.aead = {
@@ -5452,6 +5447,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sm3",
.generic_driver = "sm3-lib",
.test = alg_test_hash,
.suite = {
.hash = __VECS(sm3_tv_template)
@@ -5515,7 +5511,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "xcbc(aes)",
.generic_driver = "xcbc(aes-lib)",
.generic_driver = "xcbc-aes-lib",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_xcbc128_tv_template)

View File

@@ -6183,115 +6183,6 @@ static const struct hash_testvec wp256_tv_template[] = {
},
};
static const struct hash_testvec ghash_tv_template[] =
{
{
.key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
"\xff\xca\xff\x95\xf8\x30\xf0\x61",
.ksize = 16,
.plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
"\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
.psize = 16,
.digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
"\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
}, {
.key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
.ksize = 16,
.plaintext = "what do ya want for nothing?",
.psize = 28,
.digest = "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce"
"\x0d\x61\x06\x27\x66\x51\xd5\xe2",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 16,
.plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
.psize = 50,
.digest = "\xfb\x49\x8a\x36\xe1\x96\xe1\x96"
"\xe1\x96\xe1\x96\xe1\x96\xe1\x96",
}, {
.key = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
"\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
.ksize = 16,
.plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
.psize = 50,
.digest = "\x2b\x5c\x0c\x7f\x52\xd1\x60\xc2"
"\x49\xed\x6e\x32\x7a\xa9\xbe\x08",
}, {
.key = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
"\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
.ksize = 16,
.plaintext = "Test With Truncation",
.psize = 20,
.digest = "\xf8\x94\x87\x2a\x4b\x63\x99\x28"
"\x23\xf7\x93\xf7\x19\xf5\x96\xd9",
}, {
.key = "\x0a\x1b\x2c\x3d\x4e\x5f\x64\x71"
"\x82\x93\xa4\xb5\xc6\xd7\xe8\xf9",
.ksize = 16,
.plaintext = "\x56\x6f\x72\x20\x6c\x61\x75\x74"
"\x65\x72\x20\x4c\x61\x75\x73\x63"
"\x68\x65\x6e\x20\x75\x6e\x64\x20"
"\x53\x74\x61\x75\x6e\x65\x6e\x20"
"\x73\x65\x69\x20\x73\x74\x69\x6c"
"\x6c\x2c\x0a\x64\x75\x20\x6d\x65"
"\x69\x6e\x20\x74\x69\x65\x66\x74"
"\x69\x65\x66\x65\x73\x20\x4c\x65"
"\x62\x65\x6e\x3b\x0a\x64\x61\x73"
"\x73\x20\x64\x75\x20\x77\x65\x69"
"\xc3\x9f\x74\x20\x77\x61\x73\x20"
"\x64\x65\x72\x20\x57\x69\x6e\x64"
"\x20\x64\x69\x72\x20\x77\x69\x6c"
"\x6c\x2c\x0a\x65\x68\x20\x6e\x6f"
"\x63\x68\x20\x64\x69\x65\x20\x42"
"\x69\x72\x6b\x65\x6e\x20\x62\x65"
"\x62\x65\x6e\x2e\x0a\x0a\x55\x6e"
"\x64\x20\x77\x65\x6e\x6e\x20\x64"
"\x69\x72\x20\x65\x69\x6e\x6d\x61"
"\x6c\x20\x64\x61\x73\x20\x53\x63"
"\x68\x77\x65\x69\x67\x65\x6e\x20"
"\x73\x70\x72\x61\x63\x68\x2c\x0a"
"\x6c\x61\x73\x73\x20\x64\x65\x69"
"\x6e\x65\x20\x53\x69\x6e\x6e\x65"
"\x20\x62\x65\x73\x69\x65\x67\x65"
"\x6e\x2e\x0a\x4a\x65\x64\x65\x6d"
"\x20\x48\x61\x75\x63\x68\x65\x20"
"\x67\x69\x62\x74\x20\x64\x69\x63"
"\x68\x2c\x20\x67\x69\x62\x20\x6e"
"\x61\x63\x68\x2c\x0a\x65\x72\x20"
"\x77\x69\x72\x64\x20\x64\x69\x63"
"\x68\x20\x6c\x69\x65\x62\x65\x6e"
"\x20\x75\x6e\x64\x20\x77\x69\x65"
"\x67\x65\x6e\x2e\x0a\x0a\x55\x6e"
"\x64\x20\x64\x61\x6e\x6e\x20\x6d"
"\x65\x69\x6e\x65\x20\x53\x65\x65"
"\x6c\x65\x20\x73\x65\x69\x74\x20"
"\x77\x65\x69\x74\x2c\x20\x73\x65"
"\x69\x20\x77\x65\x69\x74\x2c\x0a"
"\x64\x61\x73\x73\x20\x64\x69\x72"
"\x20\x64\x61\x73\x20\x4c\x65\x62"
"\x65\x6e\x20\x67\x65\x6c\x69\x6e"
"\x67\x65\x2c\x0a\x62\x72\x65\x69"
"\x74\x65\x20\x64\x69\x63\x68\x20"
"\x77\x69\x65\x20\x65\x69\x6e\x20"
"\x46\x65\x69\x65\x72\x6b\x6c\x65"
"\x69\x64\x0a\xc3\xbc\x62\x65\x72"
"\x20\x64\x69\x65\x20\x73\x69\x6e"
"\x6e\x65\x6e\x64\x65\x6e\x20\x44"
"\x69\x6e\x67\x65\x2e\x2e\x2e\x0a",
.psize = 400,
.digest = "\xad\xb1\xc1\xe9\x56\x70\x31\x1d"
"\xbb\x5b\xdf\x5e\x70\x72\x1a\x57",
},
};
/*
* HMAC-MD5 test vectors from RFC2202
* (These need to be fixed to not use strlen).