Merge tag 'nvme-7.1-2026-03-27' of git://git.infradead.org/nvme into for-7.1/block

Pull NVMe updates from Keith:

"- Fabrics authentication updates (Eric, Alistar)
 - Enhanced block queue limits support (Caleb)
 - Workqueue usage updates (Marco)
 - A new write zeroes device quirk (Robert)
 - Tagset cleanup fix for loop device (Nilay)"

* tag 'nvme-7.1-2026-03-27' of git://git.infradead.org/nvme: (41 commits)
  nvme-loop: do not cancel I/O and admin tagset during ctrl reset/shutdown
  nvme: add WQ_PERCPU to alloc_workqueue users
  nvmet-fc: add WQ_PERCPU to alloc_workqueue users
  nvmet: replace use of system_wq with system_percpu_wq
  nvme-auth: Don't propose NVME_AUTH_DHGROUP_NULL with SC_C
  nvme: Add the DHCHAP maximum HD IDs
  nvme-pci: add NVME_QUIRK_DISABLE_WRITE_ZEROES for Kingston OM3SGP4
  nvme: respect NVME_QUIRK_DISABLE_WRITE_ZEROES when wzsl is set
  nvmet: report NPDGL and NPDAL
  nvmet: use NVME_NS_FEAT_OPTPERF_SHIFT
  nvme: set discard_granularity from NPDG/NPDA
  nvme: add from0based() helper
  nvme: always issue I/O Command Set specific Identify Namespace
  nvme: update nvme_id_ns OPTPERF constants
  nvme: fold nvme_config_discard() into nvme_update_disk_info()
  nvme: add preferred I/O size fields to struct nvme_id_ns_nvm
  nvme: Allow reauth from sysfs
  nvme: Expose the tls_configured sysfs for secure concat connections
  nvmet-tcp: Don't free SQ on authentication success
  nvmet-tcp: Don't error if TLS is enabled on a reset
  ...
This commit is contained in:
Jens Axboe
2026-03-27 09:51:17 -06:00
27 changed files with 768 additions and 1302 deletions

View File

@@ -0,0 +1,13 @@
What: /sys/devices/virtual/nvme-fabrics/ctl/.../tls_configured_key
Date: November 2025
KernelVersion: 6.19
Contact: Linux NVMe mailing list <linux-nvme@lists.infradead.org>
Description:
The file is available when using a secure concatenation
connection to an NVMe target. Reading the file will return
the serial of the currently negotiated key.
Writing 0 to the file will trigger a PSK reauthentication
(REPLACETLSPSK) with the target. After a reauthentication
the value returned by tls_configured_key will be the new
serial.

View File

@@ -141,12 +141,6 @@ config CRYPTO_ACOMP
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
config CRYPTO_HKDF
tristate
select CRYPTO_SHA256 if CRYPTO_SELFTESTS
select CRYPTO_SHA512 if CRYPTO_SELFTESTS
select CRYPTO_HASH2
config CRYPTO_MANAGER
tristate
default CRYPTO_ALGAPI if CRYPTO_SELFTESTS

View File

@@ -36,7 +36,6 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
obj-$(CONFIG_CRYPTO_SIG2) += sig.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
obj-$(CONFIG_CRYPTO_HKDF) += hkdf.o
dh_generic-y := dh.o
dh_generic-y += dh_helper.o

View File

@@ -1,573 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation
* Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
* "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
*
* Copyright 2019 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <crypto/hkdf.h>
#include <linux/module.h>
/*
* HKDF consists of two steps:
*
* 1. HKDF-Extract: extract a pseudorandom key from the input keying material
* and optional salt.
* 2. HKDF-Expand: expand the pseudorandom key into output keying material of
* any length, parameterized by an application-specific info string.
*
*/
/**
* hkdf_extract - HKDF-Extract (RFC 5869 section 2.2)
* @hmac_tfm: an HMAC transform using the hash function desired for HKDF. The
* caller is responsible for setting the @prk afterwards.
* @ikm: input keying material
* @ikmlen: length of @ikm
* @salt: input salt value
* @saltlen: length of @salt
* @prk: resulting pseudorandom key
*
* Extracts a pseudorandom key @prk from the input keying material
* @ikm with length @ikmlen and salt @salt with length @saltlen.
* The length of @prk is given by the digest size of @hmac_tfm.
* For an 'unsalted' version of HKDF-Extract @salt must be set
* to all zeroes and @saltlen must be set to the length of @prk.
*
* Returns 0 on success with the pseudorandom key stored in @prk,
* or a negative errno value otherwise.
*/
int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
		 unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
		 u8 *prk)
{
	int err;

	/* PRK = HMAC-Hash(salt, IKM); the salt serves as the HMAC key. */
	err = crypto_shash_setkey(hmac_tfm, salt, saltlen);
	if (err)
		return err;

	return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
}
EXPORT_SYMBOL_GPL(hkdf_extract);
/**
* hkdf_expand - HKDF-Expand (RFC 5869 section 2.3)
* @hmac_tfm: hash context keyed with pseudorandom key
* @info: application-specific information
* @infolen: length of @info
* @okm: output keying material
* @okmlen: length of @okm
*
* This expands the pseudorandom key, which was already keyed into @hmac_tfm,
* into @okmlen bytes of output keying material parameterized by the
* application-specific @info of length @infolen bytes.
* This is thread-safe and may be called by multiple threads in parallel.
*
* Returns 0 on success with output keying material stored in @okm,
* or a negative errno value otherwise.
*/
int hkdf_expand(struct crypto_shash *hmac_tfm,
		const u8 *info, unsigned int infolen,
		u8 *okm, unsigned int okmlen)
{
	SHASH_DESC_ON_STACK(desc, hmac_tfm);
	unsigned int i, hashlen = crypto_shash_digestsize(hmac_tfm);
	int err;
	const u8 *prev = NULL;		/* T(n-1): previous output block; NULL before T(1) */
	u8 counter = 1;			/* RFC 5869: the block counter starts at 0x01 */
	u8 tmp[HASH_MAX_DIGESTSIZE] = {};

	/* RFC 5869 section 2.3 limits the output to 255 hash-length blocks. */
	if (WARN_ON(okmlen > 255 * hashlen))
		return -EINVAL;

	desc->tfm = hmac_tfm;

	/* T(n) = HMAC-PRK(T(n-1) || info || n), concatenated into @okm. */
	for (i = 0; i < okmlen; i += hashlen) {
		err = crypto_shash_init(desc);
		if (err)
			goto out;

		if (prev) {
			err = crypto_shash_update(desc, prev, hashlen);
			if (err)
				goto out;
		}

		if (infolen) {
			err = crypto_shash_update(desc, info, infolen);
			if (err)
				goto out;
		}

		BUILD_BUG_ON(sizeof(counter) != 1);
		if (okmlen - i < hashlen) {
			/* Final partial block: hash into tmp, copy only the tail. */
			err = crypto_shash_finup(desc, &counter, 1, tmp);
			if (err)
				goto out;
			memcpy(&okm[i], tmp, okmlen - i);
			memzero_explicit(tmp, sizeof(tmp));
		} else {
			err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
			if (err)
				goto out;
		}

		counter++;
		prev = &okm[i];
	}
	err = 0;
out:
	if (unlikely(err))
		memzero_explicit(okm, okmlen); /* so caller doesn't need to */
	/* Zeroize intermediate state: the desc and tmp may hold key material. */
	shash_desc_zero(desc);
	memzero_explicit(tmp, HASH_MAX_DIGESTSIZE);
	return err;
}
EXPORT_SYMBOL_GPL(hkdf_expand);
/* One HKDF known-answer test case; all sizes are in bytes. */
struct hkdf_testvec {
	const char *test;	/* human-readable test name, used in error logs */
	const u8 *ikm;		/* input keying material */
	const u8 *salt;		/* optional salt (NULL when absent) */
	const u8 *info;		/* application-specific info string (NULL when absent) */
	const u8 *prk;		/* expected pseudorandom key from HKDF-Extract */
	const u8 *okm;		/* expected output keying material from HKDF-Expand */
	u16 ikm_size;
	u16 salt_size;
	u16 info_size;
	u16 prk_size;
	u16 okm_size;
};
/*
* HKDF test vectors from RFC5869
*
* Additional HKDF test vectors from
* https://github.com/brycx/Test-Vector-Generation/blob/master/HKDF/hkdf-hmac-sha2-test-vectors.md
*/
/*
 * HKDF-SHA256 known-answer vectors (RFC 5869 appendix A plus the extended
 * vectors referenced above). The first test label is fixed from the
 * misspelled "basic hdkf test" to match the sha384/sha512 arrays.
 */
static const struct hkdf_testvec hkdf_sha256_tv[] = {
	{
		.test = "basic hkdf test",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
		       "\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 22,
		.salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
		.salt_size = 13,
		.info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
		.info_size = 10,
		.prk = "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63"
		       "\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5",
		.prk_size = 32,
		.okm = "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a"
		       "\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf"
		       "\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65",
		.okm_size = 42,
	}, {
		.test = "hkdf test with long input",
		.ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
		       "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
		       "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
		       "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
		       "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
		.ikm_size = 80,
		.salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
			"\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
			"\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
			"\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
			"\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
		.salt_size = 80,
		.info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
			"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
			"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
			"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
			"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
		.info_size = 80,
		.prk = "\x06\xa6\xb8\x8c\x58\x53\x36\x1a\x06\x10\x4c\x9c\xeb\x35\xb4\x5c"
		       "\xef\x76\x00\x14\x90\x46\x71\x01\x4a\x19\x3f\x40\xc1\x5f\xc2\x44",
		.prk_size = 32,
		.okm = "\xb1\x1e\x39\x8d\xc8\x03\x27\xa1\xc8\xe7\xf7\x8c\x59\x6a\x49\x34"
		       "\x4f\x01\x2e\xda\x2d\x4e\xfa\xd8\xa0\x50\xcc\x4c\x19\xaf\xa9\x7c"
		       "\x59\x04\x5a\x99\xca\xc7\x82\x72\x71\xcb\x41\xc6\x5e\x59\x0e\x09"
		       "\xda\x32\x75\x60\x0c\x2f\x09\xb8\x36\x77\x93\xa9\xac\xa3\xdb\x71"
		       "\xcc\x30\xc5\x81\x79\xec\x3e\x87\xc1\x4c\x01\xd5\xc1\xf3\x43\x4f"
		       "\x1d\x87",
		.okm_size = 82,
	}, {
		.test = "hkdf test with zero salt and info",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
		       "\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 22,
		.salt = NULL,
		.salt_size = 0,
		.info = NULL,
		.info_size = 0,
		.prk = "\x19\xef\x24\xa3\x2c\x71\x7b\x16\x7f\x33\xa9\x1d\x6f\x64\x8b\xdf"
		       "\x96\x59\x67\x76\xaf\xdb\x63\x77\xac\x43\x4c\x1c\x29\x3c\xcb\x04",
		.prk_size = 32,
		.okm = "\x8d\xa4\xe7\x75\xa5\x63\xc1\x8f\x71\x5f\x80\x2a\x06\x3c\x5a\x31"
		       "\xb8\xa1\x1f\x5c\x5e\xe1\x87\x9e\xc3\x45\x4e\x5f\x3c\x73\x8d\x2d"
		       "\x9d\x20\x13\x95\xfa\xa4\xb6\x1a\x96\xc8",
		.okm_size = 42,
	}, {
		.test = "hkdf test with short input",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 11,
		.salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
		.salt_size = 13,
		.info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
		.info_size = 10,
		.prk = "\x82\x65\xf6\x9d\x7f\xf7\xe5\x01\x37\x93\x01\x5c\xa0\xef\x92\x0c"
		       "\xb1\x68\x21\x99\xc8\xbc\x3a\x00\xda\x0c\xab\x47\xb7\xb0\x0f\xdf",
		.prk_size = 32,
		.okm = "\x58\xdc\xe1\x0d\x58\x01\xcd\xfd\xa8\x31\x72\x6b\xfe\xbc\xb7\x43"
		       "\xd1\x4a\x7e\xe8\x3a\xa0\x57\xa9\x3d\x59\xb0\xa1\x31\x7f\xf0\x9d"
		       "\x10\x5c\xce\xcf\x53\x56\x92\xb1\x4d\xd5",
		.okm_size = 42,
	}, {
		.test = "unsalted hkdf test with zero info",
		.ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
		       "\x0c\x0c\x0c\x0c\x0c\x0c",
		.ikm_size = 22,
		.salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
		.salt_size = 32,
		.info = NULL,
		.info_size = 0,
		.prk = "\xaa\x84\x1e\x1f\x35\x74\xf3\x2d\x13\xfb\xa8\x00\x5f\xcd\x9b\x8d"
		       "\x77\x67\x82\xa5\xdf\xa1\x92\x38\x92\xfd\x8b\x63\x5d\x3a\x89\xdf",
		.prk_size = 32,
		.okm = "\x59\x68\x99\x17\x9a\xb1\xbc\x00\xa7\xc0\x37\x86\xff\x43\xee\x53"
		       "\x50\x04\xbe\x2b\xb9\xbe\x68\xbc\x14\x06\x63\x6f\x54\xbd\x33\x8a"
		       "\x66\xa2\x37\xba\x2a\xcb\xce\xe3\xc9\xa7",
		.okm_size = 42,
	}
};
/* HKDF-SHA384 known-answer vectors (extended vectors referenced above). */
static const struct hkdf_testvec hkdf_sha384_tv[] = {
	{
		.test = "basic hkdf test",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
		       "\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 22,
		.salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
		.salt_size = 13,
		.info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
		.info_size = 10,
		.prk = "\x70\x4b\x39\x99\x07\x79\xce\x1d\xc5\x48\x05\x2c\x7d\xc3\x9f\x30"
		       "\x35\x70\xdd\x13\xfb\x39\xf7\xac\xc5\x64\x68\x0b\xef\x80\xe8\xde"
		       "\xc7\x0e\xe9\xa7\xe1\xf3\xe2\x93\xef\x68\xec\xeb\x07\x2a\x5a\xde",
		.prk_size = 48,
		.okm = "\x9b\x50\x97\xa8\x60\x38\xb8\x05\x30\x90\x76\xa4\x4b\x3a\x9f\x38"
		       "\x06\x3e\x25\xb5\x16\xdc\xbf\x36\x9f\x39\x4c\xfa\xb4\x36\x85\xf7"
		       "\x48\xb6\x45\x77\x63\xe4\xf0\x20\x4f\xc5",
		.okm_size = 42,
	}, {
		.test = "hkdf test with long input",
		.ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
		       "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
		       "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
		       "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
		       "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
		.ikm_size = 80,
		.salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
			"\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
			"\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
			"\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
			"\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
		.salt_size = 80,
		.info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
			"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
			"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
			"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
			"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
		.info_size = 80,
		.prk = "\xb3\x19\xf6\x83\x1d\xff\x93\x14\xef\xb6\x43\xba\xa2\x92\x63\xb3"
		       "\x0e\x4a\x8d\x77\x9f\xe3\x1e\x9c\x90\x1e\xfd\x7d\xe7\x37\xc8\x5b"
		       "\x62\xe6\x76\xd4\xdc\x87\xb0\x89\x5c\x6a\x7d\xc9\x7b\x52\xce\xbb",
		.prk_size = 48,
		.okm = "\x48\x4c\xa0\x52\xb8\xcc\x72\x4f\xd1\xc4\xec\x64\xd5\x7b\x4e\x81"
		       "\x8c\x7e\x25\xa8\xe0\xf4\x56\x9e\xd7\x2a\x6a\x05\xfe\x06\x49\xee"
		       "\xbf\x69\xf8\xd5\xc8\x32\x85\x6b\xf4\xe4\xfb\xc1\x79\x67\xd5\x49"
		       "\x75\x32\x4a\x94\x98\x7f\x7f\x41\x83\x58\x17\xd8\x99\x4f\xdb\xd6"
		       "\xf4\xc0\x9c\x55\x00\xdc\xa2\x4a\x56\x22\x2f\xea\x53\xd8\x96\x7a"
		       "\x8b\x2e",
		.okm_size = 82,
	}, {
		.test = "hkdf test with zero salt and info",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
		       "\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 22,
		.salt = NULL,
		.salt_size = 0,
		.info = NULL,
		.info_size = 0,
		.prk = "\x10\xe4\x0c\xf0\x72\xa4\xc5\x62\x6e\x43\xdd\x22\xc1\xcf\x72\x7d"
		       "\x4b\xb1\x40\x97\x5c\x9a\xd0\xcb\xc8\xe4\x5b\x40\x06\x8f\x8f\x0b"
		       "\xa5\x7c\xdb\x59\x8a\xf9\xdf\xa6\x96\x3a\x96\x89\x9a\xf0\x47\xe5",
		.prk_size = 48,
		.okm = "\xc8\xc9\x6e\x71\x0f\x89\xb0\xd7\x99\x0b\xca\x68\xbc\xde\xc8\xcf"
		       "\x85\x40\x62\xe5\x4c\x73\xa7\xab\xc7\x43\xfa\xde\x9b\x24\x2d\xaa"
		       "\xcc\x1c\xea\x56\x70\x41\x5b\x52\x84\x9c",
		.okm_size = 42,
	}, {
		.test = "hkdf test with short input",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 11,
		.salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
		.salt_size = 13,
		.info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
		.info_size = 10,
		.prk = "\x6d\x31\x69\x98\x28\x79\x80\x88\xb3\x59\xda\xd5\x0b\x8f\x01\xb0"
		       "\x15\xf1\x7a\xa3\xbd\x4e\x27\xa6\xe9\xf8\x73\xb7\x15\x85\xca\x6a"
		       "\x00\xd1\xf0\x82\x12\x8a\xdb\x3c\xf0\x53\x0b\x57\xc0\xf9\xac\x72",
		.prk_size = 48,
		.okm = "\xfb\x7e\x67\x43\xeb\x42\xcd\xe9\x6f\x1b\x70\x77\x89\x52\xab\x75"
		       "\x48\xca\xfe\x53\x24\x9f\x7f\xfe\x14\x97\xa1\x63\x5b\x20\x1f\xf1"
		       "\x85\xb9\x3e\x95\x19\x92\xd8\x58\xf1\x1a",
		.okm_size = 42,
	}, {
		.test = "unsalted hkdf test with zero info",
		.ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
		       "\x0c\x0c\x0c\x0c\x0c\x0c",
		.ikm_size = 22,
		.salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
		.salt_size = 48,
		.info = NULL,
		.info_size = 0,
		.prk = "\x9d\x2d\xa5\x06\x6f\x05\xd1\x6c\x59\xfe\xdf\x6c\x5f\x32\xc7\x5e"
		       "\xda\x9a\x47\xa7\x9c\x93\x6a\xa4\x4c\xb7\x63\xa8\xe2\x2f\xfb\xfc"
		       "\xd8\xfe\x55\x43\x58\x53\x47\x21\x90\x39\xd1\x68\x28\x36\x33\xf5",
		.prk_size = 48,
		.okm = "\x6a\xd7\xc7\x26\xc8\x40\x09\x54\x6a\x76\xe0\x54\x5d\xf2\x66\x78"
		       "\x7e\x2b\x2c\xd6\xca\x43\x73\xa1\xf3\x14\x50\xa7\xbd\xf9\x48\x2b"
		       "\xfa\xb8\x11\xf5\x54\x20\x0e\xad\x8f\x53",
		.okm_size = 42,
	}
};
/* HKDF-SHA512 known-answer vectors (extended vectors referenced above). */
static const struct hkdf_testvec hkdf_sha512_tv[] = {
	{
		.test = "basic hkdf test",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
		       "\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 22,
		.salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
		.salt_size = 13,
		.info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
		.info_size = 10,
		.prk = "\x66\x57\x99\x82\x37\x37\xde\xd0\x4a\x88\xe4\x7e\x54\xa5\x89\x0b"
		       "\xb2\xc3\xd2\x47\xc7\xa4\x25\x4a\x8e\x61\x35\x07\x23\x59\x0a\x26"
		       "\xc3\x62\x38\x12\x7d\x86\x61\xb8\x8c\xf8\x0e\xf8\x02\xd5\x7e\x2f"
		       "\x7c\xeb\xcf\x1e\x00\xe0\x83\x84\x8b\xe1\x99\x29\xc6\x1b\x42\x37",
		.prk_size = 64,
		.okm = "\x83\x23\x90\x08\x6c\xda\x71\xfb\x47\x62\x5b\xb5\xce\xb1\x68\xe4"
		       "\xc8\xe2\x6a\x1a\x16\xed\x34\xd9\xfc\x7f\xe9\x2c\x14\x81\x57\x93"
		       "\x38\xda\x36\x2c\xb8\xd9\xf9\x25\xd7\xcb",
		.okm_size = 42,
	}, {
		.test = "hkdf test with long input",
		.ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
		       "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
		       "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
		       "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
		       "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
		.ikm_size = 80,
		.salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
			"\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
			"\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
			"\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
			"\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
		.salt_size = 80,
		.info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
			"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
			"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
			"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
			"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
		.info_size = 80,
		.prk = "\x35\x67\x25\x42\x90\x7d\x4e\x14\x2c\x00\xe8\x44\x99\xe7\x4e\x1d"
		       "\xe0\x8b\xe8\x65\x35\xf9\x24\xe0\x22\x80\x4a\xd7\x75\xdd\xe2\x7e"
		       "\xc8\x6c\xd1\xe5\xb7\xd1\x78\xc7\x44\x89\xbd\xbe\xb3\x07\x12\xbe"
		       "\xb8\x2d\x4f\x97\x41\x6c\x5a\x94\xea\x81\xeb\xdf\x3e\x62\x9e\x4a",
		.prk_size = 64,
		.okm = "\xce\x6c\x97\x19\x28\x05\xb3\x46\xe6\x16\x1e\x82\x1e\xd1\x65\x67"
		       "\x3b\x84\xf4\x00\xa2\xb5\x14\xb2\xfe\x23\xd8\x4c\xd1\x89\xdd\xf1"
		       "\xb6\x95\xb4\x8c\xbd\x1c\x83\x88\x44\x11\x37\xb3\xce\x28\xf1\x6a"
		       "\xa6\x4b\xa3\x3b\xa4\x66\xb2\x4d\xf6\xcf\xcb\x02\x1e\xcf\xf2\x35"
		       "\xf6\xa2\x05\x6c\xe3\xaf\x1d\xe4\x4d\x57\x20\x97\xa8\x50\x5d\x9e"
		       "\x7a\x93",
		.okm_size = 82,
	}, {
		.test = "hkdf test with zero salt and info",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
		       "\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 22,
		.salt = NULL,
		.salt_size = 0,
		.info = NULL,
		.info_size = 0,
		.prk = "\xfd\x20\x0c\x49\x87\xac\x49\x13\x13\xbd\x4a\x2a\x13\x28\x71\x21"
		       "\x24\x72\x39\xe1\x1c\x9e\xf8\x28\x02\x04\x4b\x66\xef\x35\x7e\x5b"
		       "\x19\x44\x98\xd0\x68\x26\x11\x38\x23\x48\x57\x2a\x7b\x16\x11\xde"
		       "\x54\x76\x40\x94\x28\x63\x20\x57\x8a\x86\x3f\x36\x56\x2b\x0d\xf6",
		.prk_size = 64,
		.okm = "\xf5\xfa\x02\xb1\x82\x98\xa7\x2a\x8c\x23\x89\x8a\x87\x03\x47\x2c"
		       "\x6e\xb1\x79\xdc\x20\x4c\x03\x42\x5c\x97\x0e\x3b\x16\x4b\xf9\x0f"
		       "\xff\x22\xd0\x48\x36\xd0\xe2\x34\x3b\xac",
		.okm_size = 42,
	}, {
		.test = "hkdf test with short input",
		.ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
		.ikm_size = 11,
		.salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
		.salt_size = 13,
		.info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
		.info_size = 10,
		.prk = "\x67\x40\x9c\x9c\xac\x28\xb5\x2e\xe9\xfa\xd9\x1c\x2f\xda\x99\x9f"
		       "\x7c\xa2\x2e\x34\x34\xf0\xae\x77\x28\x63\x83\x65\x68\xad\x6a\x7f"
		       "\x10\xcf\x11\x3b\xfd\xdd\x56\x01\x29\xa5\x94\xa8\xf5\x23\x85\xc2"
		       "\xd6\x61\xd7\x85\xd2\x9c\xe9\x3a\x11\x40\x0c\x92\x06\x83\x18\x1d",
		.prk_size = 64,
		.okm = "\x74\x13\xe8\x99\x7e\x02\x06\x10\xfb\xf6\x82\x3f\x2c\xe1\x4b\xff"
		       "\x01\x87\x5d\xb1\xca\x55\xf6\x8c\xfc\xf3\x95\x4d\xc8\xaf\xf5\x35"
		       "\x59\xbd\x5e\x30\x28\xb0\x80\xf7\xc0\x68",
		.okm_size = 42,
	}, {
		.test = "unsalted hkdf test with zero info",
		.ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
		       "\x0c\x0c\x0c\x0c\x0c\x0c",
		.ikm_size = 22,
		.salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
		.salt_size = 64,
		.info = NULL,
		.info_size = 0,
		.prk = "\x53\x46\xb3\x76\xbf\x3a\xa9\xf8\x4f\x8f\x6e\xd5\xb1\xc4\xf4\x89"
		       "\x17\x2e\x24\x4d\xac\x30\x3d\x12\xf6\x8e\xcc\x76\x6e\xa6\x00\xaa"
		       "\x88\x49\x5e\x7f\xb6\x05\x80\x31\x22\xfa\x13\x69\x24\xa8\x40\xb1"
		       "\xf0\x71\x9d\x2d\x5f\x68\xe2\x9b\x24\x22\x99\xd7\x58\xed\x68\x0c",
		.prk_size = 64,
		.okm = "\x14\x07\xd4\x60\x13\xd9\x8b\xc6\xde\xce\xfc\xfe\xe5\x5f\x0f\x90"
		       "\xb0\xc7\xf6\x3d\x68\xeb\x1a\x80\xea\xf0\x7e\x95\x3c\xfc\x0a\x3a"
		       "\x52\x40\xa1\x55\xd6\xe4\xda\xa9\x65\xbb",
		.okm_size = 42,
	}
};
/*
 * Run one HKDF known-answer test: derive the PRK from (ikm, salt) via
 * HKDF-Extract and compare it against the expected PRK, then run
 * HKDF-Expand with the expected PRK and compare the OKM.
 *
 * @shash: name of the HMAC algorithm under test, e.g. "hmac(sha256)"
 * @tv: the test vector to verify against
 *
 * Returns 0 on success, or a negative errno on allocation failure,
 * crypto-API failure, or result mismatch.
 */
static int hkdf_test(const char *shash, const struct hkdf_testvec *tv)
{
	struct crypto_shash *tfm = NULL;
	u8 *prk = NULL, *okm = NULL;
	unsigned int prk_size;
	const char *driver;
	int err;

	tfm = crypto_alloc_shash(shash, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("%s(%s): failed to allocate transform: %ld\n",
		       tv->test, shash, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	driver = crypto_shash_driver_name(tfm);
	prk_size = crypto_shash_digestsize(tfm);

	prk = kzalloc(prk_size, GFP_KERNEL);
	if (!prk) {
		err = -ENOMEM;
		goto out_free;
	}

	if (tv->prk_size != prk_size) {
		/* Fix: the format string was missing its closing parenthesis. */
		pr_err("%s(%s): prk size mismatch (vec %u, digest %u)\n",
		       tv->test, driver, tv->prk_size, prk_size);
		err = -EINVAL;
		goto out_free;
	}

	err = hkdf_extract(tfm, tv->ikm, tv->ikm_size,
			   tv->salt, tv->salt_size, prk);
	if (err) {
		pr_err("%s(%s): hkdf_extract failed with %d\n",
		       tv->test, driver, err);
		goto out_free;
	}

	if (memcmp(prk, tv->prk, tv->prk_size)) {
		pr_err("%s(%s): hkdf_extract prk mismatch\n",
		       tv->test, driver);
		print_hex_dump(KERN_ERR, "prk: ", DUMP_PREFIX_NONE,
			       16, 1, prk, tv->prk_size, false);
		err = -EINVAL;
		goto out_free;
	}

	okm = kzalloc(tv->okm_size, GFP_KERNEL);
	if (!okm) {
		err = -ENOMEM;
		goto out_free;
	}

	/* Key the transform with the *expected* PRK so that hkdf_expand()
	 * is tested independently of any hkdf_extract() failure. */
	err = crypto_shash_setkey(tfm, tv->prk, tv->prk_size);
	if (err) {
		pr_err("%s(%s): failed to set prk, error %d\n",
		       tv->test, driver, err);
		goto out_free;
	}

	err = hkdf_expand(tfm, tv->info, tv->info_size,
			  okm, tv->okm_size);
	if (err) {
		pr_err("%s(%s): hkdf_expand() failed with %d\n",
		       tv->test, driver, err);
	} else if (memcmp(okm, tv->okm, tv->okm_size)) {
		pr_err("%s(%s): hkdf_expand() okm mismatch\n",
		       tv->test, driver);
		print_hex_dump(KERN_ERR, "okm: ", DUMP_PREFIX_NONE,
			       16, 1, okm, tv->okm_size, false);
		err = -EINVAL;
	}

out_free:
	kfree(okm);
	kfree(prk);
	crypto_free_shash(tfm);
	return err;
}
/*
 * Self-test entry point: run every known-answer vector for each supported
 * HMAC algorithm, stopping at the first failure. A no-op unless
 * CONFIG_CRYPTO_SELFTESTS is enabled.
 */
static int __init crypto_hkdf_module_init(void)
{
	static const struct {
		const char *alg;
		const struct hkdf_testvec *tv;
		unsigned int count;
	} suites[] = {
		{ "hmac(sha256)", hkdf_sha256_tv, ARRAY_SIZE(hkdf_sha256_tv) },
		{ "hmac(sha384)", hkdf_sha384_tv, ARRAY_SIZE(hkdf_sha384_tv) },
		{ "hmac(sha512)", hkdf_sha512_tv, ARRAY_SIZE(hkdf_sha512_tv) },
	};
	unsigned int i, j;
	int ret;

	if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
		return 0;

	for (i = 0; i < ARRAY_SIZE(suites); i++) {
		for (j = 0; j < suites[i].count; j++) {
			ret = hkdf_test(suites[i].alg, &suites[i].tv[j]);
			if (ret)
				return ret;
		}
	}
	return 0;
}
/* Nothing to tear down; the self-tests run once at late_initcall time. */
static void __exit crypto_hkdf_module_exit(void) {}

late_initcall(crypto_hkdf_module_init);
module_exit(crypto_hkdf_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HMAC-based Key Derivation Function (HKDF)");

View File

@@ -0,0 +1,6 @@
CONFIG_KUNIT=y
CONFIG_PCI=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_NVME=y
CONFIG_NVME_HOST_AUTH=y
CONFIG_NVME_AUTH_KUNIT_TEST=y

View File

@@ -7,9 +7,15 @@ config NVME_KEYRING
config NVME_AUTH
tristate
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
select CRYPTO_HKDF
select CRYPTO_LIB_SHA256
select CRYPTO_LIB_SHA512
config NVME_AUTH_KUNIT_TEST
tristate "KUnit tests for NVMe authentication" if !KUNIT_ALL_TESTS
depends on KUNIT && NVME_AUTH
default KUNIT_ALL_TESTS
help
Enable KUnit tests for some of the common code for NVMe over Fabrics
In-Band Authentication.

View File

@@ -7,3 +7,5 @@ obj-$(CONFIG_NVME_KEYRING) += nvme-keyring.o
nvme-auth-y += auth.o
nvme-keyring-y += keyring.o
obj-$(CONFIG_NVME_AUTH_KUNIT_TEST) += tests/auth_kunit.o

View File

@@ -9,14 +9,11 @@
#include <linux/prandom.h>
#include <linux/scatterlist.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include <crypto/hkdf.h>
#include <crypto/sha2.h>
#include <linux/nvme.h>
#include <linux/nvme-auth.h>
#define HKDF_MAX_HASHLEN 64
static u32 nvme_dhchap_seqnum;
static DEFINE_MUTEX(nvme_dhchap_mutex);
@@ -38,9 +35,9 @@ u32 nvme_auth_get_seqnum(void)
}
EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
static struct nvme_auth_dhgroup_map {
const char name[16];
const char kpp[16];
static const struct nvme_auth_dhgroup_map {
char name[16];
char kpp[16];
} dhgroup_map[] = {
[NVME_AUTH_DHGROUP_NULL] = {
.name = "null", .kpp = "null" },
@@ -89,25 +86,21 @@ u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
}
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
static struct nvme_dhchap_hash_map {
static const struct nvme_dhchap_hash_map {
int len;
const char hmac[15];
const char digest[8];
char hmac[15];
} hash_map[] = {
[NVME_AUTH_HASH_SHA256] = {
.len = 32,
.hmac = "hmac(sha256)",
.digest = "sha256",
},
[NVME_AUTH_HASH_SHA384] = {
.len = 48,
.hmac = "hmac(sha384)",
.digest = "sha384",
},
[NVME_AUTH_HASH_SHA512] = {
.len = 64,
.hmac = "hmac(sha512)",
.digest = "sha512",
},
};
@@ -119,14 +112,6 @@ const char *nvme_auth_hmac_name(u8 hmac_id)
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
const char *nvme_auth_digest_name(u8 hmac_id)
{
if (hmac_id >= ARRAY_SIZE(hash_map))
return NULL;
return hash_map[hmac_id].digest;
}
EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
u8 nvme_auth_hmac_id(const char *hmac_name)
{
int i;
@@ -161,11 +146,10 @@ u32 nvme_auth_key_struct_size(u32 key_len)
}
EXPORT_SYMBOL_GPL(nvme_auth_key_struct_size);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash)
struct nvme_dhchap_key *nvme_auth_extract_key(const char *secret, u8 key_hash)
{
struct nvme_dhchap_key *key;
unsigned char *p;
const char *p;
u32 crc;
int ret, key_len;
size_t allocated_len = strlen(secret);
@@ -183,14 +167,14 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
pr_debug("base64 key decoding error %d\n",
key_len);
ret = key_len;
goto out_free_secret;
goto out_free_key;
}
if (key_len != 36 && key_len != 52 &&
key_len != 68) {
pr_err("Invalid key len %d\n", key_len);
ret = -EINVAL;
goto out_free_secret;
goto out_free_key;
}
/* The last four bytes is the CRC in little-endian format */
@@ -205,12 +189,12 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
pr_err("key crc mismatch (key %08x, crc %08x)\n",
get_unaligned_le32(key->key + key_len), crc);
ret = -EKEYREJECTED;
goto out_free_secret;
goto out_free_key;
}
key->len = key_len;
key->hash = key_hash;
return key;
out_free_secret:
out_free_key:
nvme_auth_free_key(key);
return ERR_PTR(ret);
}
@@ -237,12 +221,106 @@ void nvme_auth_free_key(struct nvme_dhchap_key *key)
}
EXPORT_SYMBOL_GPL(nvme_auth_free_key);
struct nvme_dhchap_key *nvme_auth_transform_key(
struct nvme_dhchap_key *key, char *nqn)
/*
* Start computing an HMAC value, given the algorithm ID and raw key.
*
* The context should be zeroized at the end of its lifetime. The caller can do
* that implicitly by calling nvme_auth_hmac_final(), or explicitly (needed when
* a context is abandoned without finalizing it) by calling memzero_explicit().
*/
int nvme_auth_hmac_init(struct nvme_auth_hmac_ctx *hmac, u8 hmac_id,
		const u8 *key, size_t key_len)
{
	/*
	 * Fix: dropped the leftover 'hmac_name'/'key_tfm' declarations and
	 * the SHASH_DESC_ON_STACK() on the never-initialized key_tfm -- the
	 * lib/crypto HMAC implementations below need no shash transform.
	 */
	hmac->hmac_id = hmac_id;
	switch (hmac_id) {
	case NVME_AUTH_HASH_SHA256:
		hmac_sha256_init_usingrawkey(&hmac->sha256, key, key_len);
		return 0;
	case NVME_AUTH_HASH_SHA384:
		hmac_sha384_init_usingrawkey(&hmac->sha384, key, key_len);
		return 0;
	case NVME_AUTH_HASH_SHA512:
		hmac_sha512_init_usingrawkey(&hmac->sha512, key, key_len);
		return 0;
	}
	pr_warn("%s: invalid hash algorithm %d\n", __func__, hmac_id);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_init);
/* Feed @data_len bytes of @data into an in-progress HMAC computation. */
void nvme_auth_hmac_update(struct nvme_auth_hmac_ctx *hmac, const u8 *data,
		size_t data_len)
{
	if (hmac->hmac_id == NVME_AUTH_HASH_SHA256) {
		hmac_sha256_update(&hmac->sha256, data, data_len);
	} else if (hmac->hmac_id == NVME_AUTH_HASH_SHA384) {
		hmac_sha384_update(&hmac->sha384, data, data_len);
	} else if (hmac->hmac_id == NVME_AUTH_HASH_SHA512) {
		hmac_sha512_update(&hmac->sha512, data, data_len);
	} else {
		/* Unreachable because nvme_auth_hmac_init() validated hmac_id */
		WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_update);
/* Finish computing an HMAC value. Note that this zeroizes the HMAC context. */
void nvme_auth_hmac_final(struct nvme_auth_hmac_ctx *hmac, u8 *out)
{
	if (hmac->hmac_id == NVME_AUTH_HASH_SHA256) {
		hmac_sha256_final(&hmac->sha256, out);
	} else if (hmac->hmac_id == NVME_AUTH_HASH_SHA384) {
		hmac_sha384_final(&hmac->sha384, out);
	} else if (hmac->hmac_id == NVME_AUTH_HASH_SHA512) {
		hmac_sha512_final(&hmac->sha512, out);
	} else {
		/* Unreachable because nvme_auth_hmac_init() validated hmac_id */
		WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_final);
/* One-shot HMAC over a single buffer: init + update + final. */
static int nvme_auth_hmac(u8 hmac_id, const u8 *key, size_t key_len,
		const u8 *data, size_t data_len, u8 *out)
{
	struct nvme_auth_hmac_ctx ctx;
	int ret;

	ret = nvme_auth_hmac_init(&ctx, hmac_id, key, key_len);
	if (ret)
		return ret;

	nvme_auth_hmac_update(&ctx, data, data_len);
	nvme_auth_hmac_final(&ctx, out);
	return 0;
}
/* Unkeyed digest of a single buffer using the hash matching @hmac_id. */
static int nvme_auth_hash(u8 hmac_id, const u8 *data, size_t data_len, u8 *out)
{
	if (hmac_id == NVME_AUTH_HASH_SHA256) {
		sha256(data, data_len, out);
	} else if (hmac_id == NVME_AUTH_HASH_SHA384) {
		sha384(data, data_len, out);
	} else if (hmac_id == NVME_AUTH_HASH_SHA512) {
		sha512(data, data_len, out);
	} else {
		pr_warn("%s: invalid hash algorithm %d\n", __func__, hmac_id);
		return -EINVAL;
	}
	return 0;
}
struct nvme_dhchap_key *nvme_auth_transform_key(
const struct nvme_dhchap_key *key, const char *nqn)
{
struct nvme_auth_hmac_ctx hmac;
struct nvme_dhchap_key *transformed_key;
int ret, key_len;
@@ -257,118 +335,33 @@ struct nvme_dhchap_key *nvme_auth_transform_key(
return ERR_PTR(-ENOMEM);
return transformed_key;
}
hmac_name = nvme_auth_hmac_name(key->hash);
if (!hmac_name) {
pr_warn("Invalid key hash id %d\n", key->hash);
return ERR_PTR(-EINVAL);
}
key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(key_tfm))
return ERR_CAST(key_tfm);
key_len = crypto_shash_digestsize(key_tfm);
ret = nvme_auth_hmac_init(&hmac, key->hash, key->key, key->len);
if (ret)
return ERR_PTR(ret);
key_len = nvme_auth_hmac_hash_len(key->hash);
transformed_key = nvme_auth_alloc_key(key_len, key->hash);
if (!transformed_key) {
ret = -ENOMEM;
goto out_free_key;
memzero_explicit(&hmac, sizeof(hmac));
return ERR_PTR(-ENOMEM);
}
shash->tfm = key_tfm;
ret = crypto_shash_setkey(key_tfm, key->key, key->len);
if (ret < 0)
goto out_free_transformed_key;
ret = crypto_shash_init(shash);
if (ret < 0)
goto out_free_transformed_key;
ret = crypto_shash_update(shash, nqn, strlen(nqn));
if (ret < 0)
goto out_free_transformed_key;
ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
if (ret < 0)
goto out_free_transformed_key;
ret = crypto_shash_final(shash, transformed_key->key);
if (ret < 0)
goto out_free_transformed_key;
crypto_free_shash(key_tfm);
nvme_auth_hmac_update(&hmac, nqn, strlen(nqn));
nvme_auth_hmac_update(&hmac, "NVMe-over-Fabrics", 17);
nvme_auth_hmac_final(&hmac, transformed_key->key);
return transformed_key;
out_free_transformed_key:
nvme_auth_free_key(transformed_key);
out_free_key:
crypto_free_shash(key_tfm);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
int nvme_auth_augmented_challenge(u8 hmac_id, const u8 *skey, size_t skey_len,
const u8 *challenge, u8 *aug, size_t hlen)
{
const char *digest_name;
struct crypto_shash *tfm;
u8 hashed_key[NVME_AUTH_MAX_DIGEST_SIZE];
int ret;
digest_name = nvme_auth_digest_name(hmac_id);
if (!digest_name) {
pr_debug("%s: failed to get digest for %d\n", __func__,
hmac_id);
return -EINVAL;
}
tfm = crypto_alloc_shash(digest_name, 0, 0);
if (IS_ERR(tfm))
return -ENOMEM;
ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
if (ret < 0)
pr_debug("%s: Failed to hash digest len %zu\n", __func__,
skey_len);
crypto_free_shash(tfm);
return ret;
}
int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *challenge, u8 *aug, size_t hlen)
{
struct crypto_shash *tfm;
u8 *hashed_key;
const char *hmac_name;
int ret;
hashed_key = kmalloc(hlen, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
ret = nvme_auth_hash_skey(hmac_id, skey,
skey_len, hashed_key);
if (ret < 0)
goto out_free_key;
hmac_name = nvme_auth_hmac_name(hmac_id);
if (!hmac_name) {
pr_warn("%s: invalid hash algorithm %d\n",
__func__, hmac_id);
ret = -EINVAL;
goto out_free_key;
}
tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto out_free_key;
}
ret = crypto_shash_setkey(tfm, hashed_key, hlen);
ret = nvme_auth_hash(hmac_id, skey, skey_len, hashed_key);
if (ret)
goto out_free_hash;
ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug);
out_free_hash:
crypto_free_shash(tfm);
out_free_key:
kfree_sensitive(hashed_key);
return ret;
ret = nvme_auth_hmac(hmac_id, hashed_key, hlen, challenge, hlen, aug);
memzero_explicit(hashed_key, sizeof(hashed_key));
return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
@@ -411,7 +404,7 @@ int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
u8 *ctrl_key, size_t ctrl_key_len,
const u8 *ctrl_key, size_t ctrl_key_len,
u8 *sess_key, size_t sess_key_len)
{
struct kpp_request *req;
@@ -438,7 +431,7 @@ int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
}
EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
int nvme_auth_parse_key(const char *secret, struct nvme_dhchap_key **ret_key)
{
struct nvme_dhchap_key *key;
u8 key_hash;
@@ -461,7 +454,7 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
*ret_key = key;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
EXPORT_SYMBOL_GPL(nvme_auth_parse_key);
/**
* nvme_auth_generate_psk - Generate a PSK for TLS
@@ -486,66 +479,32 @@ EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
* Returns 0 on success with a valid generated PSK pointer in @ret_psk and
* the length of @ret_psk in @ret_len, or a negative error number otherwise.
*/
int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *c1, u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len)
int nvme_auth_generate_psk(u8 hmac_id, const u8 *skey, size_t skey_len,
const u8 *c1, const u8 *c2, size_t hash_len,
u8 **ret_psk, size_t *ret_len)
{
struct crypto_shash *tfm;
SHASH_DESC_ON_STACK(shash, tfm);
size_t psk_len = nvme_auth_hmac_hash_len(hmac_id);
struct nvme_auth_hmac_ctx hmac;
u8 *psk;
const char *hmac_name;
int ret, psk_len;
int ret;
if (!c1 || !c2)
return -EINVAL;
hmac_name = nvme_auth_hmac_name(hmac_id);
if (!hmac_name) {
pr_warn("%s: invalid hash algorithm %d\n",
__func__, hmac_id);
return -EINVAL;
}
tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
psk_len = crypto_shash_digestsize(tfm);
ret = nvme_auth_hmac_init(&hmac, hmac_id, skey, skey_len);
if (ret)
return ret;
psk = kzalloc(psk_len, GFP_KERNEL);
if (!psk) {
ret = -ENOMEM;
goto out_free_tfm;
memzero_explicit(&hmac, sizeof(hmac));
return -ENOMEM;
}
shash->tfm = tfm;
ret = crypto_shash_setkey(tfm, skey, skey_len);
if (ret)
goto out_free_psk;
ret = crypto_shash_init(shash);
if (ret)
goto out_free_psk;
ret = crypto_shash_update(shash, c1, hash_len);
if (ret)
goto out_free_psk;
ret = crypto_shash_update(shash, c2, hash_len);
if (ret)
goto out_free_psk;
ret = crypto_shash_final(shash, psk);
if (!ret) {
*ret_psk = psk;
*ret_len = psk_len;
}
out_free_psk:
if (ret)
kfree_sensitive(psk);
out_free_tfm:
crypto_free_shash(tfm);
return ret;
nvme_auth_hmac_update(&hmac, c1, hash_len);
nvme_auth_hmac_update(&hmac, c2, hash_len);
nvme_auth_hmac_final(&hmac, psk);
*ret_psk = psk;
*ret_len = psk_len;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_psk);
@@ -584,158 +543,70 @@ EXPORT_SYMBOL_GPL(nvme_auth_generate_psk);
* Returns 0 on success with a valid digest pointer in @ret_digest, or a
* negative error number on failure.
*/
int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
char *subsysnqn, char *hostnqn, u8 **ret_digest)
int nvme_auth_generate_digest(u8 hmac_id, const u8 *psk, size_t psk_len,
const char *subsysnqn, const char *hostnqn,
char **ret_digest)
{
struct crypto_shash *tfm;
SHASH_DESC_ON_STACK(shash, tfm);
u8 *digest, *enc;
const char *hmac_name;
size_t digest_len, hmac_len;
struct nvme_auth_hmac_ctx hmac;
u8 digest[NVME_AUTH_MAX_DIGEST_SIZE];
size_t hash_len = nvme_auth_hmac_hash_len(hmac_id);
char *enc;
size_t enc_len;
int ret;
if (WARN_ON(!subsysnqn || !hostnqn))
return -EINVAL;
hmac_name = nvme_auth_hmac_name(hmac_id);
if (!hmac_name) {
if (hash_len == 0) {
pr_warn("%s: invalid hash algorithm %d\n",
__func__, hmac_id);
return -EINVAL;
}
switch (nvme_auth_hmac_hash_len(hmac_id)) {
switch (hash_len) {
case 32:
hmac_len = 44;
enc_len = 44;
break;
case 48:
hmac_len = 64;
enc_len = 64;
break;
default:
pr_warn("%s: invalid hash algorithm '%s'\n",
__func__, hmac_name);
__func__, nvme_auth_hmac_name(hmac_id));
return -EINVAL;
}
enc = kzalloc(hmac_len + 1, GFP_KERNEL);
if (!enc)
return -ENOMEM;
tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto out_free_enc;
}
digest_len = crypto_shash_digestsize(tfm);
digest = kzalloc(digest_len, GFP_KERNEL);
if (!digest) {
enc = kzalloc(enc_len + 1, GFP_KERNEL);
if (!enc) {
ret = -ENOMEM;
goto out_free_tfm;
goto out;
}
shash->tfm = tfm;
ret = crypto_shash_setkey(tfm, psk, psk_len);
ret = nvme_auth_hmac_init(&hmac, hmac_id, psk, psk_len);
if (ret)
goto out_free_digest;
goto out;
nvme_auth_hmac_update(&hmac, hostnqn, strlen(hostnqn));
nvme_auth_hmac_update(&hmac, " ", 1);
nvme_auth_hmac_update(&hmac, subsysnqn, strlen(subsysnqn));
nvme_auth_hmac_update(&hmac, " NVMe-over-Fabrics", 18);
nvme_auth_hmac_final(&hmac, digest);
ret = crypto_shash_init(shash);
if (ret)
goto out_free_digest;
ret = crypto_shash_update(shash, hostnqn, strlen(hostnqn));
if (ret)
goto out_free_digest;
ret = crypto_shash_update(shash, " ", 1);
if (ret)
goto out_free_digest;
ret = crypto_shash_update(shash, subsysnqn, strlen(subsysnqn));
if (ret)
goto out_free_digest;
ret = crypto_shash_update(shash, " NVMe-over-Fabrics", 18);
if (ret)
goto out_free_digest;
ret = crypto_shash_final(shash, digest);
if (ret)
goto out_free_digest;
ret = base64_encode(digest, digest_len, enc, true, BASE64_STD);
if (ret < hmac_len) {
ret = base64_encode(digest, hash_len, enc, true, BASE64_STD);
if (ret < enc_len) {
ret = -ENOKEY;
goto out_free_digest;
goto out;
}
*ret_digest = enc;
ret = 0;
out_free_digest:
kfree_sensitive(digest);
out_free_tfm:
crypto_free_shash(tfm);
out_free_enc:
out:
if (ret)
kfree_sensitive(enc);
memzero_explicit(digest, sizeof(digest));
return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
/**
* hkdf_expand_label - HKDF-Expand-Label (RFC 8446 section 7.1)
* @hmac_tfm: hash context keyed with pseudorandom key
* @label: ASCII label without "tls13 " prefix
* @labellen: length of @label
* @context: context bytes
* @contextlen: length of @context
* @okm: output keying material
* @okmlen: length of @okm
*
* Build the TLS 1.3 HkdfLabel structure and invoke hkdf_expand().
*
* Returns 0 on success with output keying material stored in @okm,
* or a negative errno value otherwise.
*/
static int hkdf_expand_label(struct crypto_shash *hmac_tfm,
const u8 *label, unsigned int labellen,
const u8 *context, unsigned int contextlen,
u8 *okm, unsigned int okmlen)
{
int err;
u8 *info;
unsigned int infolen;
const char *tls13_prefix = "tls13 ";
unsigned int prefixlen = strlen(tls13_prefix);
if (WARN_ON(labellen > (255 - prefixlen)))
return -EINVAL;
if (WARN_ON(contextlen > 255))
return -EINVAL;
infolen = 2 + (1 + prefixlen + labellen) + (1 + contextlen);
info = kzalloc(infolen, GFP_KERNEL);
if (!info)
return -ENOMEM;
/* HkdfLabel.Length */
put_unaligned_be16(okmlen, info);
/* HkdfLabel.Label */
info[2] = prefixlen + labellen;
memcpy(info + 3, tls13_prefix, prefixlen);
memcpy(info + 3 + prefixlen, label, labellen);
/* HkdfLabel.Context */
info[3 + prefixlen + labellen] = contextlen;
memcpy(info + 4 + prefixlen + labellen, context, contextlen);
err = hkdf_expand(hmac_tfm, info, infolen, okm, okmlen);
kfree_sensitive(info);
return err;
}
/**
* nvme_auth_derive_tls_psk - Derive TLS PSK
* @hmac_id: Hash function identifier
@@ -763,82 +634,92 @@ static int hkdf_expand_label(struct crypto_shash *hmac_tfm,
* Returns 0 on success with a valid psk pointer in @ret_psk or a negative
* error number otherwise.
*/
int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
u8 *psk_digest, u8 **ret_psk)
int nvme_auth_derive_tls_psk(int hmac_id, const u8 *psk, size_t psk_len,
const char *psk_digest, u8 **ret_psk)
{
struct crypto_shash *hmac_tfm;
const char *hmac_name;
const char *label = "nvme-tls-psk";
static const char default_salt[HKDF_MAX_HASHLEN];
size_t prk_len;
const char *ctx;
unsigned char *prk, *tls_key;
static const u8 default_salt[NVME_AUTH_MAX_DIGEST_SIZE];
static const char label[] = "tls13 nvme-tls-psk";
const size_t label_len = sizeof(label) - 1;
u8 prk[NVME_AUTH_MAX_DIGEST_SIZE];
size_t hash_len, ctx_len;
u8 *hmac_data = NULL, *tls_key;
size_t i;
int ret;
hmac_name = nvme_auth_hmac_name(hmac_id);
if (!hmac_name) {
hash_len = nvme_auth_hmac_hash_len(hmac_id);
if (hash_len == 0) {
pr_warn("%s: invalid hash algorithm %d\n",
__func__, hmac_id);
return -EINVAL;
}
if (hmac_id == NVME_AUTH_HASH_SHA512) {
pr_warn("%s: unsupported hash algorithm %s\n",
__func__, hmac_name);
__func__, nvme_auth_hmac_name(hmac_id));
return -EINVAL;
}
hmac_tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(hmac_tfm))
return PTR_ERR(hmac_tfm);
prk_len = crypto_shash_digestsize(hmac_tfm);
prk = kzalloc(prk_len, GFP_KERNEL);
if (!prk) {
ret = -ENOMEM;
goto out_free_shash;
if (psk_len != hash_len) {
pr_warn("%s: unexpected psk_len %zu\n", __func__, psk_len);
return -EINVAL;
}
if (WARN_ON(prk_len > HKDF_MAX_HASHLEN)) {
/* HKDF-Extract */
ret = nvme_auth_hmac(hmac_id, default_salt, hash_len, psk, psk_len,
prk);
if (ret)
goto out;
/*
* HKDF-Expand-Label (RFC 8446 section 7.1), with output length equal to
* the hash length (so only a single HMAC operation is needed)
*/
hmac_data = kmalloc(/* output length */ 2 +
/* label */ 1 + label_len +
/* context (max) */ 1 + 3 + 1 + strlen(psk_digest) +
/* counter */ 1,
GFP_KERNEL);
if (!hmac_data) {
ret = -ENOMEM;
goto out;
}
/* output length */
i = 0;
hmac_data[i++] = hash_len >> 8;
hmac_data[i++] = hash_len;
/* label */
static_assert(label_len <= 255);
hmac_data[i] = label_len;
memcpy(&hmac_data[i + 1], label, label_len);
i += 1 + label_len;
/* context */
ctx_len = sprintf(&hmac_data[i + 1], "%02d %s", hmac_id, psk_digest);
if (ctx_len > 255) {
ret = -EINVAL;
goto out_free_prk;
goto out;
}
ret = hkdf_extract(hmac_tfm, psk, psk_len,
default_salt, prk_len, prk);
if (ret)
goto out_free_prk;
hmac_data[i] = ctx_len;
i += 1 + ctx_len;
ret = crypto_shash_setkey(hmac_tfm, prk, prk_len);
if (ret)
goto out_free_prk;
ctx = kasprintf(GFP_KERNEL, "%02d %s", hmac_id, psk_digest);
if (!ctx) {
ret = -ENOMEM;
goto out_free_prk;
}
/* counter (this overwrites the NUL terminator written by sprintf) */
hmac_data[i++] = 1;
tls_key = kzalloc(psk_len, GFP_KERNEL);
if (!tls_key) {
ret = -ENOMEM;
goto out_free_ctx;
goto out;
}
ret = hkdf_expand_label(hmac_tfm,
label, strlen(label),
ctx, strlen(ctx),
tls_key, psk_len);
ret = nvme_auth_hmac(hmac_id, prk, hash_len, hmac_data, i, tls_key);
if (ret) {
kfree(tls_key);
goto out_free_ctx;
kfree_sensitive(tls_key);
goto out;
}
*ret_psk = tls_key;
out_free_ctx:
kfree(ctx);
out_free_prk:
kfree(prk);
out_free_shash:
crypto_free_shash(hmac_tfm);
out:
kfree_sensitive(hmac_data);
memzero_explicit(prk, sizeof(prk));
return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_derive_tls_psk);

View File

@@ -0,0 +1,175 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Unit tests for NVMe authentication functions
*
* Copyright 2026 Google LLC
*/
#include <crypto/sha2.h>
#include <kunit/test.h>
#include <linux/nvme.h>
#include <linux/nvme-auth.h>
#include <linux/slab.h>
/*
 * Expected outputs for one HMAC algorithm, given the fixed inputs
 * constructed in test_nvme_auth_derive_tls_psk().
 */
struct nvme_auth_test_values {
	u8 hmac_id;	/* NVME_AUTH_HASH_* identifier under test */
	size_t hash_len;	/* digest length in bytes for @hmac_id */
	/* expected result of nvme_auth_generate_psk() */
	u8 expected_psk[NVME_AUTH_MAX_DIGEST_SIZE];
	/*
	 * Expected base64 digest from nvme_auth_generate_digest(), or
	 * NULL if that function does not support this algorithm.
	 */
	char *expected_psk_digest;
	/* expected result of nvme_auth_derive_tls_psk() */
	u8 expected_tls_psk[NVME_AUTH_MAX_DIGEST_SIZE];
};
/*
 * KUnit deferred-action callback that frees @ptr.  A wrapper is used so
 * the function passed to kunit_add_action_or_reset() has exactly the
 * void (*)(void *) type it expects (presumably kfree()'s own prototype
 * does not match directly -- see kunit_add_kfree_action()).
 */
static void kfree_action(void *ptr)
{
	kfree(ptr);
}
/*
 * Arrange for @ptr to be kfree()d when @test finishes, asserting that
 * the action was registered successfully.
 */
static void kunit_add_kfree_action(struct kunit *test, void *ptr)
{
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_action_or_reset(test, kfree_action, ptr));
}
/*
 * Exercise the full secure-concatenation derivation chain with fixed
 * inputs: generated PSK -> base64 PSK digest -> derived TLS PSK.  Each
 * stage's output is compared against the expected value in @v.  A NULL
 * @v->expected_psk_digest marks an algorithm that has an ID assigned
 * but is rejected (-EINVAL) by nvme_auth_generate_digest(); the chain
 * stops there for such algorithms.
 */
static void
test_nvme_auth_derive_tls_psk(struct kunit *test,
			      const struct nvme_auth_test_values *v)
{
	u8 skey[NVME_AUTH_MAX_DIGEST_SIZE];
	u8 c1[NVME_AUTH_MAX_DIGEST_SIZE];
	u8 c2[NVME_AUTH_MAX_DIGEST_SIZE];
	u8 *psk = NULL, *tls_psk = NULL;
	char *psk_digest = NULL;
	size_t psk_len;
	size_t idx;
	int ret;

	/* Deterministic byte patterns for the secret and both challenges. */
	for (idx = 0; idx < NVME_AUTH_MAX_DIGEST_SIZE; idx++) {
		skey[idx] = 'A' + idx;
		c1[idx] = idx;
		c2[idx] = 0xff - idx;
	}

	/* Stage 1: retained PSK from the session key and challenges. */
	ret = nvme_auth_generate_psk(v->hmac_id, skey, v->hash_len, c1, c2,
				     v->hash_len, &psk, &psk_len);
	kunit_add_kfree_action(test, psk);
	KUNIT_ASSERT_EQ(test, 0, ret);
	KUNIT_ASSERT_EQ(test, v->hash_len, psk_len);
	KUNIT_ASSERT_MEMEQ(test, v->expected_psk, psk, psk_len);

	/* Stage 2: base64 digest over the PSK and both NQNs. */
	ret = nvme_auth_generate_digest(v->hmac_id, psk, psk_len, "subsysnqn",
					"hostnqn", &psk_digest);
	kunit_add_kfree_action(test, psk_digest);
	if (!v->expected_psk_digest) {
		/*
		 * Algorithm has an ID assigned but is not supported by
		 * nvme_auth_generate_digest().
		 */
		KUNIT_ASSERT_EQ(test, -EINVAL, ret);
		return;
	}
	KUNIT_ASSERT_EQ(test, 0, ret);
	KUNIT_ASSERT_STREQ(test, v->expected_psk_digest, psk_digest);

	/* Stage 3: final TLS PSK derived from the PSK and its digest. */
	ret = nvme_auth_derive_tls_psk(v->hmac_id, psk, psk_len, psk_digest,
				       &tls_psk);
	kunit_add_kfree_action(test, tls_psk);
	KUNIT_ASSERT_EQ(test, 0, ret);
	KUNIT_ASSERT_MEMEQ(test, v->expected_tls_psk, tls_psk, psk_len);
}
/* HMAC-SHA256: all three derivation stages are expected to succeed. */
static void test_nvme_auth_derive_tls_psk_hmac_sha256(struct kunit *test)
{
	static const struct nvme_auth_test_values vals = {
		.hmac_id = NVME_AUTH_HASH_SHA256,
		.hash_len = SHA256_DIGEST_SIZE,
		.expected_psk = {
			0x17, 0x33, 0xc5, 0x9f, 0xa7, 0xf4, 0x8f, 0xcf,
			0x37, 0xf5, 0xf2, 0x6f, 0xc4, 0xff, 0x02, 0x68,
			0xad, 0x4f, 0x78, 0xe0, 0x30, 0xf4, 0xf3, 0xb0,
			0xbf, 0xd1, 0xd4, 0x7e, 0x7b, 0xb1, 0x44, 0x7a,
		},
		.expected_psk_digest = "OldoKuTfKddMuyCznAZojkWD7P4D9/AtzDzLimtOxqI=",
		.expected_tls_psk = {
			0x3c, 0x17, 0xda, 0x62, 0x84, 0x74, 0xa0, 0x4d,
			0x22, 0x47, 0xc4, 0xca, 0xb4, 0x79, 0x68, 0xc9,
			0x15, 0x38, 0x81, 0x93, 0xf7, 0xc0, 0x71, 0xbd,
			0x94, 0x89, 0xcc, 0x36, 0x66, 0xcd, 0x7c, 0xc8,
		},
	};

	test_nvme_auth_derive_tls_psk(test, &vals);
}
/* HMAC-SHA384: all three derivation stages are expected to succeed. */
static void test_nvme_auth_derive_tls_psk_hmac_sha384(struct kunit *test)
{
	static const struct nvme_auth_test_values vals = {
		.hmac_id = NVME_AUTH_HASH_SHA384,
		.hash_len = SHA384_DIGEST_SIZE,
		.expected_psk = {
			0xf1, 0x4b, 0x2d, 0xd3, 0x23, 0x4c, 0x45, 0x96,
			0x94, 0xd3, 0xbc, 0x63, 0xf8, 0x96, 0x8b, 0xd6,
			0xb3, 0x7c, 0x2c, 0x6d, 0xe8, 0x49, 0xe2, 0x2e,
			0x11, 0x87, 0x49, 0x00, 0x1c, 0xe4, 0xbb, 0xe8,
			0x64, 0x0b, 0x9e, 0x3a, 0x74, 0x8c, 0xb1, 0x1c,
			0xe4, 0xb1, 0xd7, 0x1d, 0x35, 0x9c, 0xce, 0x39,
		},
		.expected_psk_digest = "cffMWk8TSS7HOQebjgYEIkrPrjWPV4JE5cdPB8WhEvY4JBW5YynKyv66XscN4A9n",
		.expected_tls_psk = {
			0x27, 0x74, 0x75, 0x32, 0x33, 0x53, 0x7b, 0x3f,
			0xa5, 0x0e, 0xb7, 0xd1, 0x6a, 0x8e, 0x43, 0x45,
			0x7d, 0x85, 0xf4, 0x90, 0x6c, 0x00, 0x5b, 0x22,
			0x36, 0x61, 0x6c, 0x5d, 0x80, 0x93, 0x9d, 0x08,
			0x98, 0xff, 0xf1, 0x5b, 0xb8, 0xb7, 0x71, 0x19,
			0xd2, 0xbe, 0x0a, 0xac, 0x42, 0x3e, 0x75, 0x90,
		},
	};

	test_nvme_auth_derive_tls_psk(test, &vals);
}
/*
 * HMAC-SHA512: only the PSK-generation stage is checked; the NULL
 * expected_psk_digest tells the helper to expect -EINVAL from the
 * digest stage and stop there.
 */
static void test_nvme_auth_derive_tls_psk_hmac_sha512(struct kunit *test)
{
	static const struct nvme_auth_test_values vals = {
		.hmac_id = NVME_AUTH_HASH_SHA512,
		.hash_len = SHA512_DIGEST_SIZE,
		.expected_psk = {
			0x9c, 0x9f, 0x08, 0x9a, 0x61, 0x8b, 0x47, 0xd2,
			0xd7, 0x5f, 0x4b, 0x6c, 0x28, 0x07, 0x04, 0x24,
			0x48, 0x7b, 0x44, 0x5d, 0xd9, 0x6e, 0x70, 0xc4,
			0xc0, 0x9b, 0x55, 0xe8, 0xb6, 0x00, 0x01, 0x52,
			0xa3, 0x36, 0x3c, 0x34, 0x54, 0x04, 0x3f, 0x38,
			0xf0, 0xb8, 0x50, 0x36, 0xde, 0xd4, 0x06, 0x55,
			0x35, 0x0a, 0xa8, 0x7b, 0x8b, 0x6a, 0x28, 0x2b,
			0x5c, 0x1a, 0xca, 0xe1, 0x62, 0x33, 0xdd, 0x5b,
		},
		/* nvme_auth_generate_digest() doesn't support SHA-512 yet. */
		.expected_psk_digest = NULL,
	};

	test_nvme_auth_derive_tls_psk(test, &vals);
}
/* One KUnit case per DH-HMAC-CHAP hash algorithm; NULL-terminated. */
static struct kunit_case nvme_auth_test_cases[] = {
	KUNIT_CASE(test_nvme_auth_derive_tls_psk_hmac_sha256),
	KUNIT_CASE(test_nvme_auth_derive_tls_psk_hmac_sha384),
	KUNIT_CASE(test_nvme_auth_derive_tls_psk_hmac_sha512),
	{},
};
/* Suite definition and registration with the KUnit framework. */
static struct kunit_suite nvme_auth_test_suite = {
	.name = "nvme-auth",
	.test_cases = nvme_auth_test_cases,
};

kunit_test_suite(nvme_auth_test_suite);

MODULE_DESCRIPTION("Unit tests for NVMe authentication functions");
MODULE_LICENSE("GPL");

View File

@@ -7,7 +7,6 @@
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
@@ -22,7 +21,6 @@ struct nvme_dhchap_queue_context {
struct list_head entry;
struct work_struct auth_work;
struct nvme_ctrl *ctrl;
struct crypto_shash *shash_tfm;
struct crypto_kpp *dh_tfm;
struct nvme_dhchap_key *transformed_key;
void *buf;
@@ -38,9 +36,9 @@ struct nvme_dhchap_queue_context {
u8 hash_id;
u8 sc_c;
size_t hash_len;
u8 c1[64];
u8 c2[64];
u8 response[64];
u8 c1[NVME_AUTH_MAX_DIGEST_SIZE];
u8 c2[NVME_AUTH_MAX_DIGEST_SIZE];
u8 response[NVME_AUTH_MAX_DIGEST_SIZE];
u8 *ctrl_key;
u8 *host_key;
u8 *sess_key;
@@ -125,6 +123,8 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
{
struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
u8 dh_list_offset = NVME_AUTH_DHCHAP_MAX_DH_IDS;
u8 *idlist = data->auth_protocol[0].dhchap.idlist;
if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
@@ -141,21 +141,22 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
} else
data->sc_c = NVME_AUTH_SECP_NOSC;
chap->sc_c = data->sc_c;
data->napd = 1;
data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
data->auth_protocol[0].dhchap.halen = 3;
data->auth_protocol[0].dhchap.dhlen = 6;
data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
chap->sc_c = data->sc_c;
idlist[0] = NVME_AUTH_HASH_SHA256;
idlist[1] = NVME_AUTH_HASH_SHA384;
idlist[2] = NVME_AUTH_HASH_SHA512;
if (chap->sc_c == NVME_AUTH_SECP_NOSC)
idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_NULL;
idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_2048;
idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_3072;
idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_4096;
idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_6144;
idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_8192;
data->auth_protocol[0].dhchap.dhlen =
dh_list_offset - NVME_AUTH_DHCHAP_MAX_DH_IDS;
return size;
}
@@ -183,38 +184,17 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
return -EPROTO;
}
if (chap->hash_id == data->hashid && chap->shash_tfm &&
!strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
if (chap->hash_id == data->hashid && chap->hash_len == data->hl) {
dev_dbg(ctrl->device,
"qid %d: reuse existing hash %s\n",
chap->qid, hmac_name);
goto select_kpp;
}
/* Reset if hash cannot be reused */
if (chap->shash_tfm) {
crypto_free_shash(chap->shash_tfm);
chap->hash_id = 0;
chap->hash_len = 0;
}
chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(chap->shash_tfm)) {
dev_warn(ctrl->device,
"qid %d: failed to allocate hash %s, error %ld\n",
chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
chap->shash_tfm = NULL;
chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
return -ENOMEM;
}
if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
if (nvme_auth_hmac_hash_len(data->hashid) != data->hl) {
dev_warn(ctrl->device,
"qid %d: invalid hash length %d\n",
chap->qid, data->hl);
crypto_free_shash(chap->shash_tfm);
chap->shash_tfm = NULL;
chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
return -EPROTO;
}
@@ -434,7 +414,7 @@ static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap)
{
SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
struct nvme_auth_hmac_ctx hmac;
u8 buf[4], *challenge = chap->c1;
int ret;
@@ -454,13 +434,11 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
__func__, chap->qid);
}
ret = crypto_shash_setkey(chap->shash_tfm,
chap->transformed_key->key, chap->transformed_key->len);
if (ret) {
dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
chap->qid, ret);
ret = nvme_auth_hmac_init(&hmac, chap->hash_id,
chap->transformed_key->key,
chap->transformed_key->len);
if (ret)
goto out;
}
if (chap->dh_tfm) {
challenge = kmalloc(chap->hash_len, GFP_KERNEL);
@@ -477,51 +455,36 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
goto out;
}
shash->tfm = chap->shash_tfm;
ret = crypto_shash_init(shash);
if (ret)
goto out;
ret = crypto_shash_update(shash, challenge, chap->hash_len);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);
put_unaligned_le32(chap->s1, buf);
ret = crypto_shash_update(shash, buf, 4);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 4);
put_unaligned_le16(chap->transaction, buf);
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 2);
*buf = chap->sc_c;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, "HostHost", 8);
if (ret)
goto out;
ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
strlen(ctrl->opts->host->nqn));
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, "HostHost", 8);
nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
strlen(ctrl->opts->host->nqn));
memset(buf, 0, sizeof(buf));
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
strlen(ctrl->opts->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_final(shash, chap->response);
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
strlen(ctrl->opts->subsysnqn));
nvme_auth_hmac_final(&hmac, chap->response);
ret = 0;
out:
if (challenge != chap->c1)
kfree(challenge);
memzero_explicit(&hmac, sizeof(hmac));
return ret;
}
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap)
{
SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
struct nvme_auth_hmac_ctx hmac;
struct nvme_dhchap_key *transformed_key;
u8 buf[4], *challenge = chap->c2;
int ret;
@@ -533,10 +496,10 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
return ret;
}
ret = crypto_shash_setkey(chap->shash_tfm,
transformed_key->key, transformed_key->len);
ret = nvme_auth_hmac_init(&hmac, chap->hash_id, transformed_key->key,
transformed_key->len);
if (ret) {
dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
dev_warn(ctrl->device, "qid %d: failed to init hmac, error %d\n",
chap->qid, ret);
goto out;
}
@@ -563,43 +526,29 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
__func__, chap->qid, ctrl->opts->subsysnqn);
dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
__func__, chap->qid, ctrl->opts->host->nqn);
shash->tfm = chap->shash_tfm;
ret = crypto_shash_init(shash);
if (ret)
goto out;
ret = crypto_shash_update(shash, challenge, chap->hash_len);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);
put_unaligned_le32(chap->s2, buf);
ret = crypto_shash_update(shash, buf, 4);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 4);
put_unaligned_le16(chap->transaction, buf);
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 2);
memset(buf, 0, 4);
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, "Controller", 10);
if (ret)
goto out;
ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
strlen(ctrl->opts->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
strlen(ctrl->opts->host->nqn));
if (ret)
goto out;
ret = crypto_shash_final(shash, chap->response);
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, "Controller", 10);
nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
strlen(ctrl->opts->subsysnqn));
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
strlen(ctrl->opts->host->nqn));
nvme_auth_hmac_final(&hmac, chap->response);
ret = 0;
out:
if (challenge != chap->c2)
kfree(challenge);
memzero_explicit(&hmac, sizeof(hmac));
nvme_auth_free_key(transformed_key);
return ret;
}
@@ -689,8 +638,6 @@ static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
nvme_auth_reset_dhchap(chap);
chap->authenticated = false;
if (chap->shash_tfm)
crypto_free_shash(chap->shash_tfm);
if (chap->dh_tfm)
crypto_free_kpp(chap->dh_tfm);
}
@@ -708,7 +655,8 @@ EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap)
{
u8 *psk, *digest, *tls_psk;
u8 *psk, *tls_psk;
char *digest;
struct key *tls_key;
size_t psk_len;
int ret = 0;
@@ -1071,12 +1019,11 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
if (!ctrl->opts)
return 0;
ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
&ctrl->host_key);
ret = nvme_auth_parse_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
if (ret)
return ret;
ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
&ctrl->ctrl_key);
ret = nvme_auth_parse_key(ctrl->opts->dhchap_ctrl_secret,
&ctrl->ctrl_key);
if (ret)
goto err_free_dhchap_secret;

View File

@@ -1884,26 +1884,6 @@ static bool nvme_init_integrity(struct nvme_ns_head *head,
return true;
}
static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
lim->max_hw_discard_sectors =
nvme_lba_to_sect(ns->head, ctrl->dmrsl);
else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
lim->max_hw_discard_sectors = UINT_MAX;
else
lim->max_hw_discard_sectors = 0;
lim->discard_granularity = lim->logical_block_size;
if (ctrl->dmrl)
lim->max_discard_segments = ctrl->dmrl;
else
lim->max_discard_segments = NVME_DSM_MAX_RANGES;
}
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
return uuid_equal(&a->uuid, &b->uuid) &&
@@ -2079,12 +2059,15 @@ static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
}
static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
struct queue_limits *lim)
struct nvme_id_ns_nvm *nvm, struct queue_limits *lim)
{
struct nvme_ns_head *head = ns->head;
struct nvme_ctrl *ctrl = ns->ctrl;
u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
u32 npdg = 1, npda = 1;
bool valid = true;
u8 optperf;
/*
* The block layer can't support LBA sizes larger than the page size
@@ -2099,7 +2082,12 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
phys_bs = bs;
atomic_bs = nvme_configure_atomic_write(ns, id, lim, bs);
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
optperf = id->nsfeat >> NVME_NS_FEAT_OPTPERF_SHIFT;
if (ctrl->vs >= NVME_VS(2, 1, 0))
optperf &= NVME_NS_FEAT_OPTPERF_MASK_2_1;
else
optperf &= NVME_NS_FEAT_OPTPERF_MASK;
if (optperf) {
/* NPWG = Namespace Preferred Write Granularity */
phys_bs = bs * (1 + le16_to_cpu(id->npwg));
/* NOWS = Namespace Optimal Write Size */
@@ -2116,11 +2104,54 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
lim->physical_block_size = min(phys_bs, atomic_bs);
lim->io_min = phys_bs;
lim->io_opt = io_opt;
if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
(ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
if ((ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
(ctrl->oncs & NVME_CTRL_ONCS_DSM))
lim->max_write_zeroes_sectors = UINT_MAX;
else
lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
lim->max_write_zeroes_sectors = ctrl->max_zeroes_sectors;
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
lim->max_hw_discard_sectors =
nvme_lba_to_sect(ns->head, ctrl->dmrsl);
else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
lim->max_hw_discard_sectors = UINT_MAX;
else
lim->max_hw_discard_sectors = 0;
/*
* NVMe namespaces advertise both a preferred deallocate granularity
* (for a discard length) and alignment (for a discard starting offset).
* However, Linux block devices advertise a single discard_granularity.
* From NVM Command Set specification 1.1 section 5.2.2, the NPDGL/NPDAL
* fields in the NVM Command Set Specific Identify Namespace structure
* are preferred to NPDG/NPDA in the Identify Namespace structure since
* they can represent larger values. However, NPDGL or NPDAL may be 0 if
* unsupported. NPDG and NPDA are 0's based.
* From Figure 115 of NVM Command Set specification 1.1, NPDGL and NPDAL
* are supported if the high bit of OPTPERF is set. NPDG is supported if
* the low bit of OPTPERF is set. NPDA is supported if either is set.
* NPDG should be a multiple of NPDA, and likewise NPDGL should be a
* multiple of NPDAL, but the spec doesn't say anything about NPDG vs.
* NPDAL or NPDGL vs. NPDA. So compute the maximum instead of assuming
* NPDG(L) is the larger. If neither NPDG, NPDGL, NPDA, nor NPDAL are
* supported, default the discard_granularity to the logical block size.
*/
if (optperf & 0x2 && nvm && nvm->npdgl)
npdg = le32_to_cpu(nvm->npdgl);
else if (optperf & 0x1)
npdg = from0based(id->npdg);
if (optperf & 0x2 && nvm && nvm->npdal)
npda = le32_to_cpu(nvm->npdal);
else if (optperf)
npda = from0based(id->npda);
if (check_mul_overflow(max(npdg, npda), lim->logical_block_size,
&lim->discard_granularity))
lim->discard_granularity = lim->logical_block_size;
if (ctrl->dmrl)
lim->max_discard_segments = ctrl->dmrl;
else
lim->max_discard_segments = NVME_DSM_MAX_RANGES;
return valid;
}
@@ -2354,7 +2385,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
}
lbaf = nvme_lbaf_index(id->flbas);
if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
if (nvme_id_cns_ok(ns->ctrl, NVME_ID_CNS_CS_NS)) {
ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
if (ret < 0)
goto out;
@@ -2382,10 +2413,9 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
nvme_set_ctrl_limits(ns->ctrl, &lim, false);
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
if (!nvme_update_disk_info(ns, id, &lim))
if (!nvme_update_disk_info(ns, id, nvm, &lim))
capacity = 0;
nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
@@ -3389,7 +3419,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
ctrl->dmrl = id->dmrl;
ctrl->dmrsl = le32_to_cpu(id->dmrsl);
if (id->wzsl)
if (id->wzsl && !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
free_data:

View File

@@ -762,6 +762,12 @@ static inline u32 nvme_bytes_to_numd(size_t len)
return (len >> 2) - 1;
}
/*
 * Decode a 2-byte "0's based" ("0-based") field: the wire value N
 * encodes a count of N + 1, so 0 means 1 unit.
 */
static inline u32 from0based(__le16 value)
{
	u32 raw = le16_to_cpu(value);

	return raw + 1;
}
static inline bool nvme_is_ana_error(u16 status)
{
switch (status & NVME_SCT_SC_MASK) {

View File

@@ -4176,6 +4176,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x502F), /* KINGSTON OM3SGP4xxxxK NVMe SSD */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */

View File

@@ -658,7 +658,7 @@ static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
struct nvme_dhchap_key *key, *host_key;
int ret;
ret = nvme_auth_generate_key(dhchap_secret, &key);
ret = nvme_auth_parse_key(dhchap_secret, &key);
if (ret) {
kfree(dhchap_secret);
return ret;
@@ -716,7 +716,7 @@ static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
struct nvme_dhchap_key *key, *ctrl_key;
int ret;
ret = nvme_auth_generate_key(dhchap_secret, &key);
ret = nvme_auth_parse_key(dhchap_secret, &key);
if (ret) {
kfree(dhchap_secret);
return ret;
@@ -829,7 +829,49 @@ static ssize_t tls_configured_key_show(struct device *dev,
return sysfs_emit(buf, "%08x\n", key_serial(key));
}
static DEVICE_ATTR_RO(tls_configured_key);
/*
 * Sysfs store handler for "tls_configured_key".  Writing the literal
 * value 0 triggers a PSK renegotiation (reauthentication) on the admin
 * queue of a secure-concatenation connection; any other value is
 * rejected with -EINVAL.  Returns @count on success, negative errno on
 * failure.
 */
static ssize_t tls_configured_key_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int error, qid;

	error = kstrtoint(buf, 10, &qid);
	if (error)
		return error;
	/*
	 * We currently only allow userspace to write a `0` indicating
	 * generate a new key.
	 */
	if (qid)
		return -EINVAL;
	/* Only meaningful for secure concatenation (concat) connections. */
	if (!ctrl->opts || !ctrl->opts->concat)
		return -EOPNOTSUPP;
	/* Renegotiate authentication on the admin queue (qid 0). */
	error = nvme_auth_negotiate(ctrl, 0);
	if (error < 0) {
		/*
		 * NOTE(review): on a failed renegotiation the controller is
		 * reset before returning the error — presumably to recover a
		 * usable TLS session; confirm against transport expectations.
		 */
		nvme_reset_ctrl(ctrl);
		return error;
	}
	error = nvme_auth_wait(ctrl, 0);
	if (error < 0) {
		/* Same recovery path as a failed negotiate above. */
		nvme_reset_ctrl(ctrl);
		return error;
	}
	/*
	 * We need to reset the TLS connection, so let's just
	 * reset the controller.
	 */
	nvme_reset_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR_RW(tls_configured_key);
static ssize_t tls_keyring_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -861,7 +903,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
!ctrl->opts->tls && !ctrl->opts->concat)
return 0;
if (a == &dev_attr_tls_configured_key.attr &&
(!ctrl->opts->tls_key || ctrl->opts->concat))
!ctrl->opts->concat)
return 0;
if (a == &dev_attr_tls_keyring.attr &&
!ctrl->opts->keyring)

View File

@@ -1057,6 +1057,8 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
status = NVME_SC_INTERNAL;
goto out;
}
if (req->ns->bdev)
nvmet_bdev_set_nvm_limits(req->ns->bdev, id);
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
kfree(id);
out:
@@ -1603,7 +1605,7 @@ void nvmet_execute_keep_alive(struct nvmet_req *req)
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
mod_delayed_work(system_percpu_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
nvmet_req_complete(req, status);
}

View File

@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/ctype.h>
@@ -45,15 +44,6 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
key_hash);
return -EINVAL;
}
if (key_hash > 0) {
/* Validate selected hash algorithm */
const char *hmac = nvme_auth_hmac_name(key_hash);
if (!crypto_has_shash(hmac, 0, 0)) {
pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
return -ENOTSUPP;
}
}
dhchap_secret = kstrdup(secret, GFP_KERNEL);
if (!dhchap_secret)
return -ENOMEM;
@@ -140,7 +130,7 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
return ret;
}
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset)
{
int ret = 0;
struct nvmet_host_link *p;
@@ -166,7 +156,7 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
goto out_unlock;
}
if (nvmet_queue_tls_keyid(sq)) {
if (!reset && nvmet_queue_tls_keyid(sq)) {
pr_debug("host %s tls enabled\n", ctrl->hostnqn);
goto out_unlock;
}
@@ -292,47 +282,30 @@ bool nvmet_check_auth_status(struct nvmet_req *req)
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
unsigned int shash_len)
{
struct crypto_shash *shash_tfm;
SHASH_DESC_ON_STACK(shash, shash_tfm);
struct nvme_auth_hmac_ctx hmac;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
const char *hash_name;
u8 *challenge = req->sq->dhchap_c1;
struct nvme_dhchap_key *transformed_key;
u8 buf[4];
int ret;
hash_name = nvme_auth_hmac_name(ctrl->shash_id);
if (!hash_name) {
pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
return -EINVAL;
}
shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(shash_tfm)) {
pr_err("failed to allocate shash %s\n", hash_name);
return PTR_ERR(shash_tfm);
}
if (shash_len != crypto_shash_digestsize(shash_tfm)) {
pr_err("%s: hash len mismatch (len %d digest %d)\n",
__func__, shash_len,
crypto_shash_digestsize(shash_tfm));
ret = -EINVAL;
goto out_free_tfm;
}
transformed_key = nvme_auth_transform_key(ctrl->host_key,
ctrl->hostnqn);
if (IS_ERR(transformed_key)) {
ret = PTR_ERR(transformed_key);
goto out_free_tfm;
}
if (IS_ERR(transformed_key))
return PTR_ERR(transformed_key);
ret = crypto_shash_setkey(shash_tfm, transformed_key->key,
ret = nvme_auth_hmac_init(&hmac, ctrl->shash_id, transformed_key->key,
transformed_key->len);
if (ret)
goto out_free_response;
if (shash_len != nvme_auth_hmac_hash_len(ctrl->shash_id)) {
pr_err("%s: hash len mismatch (len %u digest %zu)\n", __func__,
shash_len, nvme_auth_hmac_hash_len(ctrl->shash_id));
ret = -EINVAL;
goto out_free_response;
}
if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
challenge = kmalloc(shash_len, GFP_KERNEL);
if (!challenge) {
@@ -345,101 +318,67 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c1,
challenge, shash_len);
if (ret)
goto out;
goto out_free_challenge;
}
pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
req->sq->dhchap_tid);
shash->tfm = shash_tfm;
ret = crypto_shash_init(shash);
if (ret)
goto out;
ret = crypto_shash_update(shash, challenge, shash_len);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, challenge, shash_len);
put_unaligned_le32(req->sq->dhchap_s1, buf);
ret = crypto_shash_update(shash, buf, 4);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 4);
put_unaligned_le16(req->sq->dhchap_tid, buf);
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 2);
*buf = req->sq->sc_c;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, "HostHost", 8);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, "HostHost", 8);
memset(buf, 0, 4);
ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
if (ret)
goto out;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
strlen(ctrl->subsys->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_final(shash, response);
out:
nvme_auth_hmac_update(&hmac, ctrl->hostnqn, strlen(ctrl->hostnqn));
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, ctrl->subsys->subsysnqn,
strlen(ctrl->subsys->subsysnqn));
nvme_auth_hmac_final(&hmac, response);
ret = 0;
out_free_challenge:
if (challenge != req->sq->dhchap_c1)
kfree(challenge);
out_free_response:
memzero_explicit(&hmac, sizeof(hmac));
nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
return ret;
}
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
unsigned int shash_len)
{
struct crypto_shash *shash_tfm;
struct shash_desc *shash;
struct nvme_auth_hmac_ctx hmac;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
const char *hash_name;
u8 *challenge = req->sq->dhchap_c2;
struct nvme_dhchap_key *transformed_key;
u8 buf[4];
int ret;
hash_name = nvme_auth_hmac_name(ctrl->shash_id);
if (!hash_name) {
pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
return -EINVAL;
}
shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(shash_tfm)) {
pr_err("failed to allocate shash %s\n", hash_name);
return PTR_ERR(shash_tfm);
}
if (shash_len != crypto_shash_digestsize(shash_tfm)) {
pr_debug("%s: hash len mismatch (len %d digest %d)\n",
__func__, shash_len,
crypto_shash_digestsize(shash_tfm));
ret = -EINVAL;
goto out_free_tfm;
}
transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
ctrl->subsys->subsysnqn);
if (IS_ERR(transformed_key)) {
ret = PTR_ERR(transformed_key);
goto out_free_tfm;
}
if (IS_ERR(transformed_key))
return PTR_ERR(transformed_key);
ret = crypto_shash_setkey(shash_tfm, transformed_key->key,
ret = nvme_auth_hmac_init(&hmac, ctrl->shash_id, transformed_key->key,
transformed_key->len);
if (ret)
goto out_free_response;
if (shash_len != nvme_auth_hmac_hash_len(ctrl->shash_id)) {
pr_err("%s: hash len mismatch (len %u digest %zu)\n", __func__,
shash_len, nvme_auth_hmac_hash_len(ctrl->shash_id));
ret = -EINVAL;
goto out_free_response;
}
if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
challenge = kmalloc(shash_len, GFP_KERNEL);
if (!challenge) {
@@ -455,55 +394,29 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
goto out_free_challenge;
}
shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
goto out_free_challenge;
}
shash->tfm = shash_tfm;
nvme_auth_hmac_update(&hmac, challenge, shash_len);
ret = crypto_shash_init(shash);
if (ret)
goto out;
ret = crypto_shash_update(shash, challenge, shash_len);
if (ret)
goto out;
put_unaligned_le32(req->sq->dhchap_s2, buf);
ret = crypto_shash_update(shash, buf, 4);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 4);
put_unaligned_le16(req->sq->dhchap_tid, buf);
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
nvme_auth_hmac_update(&hmac, buf, 2);
memset(buf, 0, 4);
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, "Controller", 10);
if (ret)
goto out;
ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
strlen(ctrl->subsys->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
if (ret)
goto out;
ret = crypto_shash_final(shash, response);
out:
kfree(shash);
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, "Controller", 10);
nvme_auth_hmac_update(&hmac, ctrl->subsys->subsysnqn,
strlen(ctrl->subsys->subsysnqn));
nvme_auth_hmac_update(&hmac, buf, 1);
nvme_auth_hmac_update(&hmac, ctrl->hostnqn, strlen(ctrl->hostnqn));
nvme_auth_hmac_final(&hmac, response);
ret = 0;
out_free_challenge:
if (challenge != req->sq->dhchap_c2)
kfree(challenge);
out_free_response:
memzero_explicit(&hmac, sizeof(hmac));
nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
return ret;
}
@@ -531,7 +444,7 @@ int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
}
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
u8 *pkey, int pkey_size)
const u8 *pkey, int pkey_size)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
int ret;
@@ -557,7 +470,8 @@ int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
void nvmet_auth_insert_psk(struct nvmet_sq *sq)
{
int hash_len = nvme_auth_hmac_hash_len(sq->ctrl->shash_id);
u8 *psk, *digest, *tls_psk;
u8 *psk, *tls_psk;
char *digest;
size_t psk_len;
int ret;
#ifdef CONFIG_NVME_TARGET_TCP_TLS

View File

@@ -17,7 +17,6 @@
#include <linux/nvme-auth.h>
#endif
#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include <linux/nospec.h>
@@ -2181,8 +2180,6 @@ static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
hmac_id = nvme_auth_hmac_id(page);
if (hmac_id == NVME_AUTH_HASH_INVALID)
return -EINVAL;
if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
return -ENOTSUPP;
host->dhchap_hash_id = hmac_id;
return count;
}

View File

@@ -1686,7 +1686,7 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
if (args->hostid)
uuid_copy(&ctrl->hostid, args->hostid);
dhchap_status = nvmet_setup_auth(ctrl, args->sq);
dhchap_status = nvmet_setup_auth(ctrl, args->sq, false);
if (dhchap_status) {
pr_err("Failed to setup authentication, dhchap status %u\n",
dhchap_status);
@@ -1942,12 +1942,13 @@ static int __init nvmet_init(void)
if (!nvmet_bvec_cache)
return -ENOMEM;
zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!zbd_wq)
goto out_destroy_bvec_cache;
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
WQ_MEM_RECLAIM, 0);
WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!buffered_io_wq)
goto out_free_zbd_work_queue;

View File

@@ -8,7 +8,6 @@
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include "nvmet.h"
@@ -75,8 +74,7 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
if (!fallback_hash_id &&
crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
if (!fallback_hash_id && nvme_auth_hmac_hash_len(host_hmac_id))
fallback_hash_id = host_hmac_id;
if (ctrl->shash_id != host_hmac_id)
continue;
@@ -293,7 +291,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
pr_debug("%s: ctrl %d qid %d reset negotiation\n",
__func__, ctrl->cntlid, req->sq->qid);
if (!req->sq->qid) {
dhchap_status = nvmet_setup_auth(ctrl, req->sq);
dhchap_status = nvmet_setup_auth(ctrl, req->sq,
true);
if (dhchap_status) {
pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
ctrl->cntlid);
@@ -391,14 +390,15 @@ done:
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
mod_delayed_work(system_wq, &req->sq->auth_expired_work,
mod_delayed_work(system_percpu_wq, &req->sq->auth_expired_work,
auth_expire_secs * HZ);
goto complete;
}
/* Final states, clear up variables */
nvmet_auth_sq_free(req->sq);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
nvmet_auth_sq_free(req->sq);
nvmet_ctrl_fatal_error(ctrl);
}
complete:
nvmet_req_complete(req, status);
@@ -574,9 +574,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, d, al);
kfree(d);
done:
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
nvmet_auth_sq_free(req->sq);
nvmet_ctrl_fatal_error(ctrl);
}

View File

@@ -792,9 +792,9 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue)
return NULL;
queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
queue->work_q = alloc_workqueue("ntfc%d.%d.%d", WQ_PERCPU, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
if (!queue->work_q)
goto out_free_queue;

View File

@@ -30,11 +30,11 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->nacwu = lpp0b;
/*
* Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
* NOWS are defined for this namespace and should be used by
* the host for I/O optimization.
* OPTPERF = 11b indicates that the fields NPWG, NPWA, NPDG, NPDA,
* NPDGL, NPDAL, and NOWS are defined for this namespace and should be
* used by the host for I/O optimization.
*/
id->nsfeat |= 1 << 4;
id->nsfeat |= 0x3 << NVME_NS_FEAT_OPTPERF_SHIFT;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
/* NPWA = Namespace Preferred Write Alignment. 0's based */
@@ -52,6 +52,17 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->dlfeat = (1 << 3) | 0x1;
}
void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
struct nvme_id_ns_nvm *id)
{
/*
* NPDGL = Namespace Preferred Deallocate Granularity Large
* NPDAL = Namespace Preferred Deallocate Alignment Large
*/
id->npdgl = id->npdal = cpu_to_le32(bdev_discard_granularity(bdev) /
bdev_logical_block_size(bdev));
}
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
if (ns->bdev_file) {

View File

@@ -419,7 +419,6 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
if (ctrl->ctrl.queue_count > 1) {
nvme_quiesce_io_queues(&ctrl->ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
nvme_loop_destroy_io_queues(ctrl);
}
@@ -427,7 +426,6 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
nvme_disable_ctrl(&ctrl->ctrl, true);
nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
}

View File

@@ -549,6 +549,8 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
struct nvme_id_ns_nvm *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
@@ -895,7 +897,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
@@ -912,11 +914,11 @@ static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
u8 *buf, int buf_size);
const u8 *pkey, int pkey_size);
void nvmet_auth_insert_psk(struct nvmet_sq *sq);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl,
struct nvmet_sq *sq)
struct nvmet_sq *sq, bool reset)
{
return 0;
}

View File

@@ -2225,7 +2225,7 @@ static int __init nvmet_tcp_init(void)
int ret;
nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;

View File

@@ -1,20 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* HKDF: HMAC-based Key Derivation Function (HKDF), RFC 5869
*
* Extracted from fs/crypto/hkdf.c, which has
* Copyright 2019 Google LLC
*/
#ifndef _CRYPTO_HKDF_H
#define _CRYPTO_HKDF_H
#include <crypto/hash.h>
int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
u8 *prk);
int hkdf_expand(struct crypto_shash *hmac_tfm,
const u8 *info, unsigned int infolen,
u8 *okm, unsigned int okmlen);
#endif

View File

@@ -7,6 +7,7 @@
#define _NVME_AUTH_H
#include <crypto/kpp.h>
#include <crypto/sha2.h>
struct nvme_dhchap_key {
size_t len;
@@ -20,32 +21,44 @@ const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
const char *nvme_auth_hmac_name(u8 hmac_id);
const char *nvme_auth_digest_name(u8 hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id);
u8 nvme_auth_hmac_id(const char *hmac_name);
/*
 * Streaming HMAC context for DH-HMAC-CHAP computations.
 *
 * @hmac_id holds the NVMe hash identifier passed to
 * nvme_auth_hmac_init(); the anonymous union carries the in-progress
 * state for the corresponding algorithm (presumably selected by
 * @hmac_id — the init/update/final helpers dispatch on it).
 */
struct nvme_auth_hmac_ctx {
	u8 hmac_id;
	union {
		struct hmac_sha256_ctx sha256;
		struct hmac_sha384_ctx sha384;
		struct hmac_sha512_ctx sha512;
	};
};
int nvme_auth_hmac_init(struct nvme_auth_hmac_ctx *hmac, u8 hmac_id,
const u8 *key, size_t key_len);
void nvme_auth_hmac_update(struct nvme_auth_hmac_ctx *hmac, const u8 *data,
size_t data_len);
void nvme_auth_hmac_final(struct nvme_auth_hmac_ctx *hmac, u8 *out);
u32 nvme_auth_key_struct_size(u32 key_len);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash);
struct nvme_dhchap_key *nvme_auth_extract_key(const char *secret, u8 key_hash);
void nvme_auth_free_key(struct nvme_dhchap_key *key);
struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash);
struct nvme_dhchap_key *nvme_auth_transform_key(
struct nvme_dhchap_key *key, char *nqn);
int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *challenge, u8 *aug, size_t hlen);
const struct nvme_dhchap_key *key, const char *nqn);
int nvme_auth_parse_key(const char *secret, struct nvme_dhchap_key **ret_key);
int nvme_auth_augmented_challenge(u8 hmac_id, const u8 *skey, size_t skey_len,
const u8 *challenge, u8 *aug, size_t hlen);
int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid);
int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
u8 *host_key, size_t host_key_len);
int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
u8 *ctrl_key, size_t ctrl_key_len,
const u8 *ctrl_key, size_t ctrl_key_len,
u8 *sess_key, size_t sess_key_len);
int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *c1, u8 *c2, size_t hash_len,
int nvme_auth_generate_psk(u8 hmac_id, const u8 *skey, size_t skey_len,
const u8 *c1, const u8 *c2, size_t hash_len,
u8 **ret_psk, size_t *ret_len);
int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
char *subsysnqn, char *hostnqn, u8 **ret_digest);
int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
u8 *psk_digest, u8 **ret_psk);
int nvme_auth_generate_digest(u8 hmac_id, const u8 *psk, size_t psk_len,
const char *subsysnqn, const char *hostnqn,
char **ret_digest);
int nvme_auth_derive_tls_psk(int hmac_id, const u8 *psk, size_t psk_len,
const char *psk_digest, u8 **ret_psk);
#endif /* _NVME_AUTH_H */

View File

@@ -513,9 +513,16 @@ struct nvme_id_ns_nvm {
__u8 pic;
__u8 rsvd9[3];
__le32 elbaf[64];
__u8 rsvd268[3828];
__le32 npdgl;
__le32 nprg;
__le32 npra;
__le32 nors;
__le32 npdal;
__u8 rsvd288[3808];
};
static_assert(sizeof(struct nvme_id_ns_nvm) == 4096);
enum {
NVME_ID_NS_NVM_STS_MASK = 0x7f,
NVME_ID_NS_NVM_GUARD_SHIFT = 7,
@@ -590,7 +597,11 @@ enum {
enum {
NVME_NS_FEAT_THIN = 1 << 0,
NVME_NS_FEAT_ATOMICS = 1 << 1,
NVME_NS_FEAT_IO_OPT = 1 << 4,
NVME_NS_FEAT_OPTPERF_SHIFT = 4,
/* In NVMe version 2.0 and below, OPTPERF is only bit 4 of NSFEAT */
NVME_NS_FEAT_OPTPERF_MASK = 0x1,
/* Since version 2.1, OPTPERF is bits 4 and 5 of NSFEAT */
NVME_NS_FEAT_OPTPERF_MASK_2_1 = 0x3,
NVME_NS_ATTR_RO = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
NVME_NS_FLBAS_LBA_UMASK = 0x60,
@@ -1837,6 +1848,11 @@ enum {
NVME_AUTH_HASH_INVALID = 0xff,
};
/* Maximum digest size for any NVME_AUTH_HASH_* value */
enum {
	NVME_AUTH_MAX_DIGEST_SIZE = 64,	/* SHA-512 output length in bytes */
};
/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
enum {
NVME_AUTH_DHGROUP_NULL = 0x00,
@@ -2332,4 +2348,8 @@ enum nvme_pr_change_ptpl {
#define NVME_PR_IGNORE_KEY (1 << 3)
/* Section 8.3.4.5.2 of the NVMe 2.1 */
#define NVME_AUTH_DHCHAP_MAX_HASH_IDS 30
#define NVME_AUTH_DHCHAP_MAX_DH_IDS 30
#endif /* _LINUX_NVME_H */