Merge tag 'v7.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Replace crypto_get_default_rng with crypto_stdrng_get_bytes
   - Remove simd skcipher support
   - Allow algorithm types to be disabled when CRYPTO_SELFTESTS is off

  Algorithms:
   - Remove CPU-based des/3des acceleration
   - Add test vectors for authenc(hmac(md5),cbc({aes,des})) and
     authenc(hmac({md5,sha1,sha224,sha256,sha384,sha512}),rfc3686(ctr(aes)))
   - Replace spin lock with mutex in jitterentropy

  Drivers:
   - Add authenc algorithms to safexcel
   - Add support for zstd in qat
   - Add wireless mode support for QAT GEN6
   - Add anti-rollback support for QAT GEN6
   - Add support for ctr(aes), gcm(aes), and ccm(aes) in dthev2"

* tag 'v7.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (129 commits)
  crypto: af_alg - use sock_kmemdup in alg_setkey_by_key_serial
  crypto: vmx - remove CRYPTO_DEV_VMX from Kconfig
  crypto: omap - convert reqctx buffer to fixed-size array
  crypto: atmel-sha204a - add Thorsten Blum as maintainer
  crypto: atmel-ecc - add Thorsten Blum as maintainer
  crypto: qat - fix IRQ cleanup on 6xxx probe failure
  crypto: geniv - Remove unused spinlock from struct aead_geniv_ctx
  crypto: qce - simplify qce_xts_swapiv()
  crypto: hisilicon - Fix dma_unmap_single() direction
  crypto: talitos - rename first/last to first_desc/last_desc
  crypto: talitos - fix SEC1 32k ahash request limitation
  crypto: jitterentropy - replace long-held spinlock with mutex
  crypto: hisilicon - remove unused and non-public APIs for qm and sec
  crypto: hisilicon/qm - drop redundant variable initialization
  crypto: hisilicon/qm - remove else after return
  crypto: hisilicon/qm - add const qualifier to info_name in struct qm_cmd_dump_item
  crypto: hisilicon - fix the format string type error
  crypto: ccree - fix a memory leak in cc_mac_digest()
  crypto: qat - add support for zstd
  crypto: qat - use swab32 macro
  ...
Committed by Linus Torvalds on 2026-04-15 15:22:26 -07:00

 166 files changed, 5209 insertions(+), 3842 deletions(-)

@@ -50,6 +50,13 @@ Description: Dump debug registers from the QM.
Available for PF and VF in host. VF in guest currently only
has one debug register.
What: /sys/kernel/debug/hisi_hpre/<bdf>/dev_usage
Date: Mar 2026
Contact: linux-crypto@vger.kernel.org
Description: Query the real-time bandwidth usage of the device.
Returns the bandwidth usage of each channel on the device.
The returned number is a percentage.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org


@@ -24,6 +24,13 @@ Description: The <bdf> is related the function for PF and VF.
1/1000~1000/1000 of total QoS. The driver reading alg_qos to
get related QoS in the host and VM, Such as "cat alg_qos".
What: /sys/kernel/debug/hisi_sec2/<bdf>/dev_usage
Date: Mar 2026
Contact: linux-crypto@vger.kernel.org
Description: Query the real-time bandwidth usage of the device.
Returns the bandwidth usage of each channel on the device.
The returned number is a percentage.
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/qm_regs
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org


@@ -36,6 +36,13 @@ Description: The <bdf> is related the function for PF and VF.
1/1000~1000/1000 of total QoS. The driver reading alg_qos to
get related QoS in the host and VM, Such as "cat alg_qos".
What: /sys/kernel/debug/hisi_zip/<bdf>/dev_usage
Date: Mar 2026
Contact: linux-crypto@vger.kernel.org
Description: Query the real-time bandwidth usage of the device.
Returns the bandwidth usage of each channel on the device.
The returned number is a percentage.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/regs
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
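
The dev_usage entries added above are plain read-only debugfs attributes. A minimal user-space sketch in C for polling one of them is shown here; the hisi_hpre path and the <bdf> value are placeholders inferred from the descriptions above, not part of the patch:

#include <stdio.h>

int main(void)
{
        /* Placeholder path: substitute the device's real <bdf>. */
        const char *path = "/sys/kernel/debug/hisi_hpre/0000:79:00.0/dev_usage";
        char line[128];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        /* One usage figure per channel, reported as a percentage. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}

The same pattern applies to the hisi_sec2 and hisi_zip variants of the attribute, since all three share the read-only semantics described above.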


@@ -0,0 +1,114 @@
What: /sys/bus/pci/devices/<BDF>/qat_svn/
Date: June 2026
KernelVersion: 7.1
Contact: qat-linux@intel.com
Description: Directory containing Security Version Number (SVN) attributes for
the Anti-Rollback (ARB) feature. The ARB feature prevents downloading
older firmware versions to the acceleration device.
What: /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
Date: June 2026
KernelVersion: 7.1
Contact: qat-linux@intel.com
Description:
(RO) Reports the minimum allowed firmware SVN.
Returns an integer greater than zero. Firmware with SVN lower than
this value is rejected.
A write to qat_svn/commit will update this value. The update is not
persistent across reboot; on reboot, this value is reset from
qat_svn/permanent_min.
Example usage::
# cat /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
2
This attribute is available only on devices that support
Anti-Rollback.
What: /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
Date: June 2026
KernelVersion: 7.1
Contact: qat-linux@intel.com
Description:
(RO) Reports the persistent minimum SVN used to initialize
qat_svn/enforced_min on each reboot.
Returns an integer greater than zero. A write to qat_svn/commit
may update this value, depending on platform/BIOS settings.
Example usage::
# cat /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
3
This attribute is available only on devices that support
Anti-Rollback.
What: /sys/bus/pci/devices/<BDF>/qat_svn/active
Date: June 2026
KernelVersion: 7.1
Contact: qat-linux@intel.com
Description:
(RO) Reports the SVN of the currently active firmware image.
Returns an integer greater than zero.
Example usage::
# cat /sys/bus/pci/devices/<BDF>/qat_svn/active
2
This attribute is available only on devices that support
Anti-Rollback.
What: /sys/bus/pci/devices/<BDF>/qat_svn/commit
Date: June 2026
KernelVersion: 7.1
Contact: qat-linux@intel.com
Description:
(WO) Commits the currently active SVN as the minimum allowed SVN.
Writing 1 sets qat_svn/enforced_min to the value of qat_svn/active,
preventing future firmware loads with lower SVN.
Depending on platform/BIOS settings, a commit may also update
qat_svn/permanent_min.
Note that on reboot, qat_svn/enforced_min reverts to
qat_svn/permanent_min.
It is advisable to use this attribute with caution, only when
it is necessary to set a new minimum SVN for the firmware.
Before committing the SVN update, it is crucial to check the
current values of qat_svn/active, qat_svn/enforced_min and
qat_svn/permanent_min. This verification helps ensure that the
commit operation aligns with the intended outcome.
While writing to the file, any value other than '1' will result
in an error and have no effect.
Example usage::
## Read current values
# cat /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
2
# cat /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
2
# cat /sys/bus/pci/devices/<BDF>/qat_svn/active
3
## Commit active SVN
# echo 1 > /sys/bus/pci/devices/<BDF>/qat_svn/commit
## Read updated values
# cat /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
3
# cat /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
3
This attribute is available only on devices that support
Anti-Rollback.
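
To make the check-then-commit flow described above concrete, here is a hedged user-space sketch in C. It uses only the sysfs files documented in this hunk; the BDF value is a placeholder and the read_svn() helper is invented for illustration:

#include <stdio.h>

/* Read one qat_svn attribute into *val; returns 0 on success. */
static int read_svn(const char *bdf, const char *attr, long *val)
{
        char path[256];
        FILE *f;
        int ok;

        snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/qat_svn/%s", bdf, attr);
        f = fopen(path, "r");
        if (!f)
                return -1;
        ok = (fscanf(f, "%ld", val) == 1);
        fclose(f);
        return ok ? 0 : -1;
}

int main(void)
{
        const char *bdf = "0000:6b:00.0";       /* placeholder BDF */
        long active, enforced, permanent;
        char path[256];
        FILE *f;

        if (read_svn(bdf, "active", &active) ||
            read_svn(bdf, "enforced_min", &enforced) ||
            read_svn(bdf, "permanent_min", &permanent))
                return 1;

        /* Only commit when the active firmware actually carries a newer SVN. */
        if (active <= enforced) {
                fprintf(stderr, "nothing to commit: active=%ld enforced_min=%ld\n",
                        active, enforced);
                return 0;
        }

        snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/qat_svn/commit", bdf);
        f = fopen(path, "w");
        if (!f || fputs("1", f) == EOF) {
                perror("commit");
                return 1;
        }
        fclose(f);
        printf("committed SVN %ld as the new minimum\n", active);
        return 0;
}

The explicit comparison against enforced_min mirrors the documentation's advice to verify the current values before committing, since the new minimum cannot be lowered afterwards (and may become permanent, depending on platform/BIOS settings).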


@@ -23,7 +23,7 @@ user space, however. This includes the difference between synchronous
and asynchronous invocations. The user space API call is fully
synchronous.
[1] https://www.chronox.de/libkcapi.html
[1] https://www.chronox.de/libkcapi/index.html
User Space API General Remarks
------------------------------
@@ -406,4 +406,4 @@ Please see [1] for libkcapi which provides an easy-to-use wrapper around
the aforementioned Netlink kernel interface. [1] also contains a test
application that invokes all libkcapi API calls.
[1] https://www.chronox.de/libkcapi.html
[1] https://www.chronox.de/libkcapi/index.html
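
The document touched by this hunk covers the AF_ALG socket interface. As a small illustration of the "fully synchronous" call pattern noted above, here is a self-contained sha256 hash example in C; the algorithm name and input buffer are arbitrary choices, not taken from the patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha256",
        };
        unsigned char digest[32];
        int tfmfd, opfd;

        /* Bind a transform socket to the requested algorithm. */
        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                perror("AF_ALG setup");
                return 1;
        }
        /* Each accept() yields one operation instance. */
        opfd = accept(tfmfd, NULL, 0);
        if (opfd < 0) {
                perror("accept");
                return 1;
        }
        /* The call is synchronous: write the data, then read the digest. */
        if (write(opfd, "hello", 5) != 5 ||
            read(opfd, digest, sizeof(digest)) != sizeof(digest)) {
                perror("hash");
                return 1;
        }
        for (size_t i = 0; i < sizeof(digest); i++)
                printf("%02x", digest[i]);
        putchar('\n');
        close(opfd);
        close(tfmfd);
        return 0;
}

The libkcapi library referenced in [1] wraps exactly this socket handshake behind a conventional C API.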


@@ -18,6 +18,7 @@ properties:
- items:
- enum:
- marvell,armada-3700-crypto
- mediatek,mt7981-crypto
- mediatek,mt7986-crypto
- const: inside-secure,safexcel-eip97ies
- const: inside-secure,safexcel-eip197b
@@ -80,7 +81,9 @@ allOf:
compatible:
not:
contains:
const: mediatek,mt7986-crypto
enum:
- mediatek,mt7981-crypto
- mediatek,mt7986-crypto
then:
properties:
interrupts:


@@ -13,6 +13,7 @@ properties:
compatible:
items:
- enum:
- qcom,eliza-inline-crypto-engine
- qcom,kaanapali-inline-crypto-engine
- qcom,milos-inline-crypto-engine
- qcom,qcs8300-inline-crypto-engine
@@ -31,6 +32,11 @@ properties:
clocks:
maxItems: 1
operating-points-v2: true
opp-table:
type: object
required:
- compatible
- reg
@@ -47,5 +53,26 @@ examples:
"qcom,inline-crypto-engine";
reg = <0x01d88000 0x8000>;
clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
operating-points-v2 = <&ice_opp_table>;
ice_opp_table: opp-table {
compatible = "operating-points-v2";
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-201500000 {
opp-hz = /bits/ 64 <201500000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-403000000 {
opp-hz = /bits/ 64 <403000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
...


@@ -19,6 +19,7 @@ properties:
- microchip,sam9x60-trng
- items:
- enum:
- microchip,lan9691-trng
- microchip,sama7g5-trng
- const: atmel,at91sam9g45-trng
- items:


@@ -2909,7 +2909,6 @@ F: include/linux/soc/ixp4xx/qmgr.h
ARM/INTEL KEEMBAY ARCHITECTURE
M: Paul J. Murphy <paul.j.murphy@intel.com>
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/arm/intel,keembay.yaml
F: arch/arm64/boot/dts/intel/keembay-evm.dts
@@ -6838,12 +6837,6 @@ L: linux-crypto@vger.kernel.org
S: Maintained
F: tools/crypto/tcrypt/tcrypt_speed_compare.py
CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
M: Neil Horman <nhorman@tuxdriver.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: crypto/rng.c
CS3308 MEDIA DRIVER
M: Hans Verkuil <hverkuil@kernel.org>
L: linux-media@vger.kernel.org
@@ -12956,7 +12949,6 @@ F: drivers/dma/ioat*
INTEL IAA CRYPTO DRIVER
M: Kristen Accardi <kristen.c.accardi@intel.com>
M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
M: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst
@@ -13064,8 +13056,7 @@ F: Documentation/devicetree/bindings/display/intel,keembay-display.yaml
F: drivers/gpu/drm/kmb/
INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
S: Maintained
S: Orphan
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-aes.yaml
F: drivers/crypto/intel/keembay/Kconfig
F: drivers/crypto/intel/keembay/Makefile
@@ -13074,7 +13065,6 @@ F: drivers/crypto/intel/keembay/ocs-aes.c
F: drivers/crypto/intel/keembay/ocs-aes.h
INTEL KEEM BAY OCS ECC CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Prabhjot Khurana <prabhjot.khurana@intel.com>
M: Mark Gross <mgross@linux.intel.com>
S: Maintained
@@ -13084,7 +13074,6 @@ F: drivers/crypto/intel/keembay/Makefile
F: drivers/crypto/intel/keembay/keembay-ocs-ecc.c
INTEL KEEM BAY OCS HCU CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Declan Murphy <declan.murphy@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
@@ -17295,6 +17284,12 @@ S: Supported
F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
F: drivers/spi/spi-at91-usart.c
MICROCHIP ATSHA204A DRIVER
M: Thorsten Blum <thorsten.blum@linux.dev>
L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/atmel-sha204a.c
MICROCHIP AUDIO ASOC DRIVERS
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
M: Andrei Simion <andrei.simion@microchip.com>
@@ -17314,9 +17309,10 @@ F: Documentation/devicetree/bindings/media/microchip,csi2dc.yaml
F: drivers/media/platform/microchip/microchip-csi2dc.c
MICROCHIP ECC DRIVER
M: Thorsten Blum <thorsten.blum@linux.dev>
L: linux-crypto@vger.kernel.org
S: Orphan
F: drivers/crypto/atmel-ecc.*
S: Maintained
F: drivers/crypto/atmel-ecc.c
MICROCHIP EIC DRIVER
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>


@@ -808,7 +808,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m


@@ -793,7 +793,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m


@@ -21,22 +21,6 @@ config CRYPTO_AES_S390
key sizes and XTS mode is hardware accelerated for 256 and
512 bit keys.
config CRYPTO_DES_S390
tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR"
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
Block ciphers: DES (FIPS 46-2) cipher algorithm
Block ciphers: Triple DES EDE (FIPS 46-3) cipher algorithm
Length-preserving ciphers: DES with ECB, CBC, and CTR modes
Length-preserving ciphers: Triple DES EDE with ECB, CBC, and CTR modes
Architecture: s390
As of z990 the ECB and CBC mode are hardware accelerated.
As of z196 the CTR mode is hardware accelerated.
config CRYPTO_HMAC_S390
tristate "Keyed-hash message authentication code: HMAC"
select CRYPTO_HASH


@@ -3,7 +3,6 @@
# Cryptographic API
#
obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o


@@ -1,502 +0,0 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 implementation of the DES Cipher Algorithm.
*
* Copyright IBM Corp. 2003, 2011
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/fips.h>
#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <asm/cpacf.h>
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
};
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
err = crypto_des_verify_key(tfm, key);
if (err)
return err;
memcpy(ctx->key, key, key_len);
return 0;
}
static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
return des_setkey(crypto_skcipher_tfm(tfm), key, key_len);
}
static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void s390_des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_DEA | CPACF_DECRYPT,
ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_driver_name = "des-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_setkey = des_setkey,
.cia_encrypt = s390_des_encrypt,
.cia_decrypt = s390_des_decrypt,
}
}
};
static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
ret = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
cpacf_km(fc, ctx->key, walk.dst.virt.addr,
walk.src.virt.addr, n);
ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
struct {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
} param;
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
memcpy(param.iv, walk.iv, DES_BLOCK_SIZE);
memcpy(param.key, ctx->key, DES3_KEY_SIZE);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
cpacf_kmc(fc, &param, walk.dst.virt.addr,
walk.src.virt.addr, n);
memcpy(walk.iv, param.iv, DES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
static int ecb_des_encrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_DEA);
}
static int ecb_des_decrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT);
}
static struct skcipher_alg ecb_des_alg = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-s390",
.base.cra_priority = 400, /* combo: des + ecb */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = des_setkey_skcipher,
.encrypt = ecb_des_encrypt,
.decrypt = ecb_des_decrypt,
};
static int cbc_des_encrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_DEA);
}
static int cbc_des_decrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT);
}
static struct skcipher_alg cbc_des_alg = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-s390",
.base.cra_priority = 400, /* combo: des + cbc */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_setkey_skcipher,
.encrypt = cbc_des_encrypt,
.decrypt = cbc_des_decrypt,
};
/*
* RFC2451:
*
* For DES-EDE3, there is no known need to reject weak or
* complementation keys. Any weakness is obviated by the use of
* multiple keys.
*
* However, if the first two or last two independent 64-bit keys are
* equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*
* In fips mode additionally check for all 3 keys are unique.
*
*/
static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
err = crypto_des3_ede_verify_key(tfm, key);
if (err)
return err;
memcpy(ctx->key, key, key_len);
return 0;
}
static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len);
}
static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_TDEA_192, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_TDEA_192 | CPACF_DECRYPT,
ctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des3_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_KEY_SIZE,
.cia_max_keysize = DES3_KEY_SIZE,
.cia_setkey = des3_setkey,
.cia_encrypt = des3_encrypt,
.cia_decrypt = des3_decrypt,
}
}
};
static int ecb_des3_encrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_TDEA_192);
}
static int ecb_des3_decrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT);
}
static struct skcipher_alg ecb_des3_alg = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3_ede-s390",
.base.cra_priority = 400, /* combo: des3 + ecb */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_KEY_SIZE,
.max_keysize = DES3_KEY_SIZE,
.setkey = des3_setkey_skcipher,
.encrypt = ecb_des3_encrypt,
.decrypt = ecb_des3_decrypt,
};
static int cbc_des3_encrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_TDEA_192);
}
static int cbc_des3_decrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT);
}
static struct skcipher_alg cbc_des3_alg = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3_ede-s390",
.base.cra_priority = 400, /* combo: des3 + cbc */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_KEY_SIZE,
.max_keysize = DES3_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des3_setkey_skcipher,
.encrypt = cbc_des3_encrypt,
.decrypt = cbc_des3_decrypt,
};
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* align to block size, max. PAGE_SIZE */
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
memcpy(ctrptr, iv, DES_BLOCK_SIZE);
for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) {
memcpy(ctrptr + DES_BLOCK_SIZE, ctrptr, DES_BLOCK_SIZE);
crypto_inc(ctrptr + DES_BLOCK_SIZE, DES_BLOCK_SIZE);
ctrptr += DES_BLOCK_SIZE;
}
return n;
}
static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[DES_BLOCK_SIZE], *ctrptr;
struct skcipher_walk walk;
unsigned int n, nbytes;
int ret, locked;
locked = mutex_trylock(&ctrblk_lock);
ret = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) {
n = DES_BLOCK_SIZE;
if (nbytes >= 2*DES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk.iv;
cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr,
walk.src.virt.addr, n, ctrptr);
if (ctrptr == ctrblk)
memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
crypto_inc(walk.iv, DES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - n);
}
if (locked)
mutex_unlock(&ctrblk_lock);
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr,
DES_BLOCK_SIZE, walk.iv);
memcpy(walk.dst.virt.addr, buf, nbytes);
crypto_inc(walk.iv, DES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
static int ctr_des_crypt(struct skcipher_request *req)
{
return ctr_desall_crypt(req, CPACF_KMCTR_DEA);
}
static struct skcipher_alg ctr_des_alg = {
.base.cra_name = "ctr(des)",
.base.cra_driver_name = "ctr-des-s390",
.base.cra_priority = 400, /* combo: des + ctr */
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_setkey_skcipher,
.encrypt = ctr_des_crypt,
.decrypt = ctr_des_crypt,
.chunksize = DES_BLOCK_SIZE,
};
static int ctr_des3_crypt(struct skcipher_request *req)
{
return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192);
}
static struct skcipher_alg ctr_des3_alg = {
.base.cra_name = "ctr(des3_ede)",
.base.cra_driver_name = "ctr-des3_ede-s390",
.base.cra_priority = 400, /* combo: des3 + ede */
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_KEY_SIZE,
.max_keysize = DES3_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des3_setkey_skcipher,
.encrypt = ctr_des3_crypt,
.decrypt = ctr_des3_crypt,
.chunksize = DES_BLOCK_SIZE,
};
static struct crypto_alg *des_s390_algs_ptr[2];
static int des_s390_algs_num;
static struct skcipher_alg *des_s390_skciphers_ptr[6];
static int des_s390_skciphers_num;
static int des_s390_register_alg(struct crypto_alg *alg)
{
int ret;
ret = crypto_register_alg(alg);
if (!ret)
des_s390_algs_ptr[des_s390_algs_num++] = alg;
return ret;
}
static int des_s390_register_skcipher(struct skcipher_alg *alg)
{
int ret;
ret = crypto_register_skcipher(alg);
if (!ret)
des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg;
return ret;
}
static void des_s390_exit(void)
{
while (des_s390_algs_num--)
crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
while (des_s390_skciphers_num--)
crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
}
static int __init des_s390_init(void)
{
int ret;
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) {
ret = des_s390_register_alg(&des_alg);
if (ret)
goto out_err;
ret = des_s390_register_skcipher(&ecb_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
ret = des_s390_register_skcipher(&cbc_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) {
ret = des_s390_register_alg(&des3_alg);
if (ret)
goto out_err;
ret = des_s390_register_skcipher(&ecb_des3_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
ret = des_s390_register_skcipher(&cbc_des3_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
goto out_err;
}
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
ret = des_s390_register_skcipher(&ctr_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
ret = des_s390_register_skcipher(&ctr_des3_alg);
if (ret)
goto out_err;
}
return 0;
out_err:
des_s390_exit();
return ret;
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, des_s390_init);
module_exit(des_s390_exit);
MODULE_ALIAS_CRYPTO("des");
MODULE_ALIAS_CRYPTO("des3_ede");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");


@@ -2,20 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (sparc64)"
config CRYPTO_DES_SPARC64
tristate "Ciphers: DES and Triple DES EDE, modes: ECB/CBC"
depends on SPARC64
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
help
Block cipher: DES (FIPS 46-2) cipher algorithm
Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
Length-preserving ciphers: DES with ECB and CBC modes
Length-preserving ciphers: Triple DES EDE with ECB and CBC modes
Architecture: sparc64
config CRYPTO_AES_SPARC64
tristate "Ciphers: AES, modes: ECB, CBC, CTR"
depends on SPARC64


@@ -4,9 +4,7 @@
#
obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
aes-sparc64-y := aes_glue.o
des-sparc64-y := des_asm.o des_glue.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o


@@ -1,419 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/opcodes.h>
#include <asm/visasm.h>
.align 32
ENTRY(des_sparc64_key_expand)
/* %o0=input_key, %o1=output_key */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
DES_KEXPAND(0, 0, 0)
DES_KEXPAND(0, 1, 2)
DES_KEXPAND(2, 3, 6)
DES_KEXPAND(2, 2, 4)
DES_KEXPAND(6, 3, 10)
DES_KEXPAND(6, 2, 8)
DES_KEXPAND(10, 3, 14)
DES_KEXPAND(10, 2, 12)
DES_KEXPAND(14, 1, 16)
DES_KEXPAND(16, 3, 20)
DES_KEXPAND(16, 2, 18)
DES_KEXPAND(20, 3, 24)
DES_KEXPAND(20, 2, 22)
DES_KEXPAND(24, 3, 28)
DES_KEXPAND(24, 2, 26)
DES_KEXPAND(28, 1, 30)
std %f0, [%o1 + 0x00]
std %f2, [%o1 + 0x08]
std %f4, [%o1 + 0x10]
std %f6, [%o1 + 0x18]
std %f8, [%o1 + 0x20]
std %f10, [%o1 + 0x28]
std %f12, [%o1 + 0x30]
std %f14, [%o1 + 0x38]
std %f16, [%o1 + 0x40]
std %f18, [%o1 + 0x48]
std %f20, [%o1 + 0x50]
std %f22, [%o1 + 0x58]
std %f24, [%o1 + 0x60]
std %f26, [%o1 + 0x68]
std %f28, [%o1 + 0x70]
std %f30, [%o1 + 0x78]
retl
VISExitHalf
ENDPROC(des_sparc64_key_expand)
.align 32
ENTRY(des_sparc64_crypt)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ldd [%o1 + 0x00], %f32
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o2 + 0x00]
retl
VISExit
ENDPROC(des_sparc64_crypt)
.align 32
ENTRY(des_sparc64_load_keys)
/* %o0=key */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
retl
ldd [%o0 + 0x78], %f30
ENDPROC(des_sparc64_load_keys)
.align 32
ENTRY(des_sparc64_ecb_crypt)
/* %o0=input, %o1=output, %o2=len */
1: ldd [%o0 + 0x00], %f32
add %o0, 0x08, %o0
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o1 + 0x00]
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
nop
ENDPROC(des_sparc64_ecb_crypt)
.align 32
ENTRY(des_sparc64_cbc_encrypt)
/* %o0=input, %o1=output, %o2=len, %o3=IV */
ldd [%o3 + 0x00], %f32
1: ldd [%o0 + 0x00], %f34
fxor %f32, %f34, %f32
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o1 + 0x00]
add %o0, 0x08, %o0
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
std %f32, [%o3 + 0x00]
ENDPROC(des_sparc64_cbc_encrypt)
.align 32
ENTRY(des_sparc64_cbc_decrypt)
/* %o0=input, %o1=output, %o2=len, %o3=IV */
ldd [%o3 + 0x00], %f34
1: ldd [%o0 + 0x00], %f36
DES_IP(36, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
fxor %f32, %f34, %f32
fsrc2 %f36, %f34
std %f32, [%o1 + 0x00]
add %o0, 0x08, %o0
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
std %f36, [%o3 + 0x00]
ENDPROC(des_sparc64_cbc_decrypt)
.align 32
ENTRY(des3_ede_sparc64_crypt)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ldd [%o1 + 0x00], %f32
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
ldd [%o0 + 0x80], %f0
ldd [%o0 + 0x88], %f2
DES_ROUND(4, 6, 32, 32)
ldd [%o0 + 0x90], %f4
ldd [%o0 + 0x98], %f6
DES_ROUND(8, 10, 32, 32)
ldd [%o0 + 0xa0], %f8
ldd [%o0 + 0xa8], %f10
DES_ROUND(12, 14, 32, 32)
ldd [%o0 + 0xb0], %f12
ldd [%o0 + 0xb8], %f14
DES_ROUND(16, 18, 32, 32)
ldd [%o0 + 0xc0], %f16
ldd [%o0 + 0xc8], %f18
DES_ROUND(20, 22, 32, 32)
ldd [%o0 + 0xd0], %f20
ldd [%o0 + 0xd8], %f22
DES_ROUND(24, 26, 32, 32)
ldd [%o0 + 0xe0], %f24
ldd [%o0 + 0xe8], %f26
DES_ROUND(28, 30, 32, 32)
ldd [%o0 + 0xf0], %f28
ldd [%o0 + 0xf8], %f30
DES_IIP(32, 32)
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
ldd [%o0 + 0x100], %f0
ldd [%o0 + 0x108], %f2
DES_ROUND(4, 6, 32, 32)
ldd [%o0 + 0x110], %f4
ldd [%o0 + 0x118], %f6
DES_ROUND(8, 10, 32, 32)
ldd [%o0 + 0x120], %f8
ldd [%o0 + 0x128], %f10
DES_ROUND(12, 14, 32, 32)
ldd [%o0 + 0x130], %f12
ldd [%o0 + 0x138], %f14
DES_ROUND(16, 18, 32, 32)
ldd [%o0 + 0x140], %f16
ldd [%o0 + 0x148], %f18
DES_ROUND(20, 22, 32, 32)
ldd [%o0 + 0x150], %f20
ldd [%o0 + 0x158], %f22
DES_ROUND(24, 26, 32, 32)
ldd [%o0 + 0x160], %f24
ldd [%o0 + 0x168], %f26
DES_ROUND(28, 30, 32, 32)
ldd [%o0 + 0x170], %f28
ldd [%o0 + 0x178], %f30
DES_IIP(32, 32)
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o2 + 0x00]
retl
VISExit
ENDPROC(des3_ede_sparc64_crypt)
.align 32
ENTRY(des3_ede_sparc64_load_keys)
/* %o0=key */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
ldd [%o0 + 0x80], %f32
ldd [%o0 + 0x88], %f34
ldd [%o0 + 0x90], %f36
ldd [%o0 + 0x98], %f38
ldd [%o0 + 0xa0], %f40
ldd [%o0 + 0xa8], %f42
ldd [%o0 + 0xb0], %f44
ldd [%o0 + 0xb8], %f46
ldd [%o0 + 0xc0], %f48
ldd [%o0 + 0xc8], %f50
ldd [%o0 + 0xd0], %f52
ldd [%o0 + 0xd8], %f54
ldd [%o0 + 0xe0], %f56
retl
ldd [%o0 + 0xe8], %f58
ENDPROC(des3_ede_sparc64_load_keys)
#define DES3_LOOP_BODY(X) \
DES_IP(X, X) \
DES_ROUND(0, 2, X, X) \
DES_ROUND(4, 6, X, X) \
DES_ROUND(8, 10, X, X) \
DES_ROUND(12, 14, X, X) \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0xf0], %f16; \
ldd [%o0 + 0xf8], %f18; \
DES_ROUND(20, 22, X, X) \
ldd [%o0 + 0x100], %f20; \
ldd [%o0 + 0x108], %f22; \
DES_ROUND(24, 26, X, X) \
ldd [%o0 + 0x110], %f24; \
ldd [%o0 + 0x118], %f26; \
DES_ROUND(28, 30, X, X) \
ldd [%o0 + 0x120], %f28; \
ldd [%o0 + 0x128], %f30; \
DES_IIP(X, X) \
DES_IP(X, X) \
DES_ROUND(32, 34, X, X) \
ldd [%o0 + 0x130], %f0; \
ldd [%o0 + 0x138], %f2; \
DES_ROUND(36, 38, X, X) \
ldd [%o0 + 0x140], %f4; \
ldd [%o0 + 0x148], %f6; \
DES_ROUND(40, 42, X, X) \
ldd [%o0 + 0x150], %f8; \
ldd [%o0 + 0x158], %f10; \
DES_ROUND(44, 46, X, X) \
ldd [%o0 + 0x160], %f12; \
ldd [%o0 + 0x168], %f14; \
DES_ROUND(48, 50, X, X) \
DES_ROUND(52, 54, X, X) \
DES_ROUND(56, 58, X, X) \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0x170], %f16; \
ldd [%o0 + 0x178], %f18; \
DES_IIP(X, X) \
DES_IP(X, X) \
DES_ROUND(20, 22, X, X) \
ldd [%o0 + 0x50], %f20; \
ldd [%o0 + 0x58], %f22; \
DES_ROUND(24, 26, X, X) \
ldd [%o0 + 0x60], %f24; \
ldd [%o0 + 0x68], %f26; \
DES_ROUND(28, 30, X, X) \
ldd [%o0 + 0x70], %f28; \
ldd [%o0 + 0x78], %f30; \
DES_ROUND(0, 2, X, X) \
ldd [%o0 + 0x00], %f0; \
ldd [%o0 + 0x08], %f2; \
DES_ROUND(4, 6, X, X) \
ldd [%o0 + 0x10], %f4; \
ldd [%o0 + 0x18], %f6; \
DES_ROUND(8, 10, X, X) \
ldd [%o0 + 0x20], %f8; \
ldd [%o0 + 0x28], %f10; \
DES_ROUND(12, 14, X, X) \
ldd [%o0 + 0x30], %f12; \
ldd [%o0 + 0x38], %f14; \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0x40], %f16; \
ldd [%o0 + 0x48], %f18; \
DES_IIP(X, X)
.align 32
ENTRY(des3_ede_sparc64_ecb_crypt)
/* %o0=key, %o1=input, %o2=output, %o3=len */
1: ldd [%o1 + 0x00], %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
nop
ENDPROC(des3_ede_sparc64_ecb_crypt)
.align 32
ENTRY(des3_ede_sparc64_cbc_encrypt)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f60
1: ldd [%o1 + 0x00], %f62
fxor %f60, %f62, %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
std %f60, [%o4 + 0x00]
ENDPROC(des3_ede_sparc64_cbc_encrypt)
.align 32
ENTRY(des3_ede_sparc64_cbc_decrypt)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f62
1: ldx [%o1 + 0x00], %g1
MOVXTOD_G1_F60
DES3_LOOP_BODY(60)
fxor %f62, %f60, %f60
MOVXTOD_G1_F62
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
stx %g1, [%o4 + 0x00]
ENDPROC(des3_ede_sparc64_cbc_decrypt)


@@ -1,482 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Glue code for DES encryption optimized for sparc64 crypto opcodes.
*
* Copyright (C) 2012 David S. Miller <davem@davemloft.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <asm/fpumacro.h>
#include <asm/opcodes.h>
#include <asm/pstate.h>
#include <asm/elf.h>
struct des_sparc64_ctx {
u64 encrypt_expkey[DES_EXPKEY_WORDS / 2];
u64 decrypt_expkey[DES_EXPKEY_WORDS / 2];
};
struct des3_ede_sparc64_ctx {
u64 encrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2];
u64 decrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2];
};
static void encrypt_to_decrypt(u64 *d, const u64 *e)
{
const u64 *s = e + (DES_EXPKEY_WORDS / 2) - 1;
int i;
for (i = 0; i < DES_EXPKEY_WORDS / 2; i++)
*d++ = *s--;
}
extern void des_sparc64_key_expand(const u32 *input_key, u64 *key);
static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
int err;
/* Even though we have special instructions for key expansion,
* we call des_verify_key() so that we don't have to write our own
* weak key detection code.
*/
err = crypto_des_verify_key(tfm, key);
if (err)
return err;
des_sparc64_key_expand((const u32 *) key, &dctx->encrypt_expkey[0]);
encrypt_to_decrypt(&dctx->decrypt_expkey[0], &dctx->encrypt_expkey[0]);
return 0;
}
static int des_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return des_set_key(crypto_skcipher_tfm(tfm), key, keylen);
}
extern void des_sparc64_crypt(const u64 *key, const u64 *input,
u64 *output);
static void sparc_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
const u64 *K = ctx->encrypt_expkey;
des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
}
static void sparc_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
const u64 *K = ctx->decrypt_expkey;
des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
}
extern void des_sparc64_load_keys(const u64 *key);
extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output,
unsigned int len);
static int __ecb_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, true);
if (err)
return err;
if (encrypt)
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
else
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
while ((nbytes = walk.nbytes) != 0) {
des_sparc64_ecb_crypt(walk.src.virt.addr, walk.dst.virt.addr,
round_down(nbytes, DES_BLOCK_SIZE));
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
static int ecb_encrypt(struct skcipher_request *req)
{
return __ecb_crypt(req, true);
}
static int ecb_decrypt(struct skcipher_request *req)
{
return __ecb_crypt(req, false);
}
extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output,
unsigned int len, u64 *iv);
extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output,
unsigned int len, u64 *iv);
static int __cbc_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, true);
if (err)
return err;
if (encrypt)
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
else
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
while ((nbytes = walk.nbytes) != 0) {
if (encrypt)
des_sparc64_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
round_down(nbytes,
DES_BLOCK_SIZE),
walk.iv);
else
des_sparc64_cbc_decrypt(walk.src.virt.addr,
walk.dst.virt.addr,
round_down(nbytes,
DES_BLOCK_SIZE),
walk.iv);
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
static int cbc_encrypt(struct skcipher_request *req)
{
return __cbc_crypt(req, true);
}
static int cbc_decrypt(struct skcipher_request *req)
{
return __cbc_crypt(req, false);
}
static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
u64 k1[DES_EXPKEY_WORDS / 2];
u64 k2[DES_EXPKEY_WORDS / 2];
u64 k3[DES_EXPKEY_WORDS / 2];
int err;
err = crypto_des3_ede_verify_key(tfm, key);
if (err)
return err;
des_sparc64_key_expand((const u32 *)key, k1);
key += DES_KEY_SIZE;
des_sparc64_key_expand((const u32 *)key, k2);
key += DES_KEY_SIZE;
des_sparc64_key_expand((const u32 *)key, k3);
memcpy(&dctx->encrypt_expkey[0], &k1[0], sizeof(k1));
encrypt_to_decrypt(&dctx->encrypt_expkey[DES_EXPKEY_WORDS / 2], &k2[0]);
memcpy(&dctx->encrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2],
&k3[0], sizeof(k3));
encrypt_to_decrypt(&dctx->decrypt_expkey[0], &k3[0]);
memcpy(&dctx->decrypt_expkey[DES_EXPKEY_WORDS / 2],
&k2[0], sizeof(k2));
encrypt_to_decrypt(&dctx->decrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2],
&k1[0]);
return 0;
}
static int des3_ede_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return des3_ede_set_key(crypto_skcipher_tfm(tfm), key, keylen);
}
extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input,
u64 *output);
static void sparc_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
const u64 *K = ctx->encrypt_expkey;
des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
}
static void sparc_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
const u64 *K = ctx->decrypt_expkey;
des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
}
extern void des3_ede_sparc64_load_keys(const u64 *key);
extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input,
u64 *output, unsigned int len);
static int __ecb3_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
const u64 *K;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, true);
if (err)
return err;
if (encrypt)
K = &ctx->encrypt_expkey[0];
else
K = &ctx->decrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
while ((nbytes = walk.nbytes) != 0) {
des3_ede_sparc64_ecb_crypt(K, walk.src.virt.addr,
walk.dst.virt.addr,
round_down(nbytes, DES_BLOCK_SIZE));
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
static int ecb3_encrypt(struct skcipher_request *req)
{
return __ecb3_crypt(req, true);
}
static int ecb3_decrypt(struct skcipher_request *req)
{
return __ecb3_crypt(req, false);
}
extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
static int __cbc3_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
const u64 *K;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, true);
if (err)
return err;
if (encrypt)
K = &ctx->encrypt_expkey[0];
else
K = &ctx->decrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
while ((nbytes = walk.nbytes) != 0) {
if (encrypt)
des3_ede_sparc64_cbc_encrypt(K, walk.src.virt.addr,
walk.dst.virt.addr,
round_down(nbytes,
DES_BLOCK_SIZE),
walk.iv);
else
des3_ede_sparc64_cbc_decrypt(K, walk.src.virt.addr,
walk.dst.virt.addr,
round_down(nbytes,
DES_BLOCK_SIZE),
walk.iv);
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
static int cbc3_encrypt(struct skcipher_request *req)
{
return __cbc3_crypt(req, true);
}
static int cbc3_decrypt(struct skcipher_request *req)
{
return __cbc3_crypt(req, false);
}
static struct crypto_alg cipher_algs[] = {
{
.cra_name = "des",
.cra_driver_name = "des-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_sparc64_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_setkey = des_set_key,
.cia_encrypt = sparc_des_encrypt,
.cia_decrypt = sparc_des_decrypt
}
}
}, {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_EDE_KEY_SIZE,
.cia_max_keysize = DES3_EDE_KEY_SIZE,
.cia_setkey = des3_ede_set_key,
.cia_encrypt = sparc_des3_ede_encrypt,
.cia_decrypt = sparc_des3_ede_decrypt
}
}
}
};
static struct skcipher_alg skcipher_algs[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-sparc64",
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
.base.cra_alignmask = 7,
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = des_set_key_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-sparc64",
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
.base.cra_alignmask = 7,
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_set_key_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3_ede-sparc64",
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
.base.cra_alignmask = 7,
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = des3_ede_set_key_skcipher,
.encrypt = ecb3_encrypt,
.decrypt = ecb3_decrypt,
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3_ede-sparc64",
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
.base.cra_alignmask = 7,
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_set_key_skcipher,
.encrypt = cbc3_encrypt,
.decrypt = cbc3_decrypt,
}
};
static bool __init sparc64_has_des_opcode(void)
{
unsigned long cfr;
if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
return false;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
if (!(cfr & CFR_DES))
return false;
return true;
}
static int __init des_sparc64_mod_init(void)
{
int err;
if (!sparc64_has_des_opcode()) {
pr_info("sparc64 des opcodes not available.\n");
return -ENODEV;
}
pr_info("Using sparc64 des opcodes optimized DES implementation\n");
err = crypto_register_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
if (err)
return err;
err = crypto_register_skciphers(skcipher_algs,
ARRAY_SIZE(skcipher_algs));
if (err)
crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
return err;
}
static void __exit des_sparc64_mod_fini(void)
{
crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
}
module_init(des_sparc64_mod_init);
module_exit(des_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
MODULE_ALIAS_CRYPTO("des");
MODULE_ALIAS_CRYPTO("des3_ede");
#include "crop_devid.c"


@@ -99,20 +99,6 @@ config CRYPTO_CAST6_AVX_X86_64
Processes eight blocks in parallel.
config CRYPTO_DES3_EDE_X86_64
tristate "Ciphers: Triple DES EDE with modes: ECB, CBC"
depends on 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
imply CRYPTO_CTR
help
Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
Length-preserving ciphers: Triple DES EDE with ECB and CBC modes
Architecture: x86_64
Processes one or three blocks in parallel.
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Ciphers: Serpent with modes: ECB, CBC (SSE2)"
depends on 64BIT


@@ -20,9 +20,6 @@ serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o


@@ -1,831 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher
*
* Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*/
#include <linux/linkage.h>
.file "des3_ede-asm_64.S"
.text
#define s1 .L_s1
#define s2 ((s1) + (64*8))
#define s3 ((s2) + (64*8))
#define s4 ((s3) + (64*8))
#define s5 ((s4) + (64*8))
#define s6 ((s5) + (64*8))
#define s7 ((s6) + (64*8))
#define s8 ((s7) + (64*8))
/* register macros */
#define CTX %rdi
#define RL0 %r8
#define RL1 %r9
#define RL2 %r10
#define RL0d %r8d
#define RL1d %r9d
#define RL2d %r10d
#define RR0 %r11
#define RR1 %r12
#define RR2 %r13
#define RR0d %r11d
#define RR1d %r12d
#define RR2d %r13d
#define RW0 %rax
#define RW1 %rbx
#define RW2 %rcx
#define RW0d %eax
#define RW1d %ebx
#define RW2d %ecx
#define RW0bl %al
#define RW1bl %bl
#define RW2bl %cl
#define RW0bh %ah
#define RW1bh %bh
#define RW2bh %ch
#define RT0 %r15
#define RT1 %rsi
#define RT2 %r14
#define RT3 %rdx
#define RT0d %r15d
#define RT1d %esi
#define RT2d %r14d
#define RT3d %edx
/***********************************************************************
* 1-way 3DES
***********************************************************************/
#define do_permutation(a, b, offset, mask) \
movl a, RT0d; \
shrl $(offset), RT0d; \
xorl b, RT0d; \
andl $(mask), RT0d; \
xorl RT0d, b; \
shll $(offset), RT0d; \
xorl RT0d, a;
#define expand_to_64bits(val, mask) \
movl val##d, RT0d; \
rorl $4, RT0d; \
shlq $32, RT0; \
orq RT0, val; \
andq mask, val;
#define compress_to_64bits(val) \
movq val, RT0; \
shrq $32, RT0; \
roll $4, RT0d; \
orl RT0d, val##d;
#define initial_permutation(left, right) \
do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \
do_permutation(left##d, right##d, 16, 0x0000ffff); \
do_permutation(right##d, left##d, 2, 0x33333333); \
do_permutation(right##d, left##d, 8, 0x00ff00ff); \
movabs $0x3f3f3f3f3f3f3f3f, RT3; \
movl left##d, RW0d; \
roll $1, right##d; \
xorl right##d, RW0d; \
andl $0xaaaaaaaa, RW0d; \
xorl RW0d, left##d; \
xorl RW0d, right##d; \
roll $1, left##d; \
expand_to_64bits(right, RT3); \
expand_to_64bits(left, RT3);
#define final_permutation(left, right) \
compress_to_64bits(right); \
compress_to_64bits(left); \
movl right##d, RW0d; \
rorl $1, left##d; \
xorl left##d, RW0d; \
andl $0xaaaaaaaa, RW0d; \
xorl RW0d, right##d; \
xorl RW0d, left##d; \
rorl $1, right##d; \
do_permutation(right##d, left##d, 8, 0x00ff00ff); \
do_permutation(right##d, left##d, 2, 0x33333333); \
do_permutation(left##d, right##d, 16, 0x0000ffff); \
do_permutation(left##d, right##d, 4, 0x0f0f0f0f);
#define round1(n, from, to, load_next_key) \
xorq from, RW0; \
\
movzbl RW0bl, RT0d; \
movzbl RW0bh, RT1d; \
shrq $16, RW0; \
movzbl RW0bl, RT2d; \
movzbl RW0bh, RT3d; \
shrq $16, RW0; \
leaq s8(%rip), RW1; \
movq (RW1, RT0, 8), RT0; \
leaq s6(%rip), RW1; \
xorq (RW1, RT1, 8), to; \
movzbl RW0bl, RL1d; \
movzbl RW0bh, RT1d; \
shrl $16, RW0d; \
leaq s4(%rip), RW1; \
xorq (RW1, RT2, 8), RT0; \
leaq s2(%rip), RW1; \
xorq (RW1, RT3, 8), to; \
movzbl RW0bl, RT2d; \
movzbl RW0bh, RT3d; \
leaq s7(%rip), RW1; \
xorq (RW1, RL1, 8), RT0; \
leaq s5(%rip), RW1; \
xorq (RW1, RT1, 8), to; \
leaq s3(%rip), RW1; \
xorq (RW1, RT2, 8), RT0; \
load_next_key(n, RW0); \
xorq RT0, to; \
leaq s1(%rip), RW1; \
xorq (RW1, RT3, 8), to; \
#define load_next_key(n, RWx) \
movq (((n) + 1) * 8)(CTX), RWx;
#define dummy2(a, b) /*_*/
#define read_block(io, left, right) \
movl (io), left##d; \
movl 4(io), right##d; \
bswapl left##d; \
bswapl right##d;
#define write_block(io, left, right) \
bswapl left##d; \
bswapl right##d; \
movl left##d, (io); \
movl right##d, 4(io);
SYM_FUNC_START(des3_ede_x86_64_crypt_blk)
/* input:
* %rdi: round keys, CTX
* %rsi: dst
* %rdx: src
*/
pushq %rbx;
pushq %r12;
pushq %r13;
pushq %r14;
pushq %r15;
pushq %rsi; /* dst */
read_block(%rdx, RL0, RR0);
initial_permutation(RL0, RR0);
movq (CTX), RW0;
round1(0, RR0, RL0, load_next_key);
round1(1, RL0, RR0, load_next_key);
round1(2, RR0, RL0, load_next_key);
round1(3, RL0, RR0, load_next_key);
round1(4, RR0, RL0, load_next_key);
round1(5, RL0, RR0, load_next_key);
round1(6, RR0, RL0, load_next_key);
round1(7, RL0, RR0, load_next_key);
round1(8, RR0, RL0, load_next_key);
round1(9, RL0, RR0, load_next_key);
round1(10, RR0, RL0, load_next_key);
round1(11, RL0, RR0, load_next_key);
round1(12, RR0, RL0, load_next_key);
round1(13, RL0, RR0, load_next_key);
round1(14, RR0, RL0, load_next_key);
round1(15, RL0, RR0, load_next_key);
round1(16+0, RL0, RR0, load_next_key);
round1(16+1, RR0, RL0, load_next_key);
round1(16+2, RL0, RR0, load_next_key);
round1(16+3, RR0, RL0, load_next_key);
round1(16+4, RL0, RR0, load_next_key);
round1(16+5, RR0, RL0, load_next_key);
round1(16+6, RL0, RR0, load_next_key);
round1(16+7, RR0, RL0, load_next_key);
round1(16+8, RL0, RR0, load_next_key);
round1(16+9, RR0, RL0, load_next_key);
round1(16+10, RL0, RR0, load_next_key);
round1(16+11, RR0, RL0, load_next_key);
round1(16+12, RL0, RR0, load_next_key);
round1(16+13, RR0, RL0, load_next_key);
round1(16+14, RL0, RR0, load_next_key);
round1(16+15, RR0, RL0, load_next_key);
round1(32+0, RR0, RL0, load_next_key);
round1(32+1, RL0, RR0, load_next_key);
round1(32+2, RR0, RL0, load_next_key);
round1(32+3, RL0, RR0, load_next_key);
round1(32+4, RR0, RL0, load_next_key);
round1(32+5, RL0, RR0, load_next_key);
round1(32+6, RR0, RL0, load_next_key);
round1(32+7, RL0, RR0, load_next_key);
round1(32+8, RR0, RL0, load_next_key);
round1(32+9, RL0, RR0, load_next_key);
round1(32+10, RR0, RL0, load_next_key);
round1(32+11, RL0, RR0, load_next_key);
round1(32+12, RR0, RL0, load_next_key);
round1(32+13, RL0, RR0, load_next_key);
round1(32+14, RR0, RL0, load_next_key);
round1(32+15, RL0, RR0, dummy2);
final_permutation(RR0, RL0);
popq %rsi /* dst */
write_block(%rsi, RR0, RL0);
popq %r15;
popq %r14;
popq %r13;
popq %r12;
popq %rbx;
RET;
SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
/***********************************************************************
* 3-way 3DES
***********************************************************************/
#define expand_to_64bits(val, mask) \
movl val##d, RT0d; \
rorl $4, RT0d; \
shlq $32, RT0; \
orq RT0, val; \
andq mask, val;
#define compress_to_64bits(val) \
movq val, RT0; \
shrq $32, RT0; \
roll $4, RT0d; \
orl RT0d, val##d;
#define initial_permutation3(left, right) \
do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \
do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
\
do_permutation(right##0d, left##0d, 2, 0x33333333); \
do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
do_permutation(right##1d, left##1d, 2, 0x33333333); \
do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
do_permutation(right##2d, left##2d, 2, 0x33333333); \
do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
\
movabs $0x3f3f3f3f3f3f3f3f, RT3; \
\
movl left##0d, RW0d; \
roll $1, right##0d; \
xorl right##0d, RW0d; \
andl $0xaaaaaaaa, RW0d; \
xorl RW0d, left##0d; \
xorl RW0d, right##0d; \
roll $1, left##0d; \
expand_to_64bits(right##0, RT3); \
expand_to_64bits(left##0, RT3); \
movl left##1d, RW1d; \
roll $1, right##1d; \
xorl right##1d, RW1d; \
andl $0xaaaaaaaa, RW1d; \
xorl RW1d, left##1d; \
xorl RW1d, right##1d; \
roll $1, left##1d; \
expand_to_64bits(right##1, RT3); \
expand_to_64bits(left##1, RT3); \
movl left##2d, RW2d; \
roll $1, right##2d; \
xorl right##2d, RW2d; \
andl $0xaaaaaaaa, RW2d; \
xorl RW2d, left##2d; \
xorl RW2d, right##2d; \
roll $1, left##2d; \
expand_to_64bits(right##2, RT3); \
expand_to_64bits(left##2, RT3);
#define final_permutation3(left, right) \
compress_to_64bits(right##0); \
compress_to_64bits(left##0); \
movl right##0d, RW0d; \
rorl $1, left##0d; \
xorl left##0d, RW0d; \
andl $0xaaaaaaaa, RW0d; \
xorl RW0d, right##0d; \
xorl RW0d, left##0d; \
rorl $1, right##0d; \
compress_to_64bits(right##1); \
compress_to_64bits(left##1); \
movl right##1d, RW1d; \
rorl $1, left##1d; \
xorl left##1d, RW1d; \
andl $0xaaaaaaaa, RW1d; \
xorl RW1d, right##1d; \
xorl RW1d, left##1d; \
rorl $1, right##1d; \
compress_to_64bits(right##2); \
compress_to_64bits(left##2); \
movl right##2d, RW2d; \
rorl $1, left##2d; \
xorl left##2d, RW2d; \
andl $0xaaaaaaaa, RW2d; \
xorl RW2d, right##2d; \
xorl RW2d, left##2d; \
rorl $1, right##2d; \
\
do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
do_permutation(right##0d, left##0d, 2, 0x33333333); \
do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
do_permutation(right##1d, left##1d, 2, 0x33333333); \
do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
do_permutation(right##2d, left##2d, 2, 0x33333333); \
\
do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f);
#define round3(n, from, to, load_next_key, do_movq) \
xorq from##0, RW0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrq $16, RW0; \
leaq s8(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s6(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrq $16, RW0; \
leaq s4(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s2(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrl $16, RW0d; \
leaq s7(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s5(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
load_next_key(n, RW0); \
leaq s3(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s1(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
xorq from##1, RW1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrq $16, RW1; \
leaq s8(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s6(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrq $16, RW1; \
leaq s4(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s2(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrl $16, RW1d; \
leaq s7(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s5(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
do_movq(RW0, RW1); \
leaq s3(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s1(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
xorq from##2, RW2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrq $16, RW2; \
leaq s8(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s6(%rip), RT2; \
xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrq $16, RW2; \
leaq s4(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s2(%rip), RT2; \
xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrl $16, RW2d; \
leaq s7(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s5(%rip), RT2; \
xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
do_movq(RW0, RW2); \
leaq s3(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s1(%rip), RT2; \
xorq (RT2, RT1, 8), to##2;
#define __movq(src, dst) \
movq src, dst;
SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way)
/* input:
* %rdi: ctx, round keys
* %rsi: dst (3 blocks)
* %rdx: src (3 blocks)
*/
pushq %rbx;
pushq %r12;
pushq %r13;
pushq %r14;
pushq %r15;
pushq %rsi /* dst */
/* load input */
movl 0 * 4(%rdx), RL0d;
movl 1 * 4(%rdx), RR0d;
movl 2 * 4(%rdx), RL1d;
movl 3 * 4(%rdx), RR1d;
movl 4 * 4(%rdx), RL2d;
movl 5 * 4(%rdx), RR2d;
bswapl RL0d;
bswapl RR0d;
bswapl RL1d;
bswapl RR1d;
bswapl RL2d;
bswapl RR2d;
initial_permutation3(RL, RR);
movq 0(CTX), RW0;
movq RW0, RW1;
movq RW0, RW2;
round3(0, RR, RL, load_next_key, __movq);
round3(1, RL, RR, load_next_key, __movq);
round3(2, RR, RL, load_next_key, __movq);
round3(3, RL, RR, load_next_key, __movq);
round3(4, RR, RL, load_next_key, __movq);
round3(5, RL, RR, load_next_key, __movq);
round3(6, RR, RL, load_next_key, __movq);
round3(7, RL, RR, load_next_key, __movq);
round3(8, RR, RL, load_next_key, __movq);
round3(9, RL, RR, load_next_key, __movq);
round3(10, RR, RL, load_next_key, __movq);
round3(11, RL, RR, load_next_key, __movq);
round3(12, RR, RL, load_next_key, __movq);
round3(13, RL, RR, load_next_key, __movq);
round3(14, RR, RL, load_next_key, __movq);
round3(15, RL, RR, load_next_key, __movq);
round3(16+0, RL, RR, load_next_key, __movq);
round3(16+1, RR, RL, load_next_key, __movq);
round3(16+2, RL, RR, load_next_key, __movq);
round3(16+3, RR, RL, load_next_key, __movq);
round3(16+4, RL, RR, load_next_key, __movq);
round3(16+5, RR, RL, load_next_key, __movq);
round3(16+6, RL, RR, load_next_key, __movq);
round3(16+7, RR, RL, load_next_key, __movq);
round3(16+8, RL, RR, load_next_key, __movq);
round3(16+9, RR, RL, load_next_key, __movq);
round3(16+10, RL, RR, load_next_key, __movq);
round3(16+11, RR, RL, load_next_key, __movq);
round3(16+12, RL, RR, load_next_key, __movq);
round3(16+13, RR, RL, load_next_key, __movq);
round3(16+14, RL, RR, load_next_key, __movq);
round3(16+15, RR, RL, load_next_key, __movq);
round3(32+0, RR, RL, load_next_key, __movq);
round3(32+1, RL, RR, load_next_key, __movq);
round3(32+2, RR, RL, load_next_key, __movq);
round3(32+3, RL, RR, load_next_key, __movq);
round3(32+4, RR, RL, load_next_key, __movq);
round3(32+5, RL, RR, load_next_key, __movq);
round3(32+6, RR, RL, load_next_key, __movq);
round3(32+7, RL, RR, load_next_key, __movq);
round3(32+8, RR, RL, load_next_key, __movq);
round3(32+9, RL, RR, load_next_key, __movq);
round3(32+10, RR, RL, load_next_key, __movq);
round3(32+11, RL, RR, load_next_key, __movq);
round3(32+12, RR, RL, load_next_key, __movq);
round3(32+13, RL, RR, load_next_key, __movq);
round3(32+14, RR, RL, load_next_key, __movq);
round3(32+15, RL, RR, dummy2, dummy2);
final_permutation3(RR, RL);
bswapl RR0d;
bswapl RL0d;
bswapl RR1d;
bswapl RL1d;
bswapl RR2d;
bswapl RL2d;
popq %rsi /* dst */
movl RR0d, 0 * 4(%rsi);
movl RL0d, 1 * 4(%rsi);
movl RR1d, 2 * 4(%rsi);
movl RL1d, 3 * 4(%rsi);
movl RR2d, 4 * 4(%rsi);
movl RL2d, 5 * 4(%rsi);
popq %r15;
popq %r14;
popq %r13;
popq %r12;
popq %rbx;
RET;
SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way)
.section .rodata, "a", @progbits
.align 16
.L_s1:
.quad 0x0010100001010400, 0x0000000000000000
.quad 0x0000100000010000, 0x0010100001010404
.quad 0x0010100001010004, 0x0000100000010404
.quad 0x0000000000000004, 0x0000100000010000
.quad 0x0000000000000400, 0x0010100001010400
.quad 0x0010100001010404, 0x0000000000000400
.quad 0x0010000001000404, 0x0010100001010004
.quad 0x0010000001000000, 0x0000000000000004
.quad 0x0000000000000404, 0x0010000001000400
.quad 0x0010000001000400, 0x0000100000010400
.quad 0x0000100000010400, 0x0010100001010000
.quad 0x0010100001010000, 0x0010000001000404
.quad 0x0000100000010004, 0x0010000001000004
.quad 0x0010000001000004, 0x0000100000010004
.quad 0x0000000000000000, 0x0000000000000404
.quad 0x0000100000010404, 0x0010000001000000
.quad 0x0000100000010000, 0x0010100001010404
.quad 0x0000000000000004, 0x0010100001010000
.quad 0x0010100001010400, 0x0010000001000000
.quad 0x0010000001000000, 0x0000000000000400
.quad 0x0010100001010004, 0x0000100000010000
.quad 0x0000100000010400, 0x0010000001000004
.quad 0x0000000000000400, 0x0000000000000004
.quad 0x0010000001000404, 0x0000100000010404
.quad 0x0010100001010404, 0x0000100000010004
.quad 0x0010100001010000, 0x0010000001000404
.quad 0x0010000001000004, 0x0000000000000404
.quad 0x0000100000010404, 0x0010100001010400
.quad 0x0000000000000404, 0x0010000001000400
.quad 0x0010000001000400, 0x0000000000000000
.quad 0x0000100000010004, 0x0000100000010400
.quad 0x0000000000000000, 0x0010100001010004
.L_s2:
.quad 0x0801080200100020, 0x0800080000000000
.quad 0x0000080000000000, 0x0001080200100020
.quad 0x0001000000100000, 0x0000000200000020
.quad 0x0801000200100020, 0x0800080200000020
.quad 0x0800000200000020, 0x0801080200100020
.quad 0x0801080000100000, 0x0800000000000000
.quad 0x0800080000000000, 0x0001000000100000
.quad 0x0000000200000020, 0x0801000200100020
.quad 0x0001080000100000, 0x0001000200100020
.quad 0x0800080200000020, 0x0000000000000000
.quad 0x0800000000000000, 0x0000080000000000
.quad 0x0001080200100020, 0x0801000000100000
.quad 0x0001000200100020, 0x0800000200000020
.quad 0x0000000000000000, 0x0001080000100000
.quad 0x0000080200000020, 0x0801080000100000
.quad 0x0801000000100000, 0x0000080200000020
.quad 0x0000000000000000, 0x0001080200100020
.quad 0x0801000200100020, 0x0001000000100000
.quad 0x0800080200000020, 0x0801000000100000
.quad 0x0801080000100000, 0x0000080000000000
.quad 0x0801000000100000, 0x0800080000000000
.quad 0x0000000200000020, 0x0801080200100020
.quad 0x0001080200100020, 0x0000000200000020
.quad 0x0000080000000000, 0x0800000000000000
.quad 0x0000080200000020, 0x0801080000100000
.quad 0x0001000000100000, 0x0800000200000020
.quad 0x0001000200100020, 0x0800080200000020
.quad 0x0800000200000020, 0x0001000200100020
.quad 0x0001080000100000, 0x0000000000000000
.quad 0x0800080000000000, 0x0000080200000020
.quad 0x0800000000000000, 0x0801000200100020
.quad 0x0801080200100020, 0x0001080000100000
.L_s3:
.quad 0x0000002000000208, 0x0000202008020200
.quad 0x0000000000000000, 0x0000200008020008
.quad 0x0000002008000200, 0x0000000000000000
.quad 0x0000202000020208, 0x0000002008000200
.quad 0x0000200000020008, 0x0000000008000008
.quad 0x0000000008000008, 0x0000200000020000
.quad 0x0000202008020208, 0x0000200000020008
.quad 0x0000200008020000, 0x0000002000000208
.quad 0x0000000008000000, 0x0000000000000008
.quad 0x0000202008020200, 0x0000002000000200
.quad 0x0000202000020200, 0x0000200008020000
.quad 0x0000200008020008, 0x0000202000020208
.quad 0x0000002008000208, 0x0000202000020200
.quad 0x0000200000020000, 0x0000002008000208
.quad 0x0000000000000008, 0x0000202008020208
.quad 0x0000002000000200, 0x0000000008000000
.quad 0x0000202008020200, 0x0000000008000000
.quad 0x0000200000020008, 0x0000002000000208
.quad 0x0000200000020000, 0x0000202008020200
.quad 0x0000002008000200, 0x0000000000000000
.quad 0x0000002000000200, 0x0000200000020008
.quad 0x0000202008020208, 0x0000002008000200
.quad 0x0000000008000008, 0x0000002000000200
.quad 0x0000000000000000, 0x0000200008020008
.quad 0x0000002008000208, 0x0000200000020000
.quad 0x0000000008000000, 0x0000202008020208
.quad 0x0000000000000008, 0x0000202000020208
.quad 0x0000202000020200, 0x0000000008000008
.quad 0x0000200008020000, 0x0000002008000208
.quad 0x0000002000000208, 0x0000200008020000
.quad 0x0000202000020208, 0x0000000000000008
.quad 0x0000200008020008, 0x0000202000020200
.L_s4:
.quad 0x1008020000002001, 0x1000020800002001
.quad 0x1000020800002001, 0x0000000800000000
.quad 0x0008020800002000, 0x1008000800000001
.quad 0x1008000000000001, 0x1000020000002001
.quad 0x0000000000000000, 0x0008020000002000
.quad 0x0008020000002000, 0x1008020800002001
.quad 0x1000000800000001, 0x0000000000000000
.quad 0x0008000800000000, 0x1008000000000001
.quad 0x1000000000000001, 0x0000020000002000
.quad 0x0008000000000000, 0x1008020000002001
.quad 0x0000000800000000, 0x0008000000000000
.quad 0x1000020000002001, 0x0000020800002000
.quad 0x1008000800000001, 0x1000000000000001
.quad 0x0000020800002000, 0x0008000800000000
.quad 0x0000020000002000, 0x0008020800002000
.quad 0x1008020800002001, 0x1000000800000001
.quad 0x0008000800000000, 0x1008000000000001
.quad 0x0008020000002000, 0x1008020800002001
.quad 0x1000000800000001, 0x0000000000000000
.quad 0x0000000000000000, 0x0008020000002000
.quad 0x0000020800002000, 0x0008000800000000
.quad 0x1008000800000001, 0x1000000000000001
.quad 0x1008020000002001, 0x1000020800002001
.quad 0x1000020800002001, 0x0000000800000000
.quad 0x1008020800002001, 0x1000000800000001
.quad 0x1000000000000001, 0x0000020000002000
.quad 0x1008000000000001, 0x1000020000002001
.quad 0x0008020800002000, 0x1008000800000001
.quad 0x1000020000002001, 0x0000020800002000
.quad 0x0008000000000000, 0x1008020000002001
.quad 0x0000000800000000, 0x0008000000000000
.quad 0x0000020000002000, 0x0008020800002000
.L_s5:
.quad 0x0000001000000100, 0x0020001002080100
.quad 0x0020000002080000, 0x0420001002000100
.quad 0x0000000000080000, 0x0000001000000100
.quad 0x0400000000000000, 0x0020000002080000
.quad 0x0400001000080100, 0x0000000000080000
.quad 0x0020001002000100, 0x0400001000080100
.quad 0x0420001002000100, 0x0420000002080000
.quad 0x0000001000080100, 0x0400000000000000
.quad 0x0020000002000000, 0x0400000000080000
.quad 0x0400000000080000, 0x0000000000000000
.quad 0x0400001000000100, 0x0420001002080100
.quad 0x0420001002080100, 0x0020001002000100
.quad 0x0420000002080000, 0x0400001000000100
.quad 0x0000000000000000, 0x0420000002000000
.quad 0x0020001002080100, 0x0020000002000000
.quad 0x0420000002000000, 0x0000001000080100
.quad 0x0000000000080000, 0x0420001002000100
.quad 0x0000001000000100, 0x0020000002000000
.quad 0x0400000000000000, 0x0020000002080000
.quad 0x0420001002000100, 0x0400001000080100
.quad 0x0020001002000100, 0x0400000000000000
.quad 0x0420000002080000, 0x0020001002080100
.quad 0x0400001000080100, 0x0000001000000100
.quad 0x0020000002000000, 0x0420000002080000
.quad 0x0420001002080100, 0x0000001000080100
.quad 0x0420000002000000, 0x0420001002080100
.quad 0x0020000002080000, 0x0000000000000000
.quad 0x0400000000080000, 0x0420000002000000
.quad 0x0000001000080100, 0x0020001002000100
.quad 0x0400001000000100, 0x0000000000080000
.quad 0x0000000000000000, 0x0400000000080000
.quad 0x0020001002080100, 0x0400001000000100
.L_s6:
.quad 0x0200000120000010, 0x0204000020000000
.quad 0x0000040000000000, 0x0204040120000010
.quad 0x0204000020000000, 0x0000000100000010
.quad 0x0204040120000010, 0x0004000000000000
.quad 0x0200040020000000, 0x0004040100000010
.quad 0x0004000000000000, 0x0200000120000010
.quad 0x0004000100000010, 0x0200040020000000
.quad 0x0200000020000000, 0x0000040100000010
.quad 0x0000000000000000, 0x0004000100000010
.quad 0x0200040120000010, 0x0000040000000000
.quad 0x0004040000000000, 0x0200040120000010
.quad 0x0000000100000010, 0x0204000120000010
.quad 0x0204000120000010, 0x0000000000000000
.quad 0x0004040100000010, 0x0204040020000000
.quad 0x0000040100000010, 0x0004040000000000
.quad 0x0204040020000000, 0x0200000020000000
.quad 0x0200040020000000, 0x0000000100000010
.quad 0x0204000120000010, 0x0004040000000000
.quad 0x0204040120000010, 0x0004000000000000
.quad 0x0000040100000010, 0x0200000120000010
.quad 0x0004000000000000, 0x0200040020000000
.quad 0x0200000020000000, 0x0000040100000010
.quad 0x0200000120000010, 0x0204040120000010
.quad 0x0004040000000000, 0x0204000020000000
.quad 0x0004040100000010, 0x0204040020000000
.quad 0x0000000000000000, 0x0204000120000010
.quad 0x0000000100000010, 0x0000040000000000
.quad 0x0204000020000000, 0x0004040100000010
.quad 0x0000040000000000, 0x0004000100000010
.quad 0x0200040120000010, 0x0000000000000000
.quad 0x0204040020000000, 0x0200000020000000
.quad 0x0004000100000010, 0x0200040120000010
.L_s7:
.quad 0x0002000000200000, 0x2002000004200002
.quad 0x2000000004000802, 0x0000000000000000
.quad 0x0000000000000800, 0x2000000004000802
.quad 0x2002000000200802, 0x0002000004200800
.quad 0x2002000004200802, 0x0002000000200000
.quad 0x0000000000000000, 0x2000000004000002
.quad 0x2000000000000002, 0x0000000004000000
.quad 0x2002000004200002, 0x2000000000000802
.quad 0x0000000004000800, 0x2002000000200802
.quad 0x2002000000200002, 0x0000000004000800
.quad 0x2000000004000002, 0x0002000004200000
.quad 0x0002000004200800, 0x2002000000200002
.quad 0x0002000004200000, 0x0000000000000800
.quad 0x2000000000000802, 0x2002000004200802
.quad 0x0002000000200800, 0x2000000000000002
.quad 0x0000000004000000, 0x0002000000200800
.quad 0x0000000004000000, 0x0002000000200800
.quad 0x0002000000200000, 0x2000000004000802
.quad 0x2000000004000802, 0x2002000004200002
.quad 0x2002000004200002, 0x2000000000000002
.quad 0x2002000000200002, 0x0000000004000000
.quad 0x0000000004000800, 0x0002000000200000
.quad 0x0002000004200800, 0x2000000000000802
.quad 0x2002000000200802, 0x0002000004200800
.quad 0x2000000000000802, 0x2000000004000002
.quad 0x2002000004200802, 0x0002000004200000
.quad 0x0002000000200800, 0x0000000000000000
.quad 0x2000000000000002, 0x2002000004200802
.quad 0x0000000000000000, 0x2002000000200802
.quad 0x0002000004200000, 0x0000000000000800
.quad 0x2000000004000002, 0x0000000004000800
.quad 0x0000000000000800, 0x2002000000200002
.L_s8:
.quad 0x0100010410001000, 0x0000010000001000
.quad 0x0000000000040000, 0x0100010410041000
.quad 0x0100000010000000, 0x0100010410001000
.quad 0x0000000400000000, 0x0100000010000000
.quad 0x0000000400040000, 0x0100000010040000
.quad 0x0100010410041000, 0x0000010000041000
.quad 0x0100010010041000, 0x0000010400041000
.quad 0x0000010000001000, 0x0000000400000000
.quad 0x0100000010040000, 0x0100000410000000
.quad 0x0100010010001000, 0x0000010400001000
.quad 0x0000010000041000, 0x0000000400040000
.quad 0x0100000410040000, 0x0100010010041000
.quad 0x0000010400001000, 0x0000000000000000
.quad 0x0000000000000000, 0x0100000410040000
.quad 0x0100000410000000, 0x0100010010001000
.quad 0x0000010400041000, 0x0000000000040000
.quad 0x0000010400041000, 0x0000000000040000
.quad 0x0100010010041000, 0x0000010000001000
.quad 0x0000000400000000, 0x0100000410040000
.quad 0x0000010000001000, 0x0000010400041000
.quad 0x0100010010001000, 0x0000000400000000
.quad 0x0100000410000000, 0x0100000010040000
.quad 0x0100000410040000, 0x0100000010000000
.quad 0x0000000000040000, 0x0100010410001000
.quad 0x0000000000000000, 0x0100010410041000
.quad 0x0000000400040000, 0x0100000410000000
.quad 0x0100000010040000, 0x0100010010001000
.quad 0x0100010410001000, 0x0000000000000000
.quad 0x0100010410041000, 0x0000010000041000
.quad 0x0000010000041000, 0x0000010400001000
.quad 0x0000010400001000, 0x0000000400040000
.quad 0x0100000010000000, 0x0100010010041000


@@ -1,391 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for assembler optimized version of 3DES
*
* Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*/
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
struct des3_ede_x86_ctx {
struct des3_ede_ctx enc;
struct des3_ede_ctx dec;
};
/* regular block cipher functions */
asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst,
const u8 *src);
/* 3-way parallel cipher functions */
asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
const u8 *src);
static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
const u8 *src)
{
u32 *enc_ctx = ctx->enc.expkey;
des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
}
static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
const u8 *src)
{
u32 *dec_ctx = ctx->dec.expkey;
des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
}
static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
const u8 *src)
{
u32 *dec_ctx = ctx->dec.expkey;
des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
}
static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
{
const unsigned int bsize = DES3_EDE_BLOCK_SIZE;
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
const u8 *wsrc = walk.src.virt.addr;
u8 *wdst = walk.dst.virt.addr;
/* Process four block batch */
if (nbytes >= bsize * 3) {
do {
des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
wsrc);
wsrc += bsize * 3;
wdst += bsize * 3;
nbytes -= bsize * 3;
} while (nbytes >= bsize * 3);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);
wsrc += bsize;
wdst += bsize;
nbytes -= bsize;
} while (nbytes >= bsize);
done:
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_crypt(req, ctx->enc.expkey);
}
static int ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_crypt(req, ctx->dec.expkey);
}
static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
struct skcipher_walk *walk)
{
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 *iv = (u64 *)walk->iv;
do {
*dst = *src ^ *iv;
des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
*(u64 *)walk->iv = *iv;
return nbytes;
}
static int cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
nbytes = __cbc_encrypt(ctx, &walk);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx,
struct skcipher_walk *walk)
{
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 ivs[3 - 1];
u64 last_iv;
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
last_iv = *src;
/* Process four block batch */
if (nbytes >= bsize * 3) {
do {
nbytes -= bsize * 3 - bsize;
src -= 3 - 1;
dst -= 3 - 1;
ivs[0] = src[0];
ivs[1] = src[1];
des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
dst[1] ^= ivs[0];
dst[2] ^= ivs[1];
nbytes -= bsize;
if (nbytes < bsize)
goto done;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
} while (nbytes >= bsize * 3);
}
/* Handle leftovers */
for (;;) {
des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src);
nbytes -= bsize;
if (nbytes < bsize)
break;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
}
done:
*dst ^= *(u64 *)walk->iv;
*(u64 *)walk->iv = last_iv;
return nbytes;
}
static int cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
nbytes = __cbc_decrypt(ctx, &walk);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm);
u32 i, j, tmp;
int err;
err = des3_ede_expand_key(&ctx->enc, key, keylen);
if (err == -ENOKEY) {
if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
err = -EINVAL;
else
err = 0;
}
if (err) {
memset(ctx, 0, sizeof(*ctx));
return err;
}
/* Fix encryption context for this implementation and form decryption
* context. */
j = DES3_EDE_EXPKEY_WORDS - 2;
for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
tmp = ror32(ctx->enc.expkey[i + 1], 4);
ctx->enc.expkey[i + 1] = tmp;
ctx->dec.expkey[j + 0] = ctx->enc.expkey[i + 0];
ctx->dec.expkey[j + 1] = tmp;
}
return 0;
}
static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key,
unsigned int keylen)
{
return des3_ede_x86_setkey(&tfm->base, key, keylen);
}
static struct crypto_alg des3_ede_cipher = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_EDE_KEY_SIZE,
.cia_max_keysize = DES3_EDE_KEY_SIZE,
.cia_setkey = des3_ede_x86_setkey,
.cia_encrypt = des3_ede_x86_encrypt,
.cia_decrypt = des3_ede_x86_decrypt,
}
}
};
static struct skcipher_alg des3_ede_skciphers[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3_ede-asm",
.base.cra_priority = 300,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3_ede-asm",
.base.cra_priority = 300,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}
};
static bool is_blacklisted_cpu(void)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
if (boot_cpu_data.x86 == 0x0f) {
/*
* On Pentium 4, des3_ede-x86_64 is slower than generic C
* implementation because use of 64bit rotates (which are really
* slow on P4). Therefore blacklist P4s.
*/
return true;
}
return false;
}
static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init des3_ede_x86_init(void)
{
int err;
if (!force && is_blacklisted_cpu()) {
pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
return -ENODEV;
}
err = crypto_register_alg(&des3_ede_cipher);
if (err)
return err;
err = crypto_register_skciphers(des3_ede_skciphers,
ARRAY_SIZE(des3_ede_skciphers));
if (err)
crypto_unregister_alg(&des3_ede_cipher);
return err;
}
static void __exit des3_ede_x86_fini(void)
{
crypto_unregister_alg(&des3_ede_cipher);
crypto_unregister_skciphers(des3_ede_skciphers,
ARRAY_SIZE(des3_ede_skciphers));
}
module_init(des3_ede_x86_init);
module_exit(des3_ede_x86_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("des3_ede");
MODULE_ALIAS_CRYPTO("des3_ede-asm");
MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");


@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
depends on CRYPTO_DRBG && CRYPTO_SELFTESTS
depends on CRYPTO_DRBG=y && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -109,10 +109,6 @@ config CRYPTO_RNG2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_RNG_DEFAULT
tristate
select CRYPTO_DRBG_MENU
config CRYPTO_AKCIPHER2
tristate
select CRYPTO_ALGAPI2
@@ -151,19 +147,20 @@ config CRYPTO_MANAGER
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
select CRYPTO_ACOMP2
select CRYPTO_AEAD2
select CRYPTO_AKCIPHER2
select CRYPTO_SIG2
select CRYPTO_HASH2
select CRYPTO_KPP2
select CRYPTO_RNG2
select CRYPTO_SKCIPHER2
select CRYPTO_ACOMP2 if CRYPTO_SELFTESTS
select CRYPTO_AEAD2 if CRYPTO_SELFTESTS
select CRYPTO_AKCIPHER2 if CRYPTO_SELFTESTS
select CRYPTO_SIG2 if CRYPTO_SELFTESTS
select CRYPTO_HASH2 if CRYPTO_SELFTESTS
select CRYPTO_KPP2 if CRYPTO_SELFTESTS
select CRYPTO_RNG2 if CRYPTO_SELFTESTS
select CRYPTO_SKCIPHER2 if CRYPTO_SELFTESTS
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
depends on NET
select CRYPTO_MANAGER
select CRYPTO_RNG
help
Userspace configuration for cryptographic instantiations such as
cbc(aes).
@@ -218,6 +215,7 @@ config CRYPTO_PCRYPT
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_MANAGER
@@ -251,7 +249,10 @@ config CRYPTO_KRB5ENC
config CRYPTO_BENCHMARK
tristate "Crypto benchmarking module"
depends on m || EXPERT
select CRYPTO_AEAD
select CRYPTO_HASH
select CRYPTO_MANAGER
select CRYPTO_SKCIPHER
help
Quick & dirty crypto benchmarking module.
@@ -261,10 +262,16 @@ config CRYPTO_BENCHMARK
config CRYPTO_SIMD
tristate
select CRYPTO_AEAD
select CRYPTO_CRYPTD
config CRYPTO_ENGINE
tristate
select CRYPTO_AEAD
select CRYPTO_AKCIPHER
select CRYPTO_HASH
select CRYPTO_KPP
select CRYPTO_SKCIPHER
endmenu
@@ -290,7 +297,6 @@ config CRYPTO_DH
config CRYPTO_DH_RFC7919_GROUPS
bool "RFC 7919 FFDHE groups"
depends on CRYPTO_DH
select CRYPTO_RNG_DEFAULT
help
FFDHE (Finite-Field-based Diffie-Hellman Ephemeral) groups
defined in RFC7919.
@@ -302,7 +308,6 @@ config CRYPTO_DH_RFC7919_GROUPS
config CRYPTO_ECC
tristate
select CRYPTO_RNG_DEFAULT
config CRYPTO_ECDH
tristate "ECDH (Elliptic Curve Diffie-Hellman)"
@@ -800,7 +805,6 @@ config CRYPTO_GENIV
tristate
select CRYPTO_AEAD
select CRYPTO_MANAGER
select CRYPTO_RNG_DEFAULT
config CRYPTO_SEQIV
tristate "Sequence Number IV Generator"


@@ -324,15 +324,13 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
return PTR_ERR(ret);
}
key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
key_data = sock_kmemdup(&ask->sk, ret, key_datalen, GFP_KERNEL);
if (!key_data) {
up_read(&key->sem);
key_put(key);
return -ENOMEM;
}
memcpy(key_data, ret, key_datalen);
up_read(&key->sem);
key_put(key);


@@ -646,7 +646,8 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct shash_desc *desc = cryptd_shash_desc(req);
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct shash_desc *desc = &rctx->desc;
desc->tfm = ctx->child;
@@ -952,115 +953,6 @@ static struct crypto_template cryptd_tmpl = {
.module = THIS_MODULE,
};
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
struct cryptd_skcipher_ctx *ctx;
struct crypto_skcipher *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
crypto_free_skcipher(tfm);
return ERR_PTR(-EINVAL);
}
ctx = crypto_skcipher_ctx(tfm);
refcount_set(&ctx->refcnt, 1);
return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
if (refcount_dec_and_test(&ctx->refcnt))
crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
struct cryptd_hash_ctx *ctx;
struct crypto_ahash *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
crypto_free_ahash(tfm);
return ERR_PTR(-EINVAL);
}
ctx = crypto_ahash_ctx(tfm);
refcount_set(&ctx->refcnt, 1);
return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);
struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);
bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
if (refcount_dec_and_test(&ctx->refcnt))
crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
u32 type, u32 mask)
{


@@ -388,13 +388,7 @@ static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime,
* 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64
* random bits and interpret them as a big endian integer.
*/
err = -EFAULT;
if (crypto_get_default_rng())
goto out_err;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key,
oversampling_size);
crypto_put_default_rng();
err = crypto_stdrng_get_bytes(key, oversampling_size);
if (err)
goto out_err;


@@ -1780,7 +1780,7 @@ static inline int __init drbg_healthcheck_sanity(void)
max_addtllen = drbg_max_addtl(drbg);
max_request_bytes = drbg_max_request_bytes(drbg);
drbg_string_fill(&addtl, buf, max_addtllen + 1);
/* overflow addtllen with additonal info string */
/* overflow addtllen with additional info string */
len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
BUG_ON(0 < len);
/* overflow max_bits */


@@ -1533,16 +1533,11 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
* This condition is met by the default RNG because it selects a favored
* DRBG with a security strength of 256.
* This condition is met by stdrng because it selects a favored DRBG
* with a security strength of 256.
*/
if (crypto_get_default_rng())
return -EFAULT;
/* Step 3: obtain N returned_bits from the DRBG. */
err = crypto_rng_get_bytes(crypto_default_rng,
(u8 *)private_key, nbytes);
crypto_put_default_rng();
err = crypto_stdrng_get_bytes(private_key, nbytes);
if (err)
return err;


@@ -112,15 +112,7 @@ int aead_init_geniv(struct crypto_aead *aead)
struct crypto_aead *child;
int err;
spin_lock_init(&ctx->lock);
err = crypto_get_default_rng();
if (err)
goto out;
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
crypto_aead_ivsize(aead));
crypto_put_default_rng();
err = crypto_stdrng_get_bytes(ctx->salt, crypto_aead_ivsize(aead));
if (err)
goto out;


@@ -41,6 +41,7 @@
#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <crypto/internal/rng.h>
@@ -172,7 +173,7 @@ void jent_read_random_block(struct sha3_ctx *hash_state, char *dst,
***************************************************************************/
struct jitterentropy {
spinlock_t jent_lock;
struct mutex jent_lock;
struct rand_data *entropy_collector;
struct sha3_ctx hash_state;
};
@@ -181,14 +182,14 @@ static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
spin_lock(&rng->jent_lock);
mutex_lock(&rng->jent_lock);
memzero_explicit(&rng->hash_state, sizeof(rng->hash_state));
if (rng->entropy_collector)
jent_entropy_collector_free(rng->entropy_collector);
rng->entropy_collector = NULL;
spin_unlock(&rng->jent_lock);
mutex_unlock(&rng->jent_lock);
}
static int jent_kcapi_init(struct crypto_tfm *tfm)
@@ -196,7 +197,7 @@ static int jent_kcapi_init(struct crypto_tfm *tfm)
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
int ret = 0;
spin_lock_init(&rng->jent_lock);
mutex_init(&rng->jent_lock);
/* Use SHA3-256 as conditioner */
sha3_256_init(&rng->hash_state);
@@ -208,7 +209,6 @@ static int jent_kcapi_init(struct crypto_tfm *tfm)
goto err;
}
spin_lock_init(&rng->jent_lock);
return 0;
err:
@@ -223,7 +223,7 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
struct jitterentropy *rng = crypto_rng_ctx(tfm);
int ret = 0;
spin_lock(&rng->jent_lock);
mutex_lock(&rng->jent_lock);
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
@@ -249,7 +249,7 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
ret = -EINVAL;
}
spin_unlock(&rng->jent_lock);
mutex_unlock(&rng->jent_lock);
return ret;
}


@@ -154,7 +154,7 @@ static int krb5enc_dispatch_encrypt(struct aead_request *req,
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
skcipher_request_set_tfm(skreq, enc);
skcipher_request_set_callback(skreq, aead_request_flags(req),
skcipher_request_set_callback(skreq, flags,
krb5enc_encrypt_done, req);
skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv);
@@ -192,7 +192,8 @@ static void krb5enc_encrypt_ahash_done(void *data, int err)
krb5enc_insert_checksum(req, ahreq->result);
err = krb5enc_dispatch_encrypt(req, 0);
err = krb5enc_dispatch_encrypt(req,
aead_request_flags(req) & ~CRYPTO_TFM_REQ_MAY_SLEEP);
if (err != -EINPROGRESS)
aead_request_complete(req, err);
}


@@ -134,7 +134,7 @@ static int lrw_next_index(u32 *counter)
/*
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
* mutliple calls to the 'ecb(..)' instance, which usually would be slower than
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the lrw_next_index() calls again.
*/
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)


@@ -24,8 +24,7 @@
#include "internal.h"
static DEFINE_MUTEX(crypto_default_rng_lock);
struct crypto_rng *crypto_default_rng;
EXPORT_SYMBOL_GPL(crypto_default_rng);
static struct crypto_rng *crypto_default_rng;
static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
@@ -106,7 +105,7 @@ struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_rng);
int crypto_get_default_rng(void)
static int crypto_get_default_rng(void)
{
struct crypto_rng *rng;
int err;
@@ -135,15 +134,27 @@ unlock:
return err;
}
EXPORT_SYMBOL_GPL(crypto_get_default_rng);
void crypto_put_default_rng(void)
static void crypto_put_default_rng(void)
{
mutex_lock(&crypto_default_rng_lock);
crypto_default_rng_refcnt--;
mutex_unlock(&crypto_default_rng_lock);
}
EXPORT_SYMBOL_GPL(crypto_put_default_rng);
int __crypto_stdrng_get_bytes(void *buf, unsigned int len)
{
int err;
err = crypto_get_default_rng();
if (err)
return err;
err = crypto_rng_get_bytes(crypto_default_rng, buf, len);
crypto_put_default_rng();
return err;
}
EXPORT_SYMBOL_GPL(__crypto_stdrng_get_bytes);
#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
int crypto_del_default_rng(void)
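
The rng.c hunks above collapse the old crypto_get_default_rng() / crypto_rng_get_bytes(crypto_default_rng, ...) / crypto_put_default_rng() sequence into one helper. A minimal caller-side sketch, assuming the new crypto_stdrng_get_bytes() keeps the (buf, len) signature used by the converted dh/ecc/geniv call sites and is declared in <crypto/rng.h>:

#include <crypto/rng.h>
#include <linux/types.h>

/* Hypothetical caller, for illustration only; not part of this series. */
static int example_gen_salt(u8 *salt, unsigned int len)
{
	/*
	 * Previously this took three calls: grab a reference on the default
	 * RNG, draw the bytes, then drop the reference. The new helper does
	 * the get/use/put internally and returns 0 or a negative errno.
	 */
	return crypto_stdrng_get_bytes(salt, len);
}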


@@ -13,11 +13,11 @@
/*
* Shared crypto SIMD helpers. These functions dynamically create and register
* an skcipher or AEAD algorithm that wraps another, internal algorithm. The
* wrapper ensures that the internal algorithm is only executed in a context
* where SIMD instructions are usable, i.e. where may_use_simd() returns true.
* If SIMD is already usable, the wrapper directly calls the internal algorithm.
* Otherwise it defers execution to a workqueue via cryptd.
* an AEAD algorithm that wraps another, internal algorithm. The wrapper
* ensures that the internal algorithm is only executed in a context where SIMD
* instructions are usable, i.e. where may_use_simd() returns true. If SIMD is
* already usable, the wrapper directly calls the internal algorithm. Otherwise
* it defers execution to a workqueue via cryptd.
*
* This is an alternative to the internal algorithm implementing a fallback for
* the !may_use_simd() case itself.
@@ -30,232 +30,11 @@
#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>
/* skcipher support */
struct simd_skcipher_alg {
const char *ialg_name;
struct skcipher_alg alg;
};
struct simd_skcipher_ctx {
struct cryptd_skcipher *cryptd_tfm;
};
static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(child, key, key_len);
}
static int simd_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq;
struct crypto_skcipher *child;
subreq = skcipher_request_ctx(req);
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
child = cryptd_skcipher_child(ctx->cryptd_tfm);
skcipher_request_set_tfm(subreq, child);
return crypto_skcipher_encrypt(subreq);
}
static int simd_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq;
struct crypto_skcipher *child;
subreq = skcipher_request_ctx(req);
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
child = cryptd_skcipher_child(ctx->cryptd_tfm);
skcipher_request_set_tfm(subreq, child);
return crypto_skcipher_decrypt(subreq);
}
static void simd_skcipher_exit(struct crypto_skcipher *tfm)
{
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
cryptd_free_skcipher(ctx->cryptd_tfm);
}
static int simd_skcipher_init(struct crypto_skcipher *tfm)
{
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct cryptd_skcipher *cryptd_tfm;
struct simd_skcipher_alg *salg;
struct skcipher_alg *alg;
unsigned reqsize;
alg = crypto_skcipher_alg(tfm);
salg = container_of(alg, struct simd_skcipher_alg, alg);
cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
reqsize += sizeof(struct skcipher_request);
crypto_skcipher_set_reqsize(tfm, reqsize);
return 0;
}
struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
const char *algname,
const char *drvname,
const char *basename)
{
struct simd_skcipher_alg *salg;
struct skcipher_alg *alg;
int err;
salg = kzalloc_obj(*salg);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
goto out;
}
salg->ialg_name = basename;
alg = &salg->alg;
err = -ENAMETOOLONG;
if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;
alg->base.cra_alignmask = ialg->base.cra_alignmask;
alg->base.cra_module = ialg->base.cra_module;
alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);
alg->ivsize = ialg->ivsize;
alg->chunksize = ialg->chunksize;
alg->min_keysize = ialg->min_keysize;
alg->max_keysize = ialg->max_keysize;
alg->init = simd_skcipher_init;
alg->exit = simd_skcipher_exit;
alg->setkey = simd_skcipher_setkey;
alg->encrypt = simd_skcipher_encrypt;
alg->decrypt = simd_skcipher_decrypt;
err = crypto_register_skcipher(alg);
if (err)
goto out_free_salg;
out:
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
goto out;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
crypto_unregister_skcipher(&salg->alg);
kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
struct simd_skcipher_alg **simd_algs)
{
int err;
int i;
const char *algname;
const char *drvname;
const char *basename;
struct simd_skcipher_alg *simd;
err = crypto_register_skciphers(algs, count);
if (err)
return err;
for (i = 0; i < count; i++) {
WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
simd_algs[i] = simd;
}
return 0;
err_unregister:
simd_unregister_skciphers(algs, count, simd_algs);
return err;
}
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_skcipher_alg **simd_algs)
{
int i;
crypto_unregister_skciphers(algs, count);
for (i = 0; i < count; i++) {
if (simd_algs[i]) {
simd_skcipher_free(simd_algs[i]);
simd_algs[i] = NULL;
}
}
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
/* AEAD support */
struct simd_aead_alg {
const char *ialg_name;
struct aead_alg alg;
@@ -437,13 +216,17 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count,
const char *basename;
struct simd_aead_alg *simd;
for (i = 0; i < count; i++) {
if (WARN_ON(strncmp(algs[i].base.cra_name, "__", 2) ||
strncmp(algs[i].base.cra_driver_name, "__", 2)))
return -EINVAL;
}
err = crypto_register_aeads(algs, count);
if (err)
return err;
for (i = 0; i < count; i++) {
WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
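
The header comment retained at the top of this file's diff describes how the surviving AEAD wrapper decides between running the internal (SIMD) algorithm directly and deferring to cryptd. A sketch of that decision, assuming the AEAD path mirrors the skcipher logic removed earlier in this diff and that the wrapper context still holds a single cryptd_aead pointer:

#include <crypto/cryptd.h>
#include <crypto/internal/simd.h>
#include <linux/preempt.h>

/* Illustration only: pick the transform that will actually run. */
static struct crypto_aead *example_simd_pick_child(struct cryptd_aead *cryptd_tfm)
{
	/*
	 * Run the internal algorithm directly when SIMD is usable, unless we
	 * are in atomic context with requests already queued to cryptd
	 * (deferring keeps those requests ordered). Otherwise hand the
	 * request to the async cryptd transform.
	 */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(cryptd_tfm)))
		return &cryptd_tfm->base;
	return cryptd_aead_child(cryptd_tfm);
}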


@@ -911,8 +911,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
break;
}
if (klen)
crypto_ahash_setkey(tfm, tvmem[0], klen);
if (klen) {
ret = crypto_ahash_setkey(tfm, tvmem[0], klen);
if (ret) {
pr_err("setkey() failed flags=%x: %d\n",
crypto_ahash_get_flags(tfm), ret);
break;
}
}
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
@@ -2795,6 +2801,11 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
if (!num_mb) {
pr_warn("num_mb must be at least 1; forcing to 1\n");
num_mb = 1;
}
err = do_test(alg, type, mask, mode, num_mb);
if (err) {
@@ -2804,7 +2815,7 @@ static int __init tcrypt_mod_init(void)
pr_debug("all tests passed\n");
}
/* We intentionaly return -EAGAIN to prevent keeping the module,
/* We intentionally return -EAGAIN to prevent keeping the module,
* unless we're running in fips mode. It does all its work from
* init() and doesn't offer any runtime functionality, but in
* the fips case, checking for a successful load is helpful.


@@ -2,7 +2,7 @@
/*
* Cryptographic API.
*
* TEA, XTEA, and XETA crypto alogrithms
* TEA, XTEA, and XETA crypto algorithms
*
* The TEA and Xtended TEA algorithms were developed by David Wheeler
* and Roger Needham at the Computer Laboratory of Cambridge University.


@@ -4079,6 +4079,20 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = __VECS(aegis128_tv_template)
}
}, {
.alg = "authenc(hmac(md5),cbc(aes))",
.generic_driver = "authenc(hmac-md5-lib,cbc(aes-lib))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_md5_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(md5),cbc(des))",
.generic_driver = "authenc(hmac-md5-lib,cbc(des-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_md5_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(md5),cbc(des3_ede))",
.generic_driver = "authenc(hmac-md5-lib,cbc(des3_ede-generic))",
@@ -4093,6 +4107,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = __VECS(hmac_md5_ecb_cipher_null_tv_template)
}
}, {
.alg = "authenc(hmac(md5),rfc3686(ctr(aes)))",
.generic_driver = "authenc(hmac-md5-lib,rfc3686(ctr(aes-lib)))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_md5_aes_ctr_rfc3686_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.generic_driver = "authenc(hmac-sha1-lib,cbc(aes-lib))",
@@ -4128,12 +4149,17 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.test = alg_test_null,
.generic_driver = "authenc(hmac-sha1-lib,rfc3686(ctr(aes-lib)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha1_aes_ctr_rfc3686_tv_temp)
}
}, {
.alg = "authenc(hmac(sha224),cbc(aes))",
.generic_driver = "authenc(hmac-sha224-lib,cbc(aes-lib))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha224_aes_cbc_tv_temp)
}
@@ -4153,8 +4179,12 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
.test = alg_test_null,
.generic_driver = "authenc(hmac-sha224-lib,rfc3686(ctr(aes-lib)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha224_aes_ctr_rfc3686_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.generic_driver = "authenc(hmac-sha256-lib,cbc(aes-lib))",
@@ -4190,12 +4220,17 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.test = alg_test_null,
.generic_driver = "authenc(hmac-sha256-lib,rfc3686(ctr(aes-lib)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha256_aes_ctr_rfc3686_tv_temp)
}
}, {
.alg = "authenc(hmac(sha384),cbc(aes))",
.generic_driver = "authenc(hmac-sha384-lib,cbc(aes-lib))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha384_aes_cbc_tv_temp)
}
@@ -4226,8 +4261,12 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
.test = alg_test_null,
.generic_driver = "authenc(hmac-sha384-lib,rfc3686(ctr(aes-lib)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha384_aes_ctr_rfc3686_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),cbc(aes))",
.generic_driver = "authenc(hmac-sha512-lib,cbc(aes-lib))",
@@ -4256,8 +4295,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
.test = alg_test_null,
.generic_driver = "authenc(hmac-sha512-lib,rfc3686(ctr(aes-lib)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha512_aes_ctr_rfc3686_tv_temp)
}
}, {
.alg = "blake2b-160",
.generic_driver = "blake2b-160-lib",

File diff suppressed because it is too large.


@@ -76,7 +76,7 @@ static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
/*
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
* mutliple calls to the 'ecb(..)' instance, which usually would be slower than
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the gf128mul_x_ble() calls again.
*/
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,


@@ -301,6 +301,7 @@ config CRYPTO_DEV_PPC4XX
select CRYPTO_CCM
select CRYPTO_CTR
select CRYPTO_GCM
select CRYPTO_RNG
select CRYPTO_SKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -490,7 +491,7 @@ config CRYPTO_DEV_ATMEL_ECC
select CRYPTO_ECDH
select CRC16
help
Microhip / Atmel ECC hw accelerator.
Microchip / Atmel ECC hw accelerator.
Select this if you want to use the Microchip / Atmel module for
ECDH algorithm.
@@ -504,7 +505,7 @@ config CRYPTO_DEV_ATMEL_SHA204A
select HW_RANDOM
select CRC16
help
Microhip / Atmel SHA accelerator and RNG.
Microchip / Atmel SHA accelerator and RNG.
Select this if you want to use the Microchip / Atmel SHA204A
module as a random number generator. (Other functions of the
chip are currently not exposed by this driver)
@@ -667,14 +668,6 @@ config CRYPTO_DEV_QCOM_RNG
To compile this driver as a module, choose M here. The
module will be called qcom-rng. If unsure, say N.
#config CRYPTO_DEV_VMX
# bool "Support for VMX cryptographic acceleration instructions"
# depends on PPC64 && VSX
# help
# Support for VMX cryptographic acceleration instructions.
#
#source "drivers/crypto/vmx/Kconfig"
config CRYPTO_DEV_IMGTEC_HASH
tristate "Imagination Technologies hardware hash accelerator"
depends on MIPS || COMPILE_TEST


@@ -38,7 +38,6 @@ obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/


@@ -14,6 +14,7 @@ config CRYPTO_DEV_SUN4I_SS
select CRYPTO_SHA1
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_RNG
select CRYPTO_SKCIPHER
help
Some Allwinner SoC have a crypto accelerator named
@@ -49,6 +50,7 @@ config CRYPTO_DEV_SUN8I_CE
select CRYPTO_CBC
select CRYPTO_AES
select CRYPTO_DES
select CRYPTO_RNG
depends on CRYPTO_DEV_ALLWINNER
depends on PM
help


@@ -182,8 +182,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
final = true;
} else
length -= remain;
scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
rctx->offset, length, 0);
memcpy_from_sglist(hash_engine->ahash_src_addr, rctx->src_sg, rctx->offset, length);
aspeed_ahash_update_counter(rctx, length);
if (final)
length += aspeed_ahash_fill_padding(


@@ -2131,7 +2131,7 @@ static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
free_page((unsigned long)dd->buf);
free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}
static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
@@ -2270,10 +2270,12 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
crypto_unregister_aeads(aes_authenc_algs, i);
crypto_unregister_skcipher(&aes_xts_alg);
if (dd->caps.has_xts)
crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
crypto_unregister_aead(&aes_gcm_alg);
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:


@@ -261,6 +261,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
if (IS_ERR(fallback)) {
dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
alg, PTR_ERR(fallback));
atmel_ecc_i2c_client_free(ctx->client);
return PTR_ERR(fallback);
}


@@ -72,8 +72,8 @@ EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd);
int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr)
{
if (addr < 0 || addr > OTP_ZONE_SIZE)
return -1;
if (addr >= OTP_ZONE_SIZE / 4)
return -EINVAL;
cmd->word_addr = COMMAND;
cmd->opcode = OPCODE_READ;
@@ -370,7 +370,7 @@ int atmel_i2c_probe(struct i2c_client *client)
}
}
if (bus_clk_rate > 1000000L) {
if (bus_clk_rate > I2C_MAX_FAST_MODE_PLUS_FREQ) {
dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
bus_clk_rate);
return -EINVAL;


@@ -404,20 +404,13 @@ static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
{
struct atmel_sha_dev *dd = NULL;
struct atmel_sha_dev *tmp;
struct atmel_sha_dev *dd;
spin_lock_bh(&atmel_sha.lock);
if (!tctx->dd) {
list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
dd = tmp;
break;
}
tctx->dd = dd;
} else {
dd = tctx->dd;
}
if (!tctx->dd)
tctx->dd = list_first_entry_or_null(&atmel_sha.dev_list,
struct atmel_sha_dev, list);
dd = tctx->dd;
spin_unlock_bh(&atmel_sha.lock);
return dd;
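
A minimal sketch of the list_first_entry_or_null() idiom adopted above; the structure and list names are illustrative, not taken from the driver:

#include <linux/list.h>

struct demo_dev {
	struct list_head list;
	int id;
};

static LIST_HEAD(demo_dev_list);

/* Return the first registered device, or NULL if the list is empty. */
static struct demo_dev *demo_find_first(void)
{
	return list_first_entry_or_null(&demo_dev_list, struct demo_dev, list);
}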

View File

@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "atmel-i2c.h"
@@ -95,19 +96,24 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp)
{
struct atmel_i2c_cmd cmd;
int ret = -1;
int ret;
if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) {
ret = atmel_i2c_init_read_otp_cmd(&cmd, addr);
if (ret < 0) {
dev_err(&client->dev, "failed, invalid otp address %04X\n",
addr);
return ret;
}
ret = atmel_i2c_send_receive(client, &cmd);
if (ret < 0) {
dev_err(&client->dev, "failed to read otp at %04X\n", addr);
return ret;
}
if (cmd.data[0] == 0xff) {
dev_err(&client->dev, "failed, device not ready\n");
return -EINVAL;
return -EIO;
}
memcpy(otp, cmd.data+1, 4);
@@ -120,21 +126,22 @@ static ssize_t otp_show(struct device *dev,
{
u16 addr;
u8 otp[OTP_ZONE_SIZE];
char *str = buf;
struct i2c_client *client = to_i2c_client(dev);
int i;
ssize_t len = 0;
int i, ret;
for (addr = 0; addr < OTP_ZONE_SIZE/4; addr++) {
if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) {
for (addr = 0; addr < OTP_ZONE_SIZE / 4; addr++) {
ret = atmel_sha204a_otp_read(client, addr, otp + addr * 4);
if (ret < 0) {
dev_err(dev, "failed to read otp zone\n");
break;
return ret;
}
}
for (i = 0; i < addr*2; i++)
str += sprintf(str, "%02X", otp[i]);
str += sprintf(str, "\n");
return str - buf;
for (i = 0; i < OTP_ZONE_SIZE; i++)
len += sysfs_emit_at(buf, len, "%02X", otp[i]);
len += sysfs_emit_at(buf, len, "\n");
return len;
}
static DEVICE_ATTR_RO(otp);
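
A minimal sketch of the sysfs_emit_at() pattern used in otp_show() above; the attribute name and payload are placeholders:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	static const u8 bytes[4] = { 0xde, 0xad, 0xbe, 0xef };
	ssize_t len = 0;
	int i;

	/* sysfs_emit_at() bounds the output against PAGE_SIZE internally. */
	for (i = 0; i < ARRAY_SIZE(bytes); i++)
		len += sysfs_emit_at(buf, len, "%02X", bytes[i]);
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}
static DEVICE_ATTR_RO(demo);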
@@ -174,10 +181,6 @@ static int atmel_sha204a_probe(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
/* otp read out */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
ret = sysfs_create_group(&client->dev.kobj, &atmel_sha204a_groups);
if (ret) {
dev_err(&client->dev, "failed to register sysfs entry\n");
@@ -191,10 +194,8 @@ static void atmel_sha204a_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
if (atomic_read(&i2c_priv->tfm_count)) {
dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n");
return;
}
devm_hwrng_unregister(&client->dev, &i2c_priv->hwrng);
atmel_i2c_flush_queue();
sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups);

View File

@@ -294,8 +294,8 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
dd->dma_size, DMA_FROM_DEVICE);
dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
@@ -619,8 +619,8 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
dd->dma_size, DMA_FROM_DEVICE);
dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,

View File

@@ -1323,7 +1323,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
artpec6_crypto_init_dma_operation(common);
/* Upload HMAC key, must be first the first packet */
/* Upload HMAC key, it must be the first packet */
if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
if (variant == ARTPEC6_CRYPTO) {
req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
@@ -1333,11 +1333,8 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
a7_regk_crypto_dlkey);
}
/* Copy and pad up the key */
memcpy(req_ctx->key_buffer, ctx->hmac_key,
ctx->hmac_key_length);
memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
blocksize - ctx->hmac_key_length);
memcpy_and_pad(req_ctx->key_buffer, blocksize, ctx->hmac_key,
ctx->hmac_key_length, 0);
error = artpec6_crypto_setup_out_descr(common,
(void *)&req_ctx->key_md,

View File

@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -3269,7 +3270,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
dpaa2_fl_set_addr(out_fle, key_dma);
dpaa2_fl_set_len(out_fle, digestsize);
print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
print_hex_dump_devel("key_in@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
@@ -3289,7 +3290,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
print_hex_dump_devel("digested key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
@@ -4645,16 +4646,12 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
alg = &halg->halg.base;
if (keyed) {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
strscpy(alg->cra_name, template->hmac_name);
strscpy(alg->cra_driver_name, template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
strscpy(alg->cra_name, template->name);
strscpy(alg->cra_driver_name, template->driver_name);
t_alg->ahash_alg.setkey = NULL;
t_alg->is_hmac = false;
}
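
A minimal sketch of the strscpy() conversion applied here, assuming the two-argument form that infers the destination size from the array (as in the hunk above) and returns -E2BIG on truncation; the structure is illustrative:

#include <linux/errno.h>
#include <linux/string.h>

struct demo_alg {
	char cra_name[64];
};

static int demo_set_name(struct demo_alg *alg, const char *name)
{
	/* strscpy() NUL-terminates and reports truncation, unlike snprintf("%s", ...). */
	if (strscpy(alg->cra_name, name) < 0)
		return -EINVAL;

	return 0;
}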

View File

@@ -393,7 +393,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
print_hex_dump_devel("key_in@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
@@ -408,7 +408,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
wait_for_completion(&result.completion);
ret = result.err;
print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
print_hex_dump_devel("digested key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
@@ -1914,16 +1914,12 @@ caam_hash_alloc(struct caam_hash_template *template,
alg = &halg->halg.base;
if (keyed) {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
strscpy(alg->cra_name, template->hmac_name);
strscpy(alg->cra_driver_name, template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
strscpy(alg->cra_name, template->name);
strscpy(alg->cra_driver_name, template->driver_name);
halg->setkey = NULL;
t_alg->is_hmac = false;
}

View File

@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/algapi.h>
@@ -223,9 +224,8 @@ static int ccp_register_aes_aead(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_aead->alg;
*alg = *def->alg_defaults;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
strscpy(alg->base.cra_name, def->name);
strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
ret = crypto_register_aead(alg);

View File

@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
@@ -239,9 +240,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
alg = &ccp_alg->alg;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
strscpy(alg->base.cra_name, def->name);
strscpy(alg->base.cra_driver_name, def->drv_name);
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |

View File

@@ -305,9 +305,8 @@ static int ccp_register_aes_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
strscpy(alg->base.cra_name, def->name);
strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->ivsize = def->ivsize;

View File

@@ -193,9 +193,8 @@ static int ccp_register_des3_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
strscpy(alg->base.cra_name, def->name);
strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->ivsize = def->ivsize;

View File

@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/internal/rsa.h>
@@ -257,9 +258,8 @@ static int ccp_register_rsa_alg(struct list_head *head,
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
strscpy(alg->base.cra_name, def->name);
strscpy(alg->base.cra_driver_name, def->driver_name);
ret = crypto_register_akcipher(alg);
if (ret) {
pr_err("%s akcipher algorithm registration error (%d)\n",

View File

@@ -484,9 +484,8 @@ static int ccp_register_sha_alg(struct list_head *head,
halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
strscpy(base->cra_name, def->name);
strscpy(base->cra_driver_name, def->drv_name);
base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |

View File

@@ -1965,11 +1965,11 @@ static int sev_get_firmware(struct device *dev,
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
struct sev_data_download_firmware *data;
struct sev_data_download_firmware data;
const struct firmware *firmware;
int ret, error, order;
struct page *p;
u64 data_size;
void *fw_blob;
if (!sev_version_greater_or_equal(0, 15)) {
dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
@@ -1981,16 +1981,7 @@ static int sev_update_firmware(struct device *dev)
return -1;
}
/*
* SEV FW expects the physical address given to it to be 32
* byte aligned. Memory allocated has structure placed at the
* beginning followed by the firmware being passed to the SEV
* FW. Allocate enough memory for data structure + alignment
* padding + SEV FW.
*/
data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
order = get_order(firmware->size + data_size);
order = get_order(firmware->size);
p = alloc_pages(GFP_KERNEL, order);
if (!p) {
ret = -1;
@@ -2001,20 +1992,20 @@ static int sev_update_firmware(struct device *dev)
* Copy firmware data to a kernel allocated contiguous
* memory region.
*/
data = page_address(p);
memcpy(page_address(p) + data_size, firmware->data, firmware->size);
fw_blob = page_address(p);
memcpy(fw_blob, firmware->data, firmware->size);
data->address = __psp_pa(page_address(p) + data_size);
data->len = firmware->size;
data.address = __psp_pa(fw_blob);
data.len = firmware->size;
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error);
/*
* A quirk for fixing the committed TCB version, when upgrading from
* earlier firmware version than 1.50.
*/
if (!ret && !sev_version_greater_or_equal(1, 50))
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error);
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
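
A minimal sketch of the simplified allocation kept above: only the firmware blob needs physically contiguous pages now that the command descriptor lives on the stack. The names are illustrative and the PSP command itself is omitted:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Copy a firmware image into contiguous pages; caller frees with __free_pages(). */
static void *demo_copy_fw(const void *data, size_t size, struct page **pagep,
			  int *orderp)
{
	int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return NULL;

	memcpy(page_address(p), data, size);
	*pagep = p;
	*orderp = order;

	return page_address(p);
}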

View File

@@ -1448,6 +1448,7 @@ static int cc_mac_digest(struct ahash_request *req)
if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}

View File

@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
@@ -2256,8 +2257,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_templat
alg->alg.init = hifn_init_tfm;
err = -EINVAL;
if (snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"%s", t->name) >= CRYPTO_MAX_ALG_NAME)
if (strscpy(alg->alg.base.cra_name, t->name) < 0)
goto out_free_alg;
if (snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s-%s", t->drv_name, dev->name) >= CRYPTO_MAX_ALG_NAME)
@@ -2367,7 +2367,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->alg_list);
snprintf(dev->name, sizeof(dev->name), "%s", name);
strscpy(dev->name, name);
spin_lock_init(&dev->lock);
for (i = 0; i < 3; ++i) {

View File

@@ -45,8 +45,8 @@ struct qm_dfx_item {
struct qm_cmd_dump_item {
const char *cmd;
char *info_name;
int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name);
const char *info_name;
int (*dump_fn)(struct hisi_qm *qm, char *cmd, const char *info_name);
};
static struct qm_dfx_item qm_dfx_files[] = {
@@ -151,7 +151,7 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
}
static void dump_show(struct hisi_qm *qm, void *info,
unsigned int info_size, char *info_name)
unsigned int info_size, const char *info_name)
{
struct device *dev = &qm->pdev->dev;
u8 *info_curr = info;
@@ -165,7 +165,7 @@ static void dump_show(struct hisi_qm *qm, void *info,
}
}
static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_sqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_sqc sqc;
@@ -202,7 +202,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_cqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_cqc cqc;
@@ -239,7 +239,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_aeqc aeqc;
@@ -305,7 +305,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
ret = kstrtou32(presult, 0, e_id);
if (ret || *e_id >= q_depth) {
dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
dev_err(dev, "Please input sqe num (0-%d)", q_depth - 1);
return -EINVAL;
}
@@ -317,7 +317,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
return 0;
}
static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_sq_dump(struct hisi_qm *qm, char *s, const char *name)
{
u16 sq_depth = qm->qp_array->sq_depth;
struct hisi_qp *qp;
@@ -345,7 +345,7 @@ static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_cq_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct qm_cqe *cqe_curr;
struct hisi_qp *qp;
@@ -363,7 +363,7 @@ static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
u16 xeq_depth;
@@ -388,7 +388,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
}
if (xeqe_id >= xeq_depth) {
dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1);
dev_err(dev, "Please input eqe or aeqe num (0-%d)", xeq_depth - 1);
return -EINVAL;
}
@@ -1040,6 +1040,57 @@ void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
}
}
static int qm_usage_percent(struct hisi_qm *qm, int chan_num)
{
u32 val, used_bw, total_bw;
val = readl(qm->io_base + QM_CHANNEL_USAGE_OFFSET +
chan_num * QM_CHANNEL_ADDR_INTRVL);
used_bw = lower_16_bits(val);
total_bw = upper_16_bits(val);
if (!total_bw)
return -EIO;
if (total_bw <= used_bw)
return QM_MAX_DEV_USAGE;
return (used_bw * QM_DEV_USAGE_RATE) / total_bw;
}
static int qm_usage_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
bool dev_is_active = true;
int i, ret;
/* If the device is suspended, usage is 0. */
ret = hisi_qm_get_dfx_access(qm);
if (ret == -EAGAIN) {
dev_is_active = false;
} else if (ret) {
dev_err(&qm->pdev->dev, "failed to get dfx access for usage_show!\n");
return ret;
}
ret = 0;
for (i = 0; i < qm->channel_data.channel_num; i++) {
if (dev_is_active) {
ret = qm_usage_percent(qm, i);
if (ret < 0) {
hisi_qm_put_dfx_access(qm);
return ret;
}
}
seq_printf(s, "%s: %d\n", qm->channel_data.channel_name[i], ret);
}
if (dev_is_active)
hisi_qm_put_dfx_access(qm);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(qm_usage);
static int qm_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -1159,6 +1210,9 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
qm, &qm_diff_regs_fops);
if (qm->ver >= QM_HW_V5)
debugfs_create_file("dev_usage", 0444, qm->debug.debug_root, qm, &qm_usage_fops);
debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);

View File

@@ -1327,17 +1327,9 @@ static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
struct device *dev = ctx->dev;
int ret;
ret = crypto_get_default_rng();
if (ret) {
dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
return ret;
}
ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
params->key_size);
crypto_put_default_rng();
ret = crypto_stdrng_get_bytes(params->key, params->key_size);
if (ret)
dev_err(dev, "failed to get rng, ret = %d!\n", ret);
dev_err(dev, "failed to get random bytes, ret = %d!\n", ret);
return ret;
}
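
A minimal sketch of the new crypto_stdrng_get_bytes() helper as used above, assuming it is declared in <crypto/rng.h> and simply fills the buffer from the default "stdrng" instance, returning 0 on success; the caller is illustrative:

#include <crypto/rng.h>
#include <linux/printk.h>
#include <linux/types.h>

static int demo_gen_key(u8 *key, unsigned int key_size)
{
	int ret;

	/* Replaces the old get/use/put dance around crypto_default_rng. */
	ret = crypto_stdrng_get_bytes(key, key_size);
	if (ret)
		pr_err("failed to get random bytes, ret = %d\n", ret);

	return ret;
}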

View File

@@ -121,6 +121,8 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
#define HPRE_MAX_CHANNEL_NUM 2
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -370,6 +372,11 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
static const char *hpre_channel_name[HPRE_MAX_CHANNEL_NUM] = {
"RSA",
"ECC",
};
static const struct hisi_qm_err_ini hpre_err_ini;
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
@@ -1234,6 +1241,16 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
static void hpre_set_channels(struct hisi_qm *qm)
{
struct qm_channel *channel_data = &qm->channel_data;
int i;
channel_data->channel_num = HPRE_MAX_CHANNEL_NUM;
for (i = 0; i < HPRE_MAX_CHANNEL_NUM; i++)
channel_data->channel_name[i] = hpre_channel_name[i];
}
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1267,6 +1284,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
hpre_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = hpre_pre_store_cap_reg(qm);
if (ret) {

View File

@@ -472,6 +472,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
static int qm_reset_device(struct hisi_qm *qm);
static void hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
unsigned int device)
{
@@ -2262,7 +2264,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
* After this function, qp can receive request from user. Return 0 if
* successful, negative error code if failed.
*/
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
static int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
struct hisi_qm *qm = qp->qm;
int ret;
@@ -2273,7 +2275,6 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
* qp_stop_fail_cb() - call request cb.
@@ -2418,13 +2419,12 @@ static void qm_stop_qp_nolock(struct hisi_qp *qp)
*
* This function is reverse of hisi_qm_start_qp.
*/
void hisi_qm_stop_qp(struct hisi_qp *qp)
static void hisi_qm_stop_qp(struct hisi_qp *qp)
{
down_write(&qp->qm->qps_lock);
qm_stop_qp_nolock(qp);
up_write(&qp->qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
/**
* hisi_qp_send() - Queue up a task in the hardware queue.
@@ -3381,7 +3381,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
int hisi_qm_start(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
int ret = 0;
int ret;
down_write(&qm->qps_lock);
@@ -3917,8 +3917,8 @@ back_func_qos:
static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
{
u64 cir_u = 0, cir_b = 0, cir_s = 0;
u64 shaper_vft, ir_calc, ir;
u64 cir_u, cir_b, cir_s;
unsigned int val;
u32 error_rate;
int ret;
@@ -4278,8 +4278,8 @@ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
return hisi_qm_sriov_disable(pdev, false);
else
return hisi_qm_sriov_enable(pdev, num_vfs);
return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);

View File

@@ -844,7 +844,7 @@ err_free_elements:
if (crypto_skcipher_ivsize(atfm))
dma_unmap_single(info->dev, sec_req->dma_iv,
crypto_skcipher_ivsize(atfm),
DMA_BIDIRECTIONAL);
DMA_TO_DEVICE);
err_unmap_out_sg:
if (split)
sec_unmap_sg_on_err(skreq->dst, steps, splits_out,

View File

@@ -285,7 +285,5 @@ enum sec_cap_table_type {
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif

View File

@@ -230,7 +230,7 @@ static int qp_send_message(struct sec_req *req)
spin_unlock_bh(&qp_ctx->req_lock);
atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
atomic64_inc(&qp_ctx->ctx->sec->debug.dfx.send_cnt);
return -EINPROGRESS;
}

View File

@@ -133,6 +133,8 @@
#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
GENMASK_ULL(45, 43))
#define SEC_MAX_CHANNEL_NUM 1
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -907,7 +909,7 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
}
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
sec_debugfs_atomic64_set, "%lld\n");
sec_debugfs_atomic64_set, "%llu\n");
static int sec_regs_show(struct seq_file *s, void *unused)
{
@@ -1288,6 +1290,14 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
static void sec_set_channels(struct hisi_qm *qm)
{
struct qm_channel *channel_data = &qm->channel_data;
channel_data->channel_num = SEC_MAX_CHANNEL_NUM;
channel_data->channel_name[0] = "SEC";
}
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1325,6 +1335,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
sec_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = sec_pre_store_cap_reg(qm);
if (ret) {

View File

@@ -122,6 +122,8 @@
#define HZIP_LIT_LEN_EN_OFFSET 0x301204
#define HZIP_LIT_LEN_EN_EN BIT(4)
#define HZIP_MAX_CHANNEL_NUM 3
enum {
HZIP_HIGH_COMP_RATE,
HZIP_HIGH_COMP_PERF,
@@ -359,6 +361,12 @@ static struct dfx_diff_registers hzip_diff_regs[] = {
},
};
static const char *zip_channel_name[HZIP_MAX_CHANNEL_NUM] = {
"COMPRESS",
"DECOMPRESS",
"DAE"
};
static int hzip_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -1400,6 +1408,16 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
static void zip_set_channels(struct hisi_qm *qm)
{
struct qm_channel *channel_data = &qm->channel_data;
int i;
channel_data->channel_num = HZIP_MAX_CHANNEL_NUM;
for (i = 0; i < HZIP_MAX_CHANNEL_NUM; i++)
channel_data->channel_name[i] = zip_channel_name[i];
}
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1438,6 +1456,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
zip_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = zip_pre_store_cap_reg(qm);
if (ret) {

View File

@@ -629,24 +629,14 @@ static int img_hash_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
struct img_hash_dev *hdev = NULL;
struct img_hash_dev *tmp;
int err;
spin_lock(&img_hash.lock);
if (!tctx->hdev) {
list_for_each_entry(tmp, &img_hash.dev_list, list) {
hdev = tmp;
break;
}
tctx->hdev = hdev;
} else {
hdev = tctx->hdev;
}
if (!tctx->hdev)
tctx->hdev = list_first_entry_or_null(&img_hash.dev_list,
struct img_hash_dev, list);
ctx->hdev = tctx->hdev;
spin_unlock(&img_hash.lock);
ctx->hdev = hdev;
ctx->flags = 0;
ctx->digsize = crypto_ahash_digestsize(tfm);
@@ -675,9 +665,7 @@ static int img_hash_digest(struct ahash_request *req)
ctx->sgfirst = req->src;
ctx->nents = sg_nents(ctx->sg);
err = img_hash_handle_queue(tctx->hdev, req);
return err;
return img_hash_handle_queue(ctx->hdev, req);
}
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config CRYPTO_DEV_EIP93
tristate "Support for EIP93 crypto HW accelerators"
depends on SOC_MT7621 || ARCH_AIROHA ||COMPILE_TEST
depends on SOC_MT7621 || ARCH_AIROHA || ECONET || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aead.h>

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_AEAD_H_
#define _EIP93_AEAD_H_

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_AES_H_
#define _EIP93_AES_H_

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aes.h>
@@ -320,7 +320,7 @@ struct eip93_alg_template eip93_alg_ecb_des = {
.ivsize = 0,
.base = {
.cra_name = "ecb(des)",
.cra_driver_name = "ebc(des-eip93)",
.cra_driver_name = "ecb(des-eip93)",
.cra_priority = EIP93_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_CIPHER_H_
#define _EIP93_CIPHER_H_

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aes.h>

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_COMMON_H_

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_DES_H_
#define _EIP93_DES_H_

View File

@@ -2,7 +2,7 @@
/*
* Copyright (C) 2024
*
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/sha1.h>

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_HASH_H_
#define _EIP93_HASH_H_

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#include <linux/atomic.h>
@@ -36,6 +36,14 @@ static struct eip93_alg_template *eip93_algs[] = {
&eip93_alg_cbc_aes,
&eip93_alg_ctr_aes,
&eip93_alg_rfc3686_aes,
&eip93_alg_md5,
&eip93_alg_sha1,
&eip93_alg_sha224,
&eip93_alg_sha256,
&eip93_alg_hmac_md5,
&eip93_alg_hmac_sha1,
&eip93_alg_hmac_sha224,
&eip93_alg_hmac_sha256,
&eip93_alg_authenc_hmac_md5_cbc_des,
&eip93_alg_authenc_hmac_sha1_cbc_des,
&eip93_alg_authenc_hmac_sha224_cbc_des,
@@ -52,14 +60,6 @@ static struct eip93_alg_template *eip93_algs[] = {
&eip93_alg_authenc_hmac_sha1_rfc3686_aes,
&eip93_alg_authenc_hmac_sha224_rfc3686_aes,
&eip93_alg_authenc_hmac_sha256_rfc3686_aes,
&eip93_alg_md5,
&eip93_alg_sha1,
&eip93_alg_sha224,
&eip93_alg_sha256,
&eip93_alg_hmac_md5,
&eip93_alg_hmac_sha1,
&eip93_alg_hmac_sha224,
&eip93_alg_hmac_sha256,
};
inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask)

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_MAIN_H_
#define _EIP93_MAIN_H_

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com
* Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef REG_EIP93_H
#define REG_EIP93_H
@@ -109,7 +109,7 @@
#define EIP93_REG_PE_BUF_THRESH 0x10c
#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16)
#define EIP93_PE_INBUF_THRESH GENMASK(7, 0)
#define EIP93_REG_PE_INBUF_COUNT 0x100
#define EIP93_REG_PE_INBUF_COUNT 0x110
#define EIP93_REG_PE_OUTBUF_COUNT 0x114
#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */

View File

@@ -1204,12 +1204,13 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sha256,
&safexcel_alg_hmac_sha384,
&safexcel_alg_hmac_sha512,
&safexcel_alg_authenc_hmac_md5_cbc_aes,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
&safexcel_alg_authenc_hmac_sha384_cbc_aes,
&safexcel_alg_authenc_hmac_sha512_cbc_aes,
&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
&safexcel_alg_authenc_hmac_md5_ctr_aes,
&safexcel_alg_authenc_hmac_sha1_ctr_aes,
&safexcel_alg_authenc_hmac_sha224_ctr_aes,
&safexcel_alg_authenc_hmac_sha256_ctr_aes,
@@ -1241,11 +1242,14 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sha3_256,
&safexcel_alg_hmac_sha3_384,
&safexcel_alg_hmac_sha3_512,
&safexcel_alg_authenc_hmac_sha1_cbc_des,
&safexcel_alg_authenc_hmac_md5_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
&safexcel_alg_authenc_hmac_md5_cbc_des,
&safexcel_alg_authenc_hmac_sha1_cbc_des,
&safexcel_alg_authenc_hmac_sha256_cbc_des,
&safexcel_alg_authenc_hmac_sha224_cbc_des,
&safexcel_alg_authenc_hmac_sha512_cbc_des,

View File

@@ -945,12 +945,13 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes;
@@ -982,11 +983,14 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des;

View File

@@ -17,6 +17,7 @@
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
#include <crypto/md5.h>
#include <crypto/poly1305.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -462,6 +463,9 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
/* Auth key */
switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_MD5:
alg = "safexcel-md5";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
alg = "safexcel-sha1";
break;
@@ -1662,6 +1666,42 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
return 0;
}
static int safexcel_aead_md5_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
ctx->state_sz = MD5_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-md5-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_md5_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1842,6 +1882,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
},
};
static int safexcel_aead_md5_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_md5_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
ctx->blocksz = DES3_EDE_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_md5_des3_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2027,6 +2104,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
},
};
static int safexcel_aead_md5_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_md5_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
ctx->blocksz = DES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_md5_des_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2212,6 +2326,41 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
},
};
static int safexcel_aead_md5_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_md5_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
.cra_driver_name = "safexcel-authenc-hmac-md5-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_md5_ctr_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

View File

@@ -906,8 +906,8 @@ static void rebalance_wq_table(void)
return;
}
cpu = 0;
for_each_node_with_cpus(node) {
cpu = 0;
node_cpus = cpumask_of_node(node);
for_each_cpu(node_cpu, node_cpus) {

View File

@@ -230,12 +230,7 @@ static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
int rc = 0;
/* Generate random nbytes for Simple and Differential SCA protection. */
rc = crypto_get_default_rng();
if (rc)
return rc;
rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
crypto_put_default_rng();
rc = crypto_stdrng_get_bytes(sca, nbytes);
if (rc)
return rc;
@@ -509,14 +504,10 @@ static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
* This condition is met by the default RNG because it selects a favored
* DRBG with a security strength of 256.
* This condition is met by stdrng because it selects a favored DRBG
* with a security strength of 256.
*/
if (crypto_get_default_rng())
return -EFAULT;
rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
rc = crypto_stdrng_get_bytes(priv, nbytes);
if (rc)
goto cleanup;

View File

@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_DEV_QAT
tristate
select CRYPTO_ACOMP
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
@@ -11,6 +12,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_LIB_SHA1
select CRYPTO_LIB_SHA256
select CRYPTO_LIB_SHA512
select CRYPTO_ZSTD
select FW_LOADER
select CRC8

View File

@@ -97,9 +97,25 @@ static struct adf_hw_device_class adf_420xx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
u32 me_disable = self->fuses[ADF_FUSECTL4];
unsigned long fuses = self->fuses[ADF_FUSECTL4];
u32 mask = ADF_420XX_ACCELENGINES_MASK;
return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
if (test_bit(0, &fuses))
mask &= ~ADF_AE_GROUP_0;
if (test_bit(4, &fuses))
mask &= ~ADF_AE_GROUP_1;
if (test_bit(8, &fuses))
mask &= ~ADF_AE_GROUP_2;
if (test_bit(12, &fuses))
mask &= ~ADF_AE_GROUP_3;
if (test_bit(16, &fuses))
mask &= ~ADF_AE_GROUP_4;
return mask;
}
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
@@ -472,6 +488,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);

View File

@@ -100,9 +100,19 @@ static struct adf_hw_device_class adf_4xxx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
u32 me_disable = self->fuses[ADF_FUSECTL4];
unsigned long fuses = self->fuses[ADF_FUSECTL4];
u32 mask = ADF_4XXX_ACCELENGINES_MASK;
return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
if (test_bit(0, &fuses))
mask &= ~ADF_AE_GROUP_0;
if (test_bit(4, &fuses))
mask &= ~ADF_AE_GROUP_1;
if (test_bit(8, &fuses))
mask &= ~ADF_AE_GROUP_2;
return mask;
}
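
A minimal sketch of the fuse decoding introduced above: each disable bit in FUSECTL4 knocks out one GENMASK() group of accelerator engines. The bit positions and group masks here are illustrative only:

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_AE_GROUP_0	GENMASK(3, 0)
#define DEMO_AE_GROUP_1	GENMASK(7, 4)
#define DEMO_AE_GROUP_2	BIT(8)

static u32 demo_ae_mask(u32 fusectl4)
{
	unsigned long fuses = fusectl4;
	u32 mask = DEMO_AE_GROUP_0 | DEMO_AE_GROUP_1 | DEMO_AE_GROUP_2;

	if (test_bit(0, &fuses))
		mask &= ~DEMO_AE_GROUP_0;
	if (test_bit(4, &fuses))
		mask &= ~DEMO_AE_GROUP_1;
	if (test_bit(8, &fuses))
		mask &= ~DEMO_AE_GROUP_2;

	return mask;
}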
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
@@ -463,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);

View File

@@ -33,6 +33,8 @@
#define ADF_AE_GROUP_1 GENMASK(7, 4)
#define ADF_AE_GROUP_2 BIT(8)
#define ASB_MULTIPLIER 9
struct adf_ring_config {
u32 ring_mask;
enum adf_cfg_service_type ring_type;
@@ -82,10 +84,15 @@ static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
};
static const unsigned long thrd_mask_wcy[ADF_6XXX_MAX_ACCELENGINES] = {
0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x00
};
static const char *const adf_6xxx_fw_objs[] = {
[ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
[ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
[ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
[ADF_FW_WCY_OBJ] = ADF_6XXX_WCY_OBJ,
};
static const struct adf_fw_config adf_default_fw_config[] = {
@@ -94,6 +101,12 @@ static const struct adf_fw_config adf_default_fw_config[] = {
{ ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
};
static const struct adf_fw_config adf_wcy_fw_config[] = {
{ ADF_AE_GROUP_1, ADF_FW_WCY_OBJ },
{ ADF_AE_GROUP_0, ADF_FW_WCY_OBJ },
{ ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
};
static struct adf_hw_device_class adf_6xxx_class = {
.name = ADF_6XXX_DEVICE_NAME,
.type = DEV_6XXX,
@@ -118,6 +131,12 @@ static bool services_supported(unsigned long mask)
}
}
static bool wcy_services_supported(unsigned long mask)
{
/* The wireless SKU supports only the symmetric crypto service */
return mask == BIT(SVC_SYM);
}
static int get_service(unsigned long *mask)
{
if (test_and_clear_bit(SVC_ASYM, mask))
@@ -155,8 +174,12 @@ static enum adf_cfg_service_type get_ring_type(unsigned int service)
}
}
static const unsigned long *get_thrd_mask(unsigned int service)
static const unsigned long *get_thrd_mask(struct adf_accel_dev *accel_dev,
unsigned int service)
{
if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
return (service == SVC_SYM) ? thrd_mask_wcy : NULL;
switch (service) {
case SVC_SYM:
return thrd_mask_sym;
@@ -194,7 +217,7 @@ static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config
return service;
rp_config[i].ring_type = get_ring_type(service);
rp_config[i].thrd_mask = get_thrd_mask(service);
rp_config[i].thrd_mask = get_thrd_mask(accel_dev, service);
/*
* If there is only one service enabled, use all ring pairs for
@@ -386,6 +409,8 @@ static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWCPL_OFFSET, ADF_SSMWDTWCPH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWATL_OFFSET, ADF_SSMWDTWATH_OFFSET, val);
/* Enable watchdog timer for pke */
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
@@ -439,6 +464,21 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number)
return 0;
}
static bool adf_anti_rb_enabled(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
return !!(hw_data->fuses[0] & ADF_GEN6_ANTI_RB_FUSE_BIT);
}
static void adf_gen6_init_anti_rb(struct adf_anti_rb_hw_data *anti_rb_data)
{
anti_rb_data->anti_rb_enabled = adf_anti_rb_enabled;
anti_rb_data->svncheck_offset = ADF_GEN6_SVNCHECK_CSR_MSG;
anti_rb_data->svncheck_retry = 0;
anti_rb_data->sysfs_added = false;
}
static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
@@ -471,6 +511,9 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo)
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
break;
case QAT_ZSTD:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_COMPRESS;
break;
default:
return -EINVAL;
}
@@ -481,6 +524,13 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo)
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
/*
* Store Auto Select Best (ASB) multiplier in the request template.
* This will be used in the data path to set the actual threshold
* value based on the input data size.
*/
req_tmpl->u3.asb_threshold.asb_value = ASB_MULTIPLIER;
return 0;
}
@@ -494,12 +544,16 @@ static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
break;
case QAT_ZSTD:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_DECOMPRESS;
break;
default:
return -EINVAL;
}
cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
req_tmpl->u3.asb_threshold.asb_value = 0;
return 0;
}
@@ -631,6 +685,12 @@ static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
return set_vc_config(accel_dev);
}
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
return adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ? adf_wcy_fw_config :
adf_default_fw_config;
}
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
unsigned long fuses = self->fuses[ADF_FUSECTL4];
@@ -653,6 +713,38 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return mask;
}
static u32 get_accel_cap_wcy(struct adf_accel_dev *accel_dev)
{
u32 capabilities_sym;
u32 fuse;
fuse = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
ICP_ACCEL_CAPABILITIES_5G |
ICP_ACCEL_CAPABILITIES_ZUC |
ICP_ACCEL_CAPABILITIES_ZUC_256 |
ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
if (fuse & ICP_ACCEL_GEN6_MASK_EIA3_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
}
if (fuse & ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
if (fuse & ICP_ACCEL_GEN6_MASK_5G_SLICE)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_5G;
if (adf_get_service_enabled(accel_dev) == SVC_SYM)
return capabilities_sym;
return 0;
}
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
u32 capabilities_sym, capabilities_asym;
@@ -661,6 +753,9 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
u32 caps = 0;
u32 fusectl1;
if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
return get_accel_cap_wcy(accel_dev);
fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
/* Read accelerator capabilities mask */
@@ -733,15 +828,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
return ARRAY_SIZE(adf_default_fw_config);
return adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ?
ARRAY_SIZE(adf_wcy_fw_config) :
ARRAY_SIZE(adf_default_fw_config);
}
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
{
int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
const struct adf_fw_config *fw_config;
int id;
id = adf_default_fw_config[obj_num].obj;
fw_config = get_fw_config(accel_dev);
id = fw_config[obj_num].obj;
if (id >= num_fw_objs)
return NULL;
@@ -755,15 +854,22 @@ static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_nu
static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
{
const struct adf_fw_config *fw_config;
if (obj_num >= uof_get_num_objs(accel_dev))
return -EINVAL;
return adf_default_fw_config[obj_num].obj;
fw_config = get_fw_config(accel_dev);
return fw_config[obj_num].obj;
}
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
return adf_default_fw_config[obj_num].ae_mask;
const struct adf_fw_config *fw_config;
fw_config = get_fw_config(accel_dev);
return fw_config[obj_num].ae_mask;
}
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
@@ -873,6 +979,14 @@ static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
init_num_svc_aes(rl_data);
}
static void adf_gen6_init_services_supported(struct adf_hw_device_data *hw_data)
{
if (adf_6xxx_is_wcy(hw_data))
hw_data->services_supported = wcy_services_supported;
else
hw_data->services_supported = services_supported;
}
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_6xxx_class;
@@ -929,11 +1043,12 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->stop_timer = adf_timer_stop;
hw_data->init_device = adf_init_device;
hw_data->enable_pm = enable_pm;
hw_data->services_supported = services_supported;
hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD;
adf_gen6_init_services_supported(hw_data);
adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen6_init_dc_ops(&hw_data->dc_ops);
@@ -941,6 +1056,7 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
adf_gen6_init_ras_ops(&hw_data->ras_ops);
adf_gen6_init_tl_data(&hw_data->tl_data);
adf_gen6_init_rl_data(&hw_data->rl_data);
adf_gen6_init_anti_rb(&hw_data->anti_rb_data);
}
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)

View File

@@ -53,6 +53,12 @@
#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578
#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970
/* Anti-rollback */
#define ADF_GEN6_SVNCHECK_CSR_MSG 0x640004
/* Fuse bits */
#define ADF_GEN6_ANTI_RB_FUSE_BIT BIT(24)
/*
* Watchdog timers
* Timeout is in cycles. Clock speed may vary across products but this
@@ -64,10 +70,14 @@
#define ADF_SSMWDTATHH_OFFSET 0x520C
#define ADF_SSMWDTCNVL_OFFSET 0x5408
#define ADF_SSMWDTCNVH_OFFSET 0x540C
#define ADF_SSMWDTWCPL_OFFSET 0x5608
#define ADF_SSMWDTWCPH_OFFSET 0x560C
#define ADF_SSMWDTUCSL_OFFSET 0x5808
#define ADF_SSMWDTUCSH_OFFSET 0x580C
#define ADF_SSMWDTDCPRL_OFFSET 0x5A08
#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C
#define ADF_SSMWDTWATL_OFFSET 0x5C08
#define ADF_SSMWDTWATH_OFFSET 0x5C0C
#define ADF_SSMWDTPKEL_OFFSET 0x5E08
#define ADF_SSMWDTPKEH_OFFSET 0x5E0C
@@ -139,6 +149,7 @@
#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin"
#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
#define ADF_6XXX_WCY_OBJ "qat_6xxx_wcy.bin"
/* RL constants */
#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100
@@ -159,9 +170,18 @@ enum icp_qat_gen6_slice_mask {
ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2),
ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3),
ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4),
ICP_ACCEL_GEN6_MASK_EIA3_SLICE = BIT(5),
ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6),
ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE = BIT(7),
ICP_ACCEL_GEN6_MASK_5G_SLICE = BIT(8),
};
/* Return true if the device is a wireless crypto (WCY) SKU */
static inline bool adf_6xxx_is_wcy(struct adf_hw_device_data *hw_data)
{
return !(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE);
}
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data);

View File

@@ -16,6 +16,7 @@
#include "adf_gen6_shared.h"
#include "adf_6xxx_hw_data.h"
#include "adf_heartbeat.h"
static int bar_map[] = {
0, /* SRAM */
@@ -53,6 +54,35 @@ static void adf_devmgr_remove(void *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
{
const char *config;
int ret;
/*
* Wireless SKU - symmetric crypto service only
* Non-wireless SKU - crypto service for even devices and compression for odd devices
*/
if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
config = ADF_CFG_SYM;
else
config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
if (ret)
return ret;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, config,
ADF_STR);
if (ret)
return ret;
adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
return 0;
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_pci *accel_pci_dev;
@@ -91,9 +121,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]);
pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]);
if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE))
return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n");
/* Enable PCI device */
ret = pcim_enable_device(pdev);
if (ret)
@@ -182,8 +209,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
ret = adf_dev_up(accel_dev, true);
if (ret)
if (ret) {
adf_dev_down(accel_dev);
return ret;
}
ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev);
if (ret)

View File

@@ -4,6 +4,7 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
intel_qat-y := adf_accel_engine.o \
adf_admin.o \
adf_aer.o \
adf_anti_rb.o \
adf_bank_state.o \
adf_cfg.o \
adf_cfg_services.o \
@@ -29,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \
adf_rl_admin.o \
adf_rl.o \
adf_sysfs.o \
adf_sysfs_anti_rb.o \
adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
adf_timer.o \
@@ -39,6 +41,7 @@ intel_qat-y := adf_accel_engine.o \
qat_bl.o \
qat_comp_algs.o \
qat_compression.o \
qat_comp_zstd_utils.o \
qat_crypto.o \
qat_hal.o \
qat_mig_dev.o \

View File

@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
#include "adf_anti_rb.h"
#include "adf_cfg_common.h"
#include "adf_dc.h"
#include "adf_rl.h"
@@ -58,6 +59,11 @@ enum adf_accel_capabilities {
ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
enum adf_accel_capabilities_ext {
ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S = BIT(0),
ADF_ACCEL_CAPABILITIES_EXT_ZSTD = BIT(1),
};
enum adf_fuses {
ADF_FUSECTL0,
ADF_FUSECTL1,
@@ -328,12 +334,14 @@ struct adf_hw_device_data {
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
struct adf_tl_hw_data tl_data;
struct adf_anti_rb_hw_data anti_rb_data;
struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses[ADF_MAX_FUSES];
u32 straps;
u32 accel_capabilities_mask;
u32 accel_capabilities_ext_mask;
u32 extended_dc_capabilities;
u16 fw_capabilities;
u32 clock_frequency;

View File

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include "adf_cfg.h"
@@ -162,8 +163,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
unsigned long reset_delay;
qat_hal_reset(loader_data->fw_loader);
reset_delay = loader_data->fw_loader->chip_info->reset_delay_us;
if (reset_delay)
fsleep(reset_delay);
if (qat_hal_clr_reset(loader_data->fw_loader))
return -EFAULT;

View File

@@ -6,8 +6,10 @@
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_admin.h"
#include "adf_anti_rb.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_heartbeat.h"
@@ -19,6 +21,7 @@
#define ADF_ADMIN_POLL_DELAY_US 20
#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_ONE_AE 1
#define ADF_ADMIN_RETRY_MAX 60
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -536,6 +539,73 @@ int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
static int adf_send_admin_retry(struct adf_accel_dev *accel_dev, u8 cmd_id,
struct icp_qat_fw_init_admin_resp *resp,
unsigned int sleep_ms)
{
u32 admin_ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask;
struct icp_qat_fw_init_admin_req req = { };
unsigned int retries = ADF_ADMIN_RETRY_MAX;
int ret;
req.cmd_id = cmd_id;
do {
ret = adf_send_admin(accel_dev, &req, resp, admin_ae_mask);
if (!ret)
return 0;
if (resp->status != ICP_QAT_FW_INIT_RESP_STATUS_RETRY)
return ret;
msleep(sleep_ms);
} while (--retries);
return -ETIMEDOUT;
}
static int adf_send_admin_svn(struct adf_accel_dev *accel_dev, u8 cmd_id,
struct icp_qat_fw_init_admin_resp *resp)
{
return adf_send_admin_retry(accel_dev, cmd_id, resp, ADF_SVN_RETRY_MS);
}
int adf_send_admin_arb_query(struct adf_accel_dev *accel_dev, int cmd, u8 *svn)
{
struct icp_qat_fw_init_admin_resp resp = { };
int ret;
ret = adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_READ, &resp);
if (ret)
return ret;
switch (cmd) {
case ARB_ENFORCED_MIN_SVN:
*svn = resp.enforced_min_svn;
break;
case ARB_PERMANENT_MIN_SVN:
*svn = resp.permanent_min_svn;
break;
case ARB_ACTIVE_SVN:
*svn = resp.active_svn;
break;
default:
*svn = 0;
dev_err(&GET_DEV(accel_dev),
"Unknown secure version number request\n");
ret = -EINVAL;
}
return ret;
}
int adf_send_admin_arb_commit(struct adf_accel_dev *accel_dev)
{
struct icp_qat_fw_init_admin_resp resp = { };
return adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_COMMIT, &resp);
}
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
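adf_send_admin_arb_query() and adf_send_admin_arb_commit() give the rest of the driver a simple way to read and commit the security version numbers (SVN) over the admin interface, with adf_send_admin_retry() absorbing firmware-side ICP_QAT_FW_INIT_RESP_STATUS_RETRY responses. The new adf_sysfs_anti_rb.c (listed in the Makefile above, contents not shown in this excerpt) presumably exposes these values; a hedged sketch of what such a read-only attribute could look like follows. The attribute name and surrounding glue are assumptions, not code from this series.

/*
 * Sketch only: a sysfs "show" handler built on the new query helper.
 * The attribute name and error handling are illustrative assumptions.
 */
static ssize_t enforced_min_svn_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	u8 svn;
	int ret;

	if (!accel_dev)
		return -EINVAL;

	ret = adf_send_admin_arb_query(accel_dev, ARB_ENFORCED_MIN_SVN, &svn);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", svn);
}
static DEVICE_ATTR_RO(enforced_min_svn);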

Some files were not shown because too many files have changed in this diff.