Merge tag 'libcrypto-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux

Pull crypto library updates from Eric Biggers:

 - Add support for verifying ML-DSA signatures.

   ML-DSA (Module-Lattice-Based Digital Signature Algorithm) is a
   recently-standardized post-quantum (quantum-resistant) signature
   algorithm. It was known as Dilithium pre-standardization.

   The first use case in the kernel will be module signing. But there
   are also other users of RSA and ECDSA signatures in the kernel that
   might want to upgrade to ML-DSA eventually.

 - Improve the AES library:

     - Make the AES key expansion and single block encryption and
       decryption functions use the architecture-optimized AES code.
       Enable these optimizations by default.

     - Support preparing an AES key for encryption-only, using about
       half as much memory as a bidirectional key.

     - Replace the existing two generic implementations of AES with a
       single one.

 - Simplify how Adiantum message hashing is implemented. Remove the
   "nhpoly1305" crypto_shash in favor of direct lib/crypto/ support for
   NH hashing, and enable optimizations by default.

* tag 'libcrypto-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux: (53 commits)
  lib/crypto: mldsa: Clarify the documentation for mldsa_verify() slightly
  lib/crypto: aes: Drop 'volatile' from aes_sbox and aes_inv_sbox
  lib/crypto: aes: Remove old AES en/decryption functions
  lib/crypto: aesgcm: Use new AES library API
  lib/crypto: aescfb: Use new AES library API
  crypto: omap - Use new AES library API
  crypto: inside-secure - Use new AES library API
  crypto: drbg - Use new AES library API
  crypto: crypto4xx - Use new AES library API
  crypto: chelsio - Use new AES library API
  crypto: ccp - Use new AES library API
  crypto: x86/aes-gcm - Use new AES library API
  crypto: arm64/ghash - Use new AES library API
  crypto: arm/ghash - Use new AES library API
  staging: rtl8723bs: core: Use new AES library API
  net: phy: mscc: macsec: Use new AES library API
  chelsio: Use new AES library API
  Bluetooth: SMP: Use new AES library API
  crypto: x86/aes - Remove the superseded AES-NI crypto_cipher
  lib/crypto: x86/aes: Add AES-NI optimization
  ...
This commit is contained in:
Linus Torvalds
2026-02-10 08:31:09 -08:00
141 changed files with 6668 additions and 5264 deletions

View File

@@ -455,11 +455,6 @@ API, but the filenames mode still does.
- Adiantum
- Mandatory:
- CONFIG_CRYPTO_ADIANTUM
- Recommended:
- arm32: CONFIG_CRYPTO_NHPOLY1305_NEON
- arm64: CONFIG_CRYPTO_NHPOLY1305_NEON
- x86: CONFIG_CRYPTO_NHPOLY1305_SSE2
- x86: CONFIG_CRYPTO_NHPOLY1305_AVX2
- AES-128-CBC-ESSIV and AES-128-CBC-CTS:
- Mandatory:

View File

@@ -98,7 +98,6 @@ CONFIG_CRYPTO_SELFTESTS=y
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_SEQIV=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
# CONFIG_CRYPTO_HW is not set

View File

@@ -1286,7 +1286,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_DEV_SUN4I_SS=m

View File

@@ -706,7 +706,7 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_MICHAEL_MIC=y
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_DEV_OMAP=m
CONFIG_CRYPTO_DEV_OMAP_SHAM=m

View File

@@ -657,7 +657,7 @@ CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES=m
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y

View File

@@ -23,38 +23,9 @@ config CRYPTO_GHASH_ARM_CE
that is part of the ARMv8 Crypto Extensions, or a slower variant that
uses the vmull.p8 instruction that is part of the basic NEON ISA.
config CRYPTO_NHPOLY1305_NEON
tristate "Hash functions: NHPoly1305 (NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_NHPOLY1305
help
NHPoly1305 hash function (Adiantum)
Architecture: arm using:
- NEON (Advanced SIMD) extensions
config CRYPTO_AES_ARM
tristate "Ciphers: AES"
select CRYPTO_ALGAPI
select CRYPTO_AES
help
Block ciphers: AES cipher algorithms (FIPS-197)
Architecture: arm
On ARM processors without the Crypto Extensions, this is the
fastest AES implementation for single blocks. For multiple
blocks, the NEON bit-sliced implementation is usually faster.
This implementation may be vulnerable to cache timing attacks,
since it uses lookup tables. However, as countermeasures it
disables IRQs and preloads the tables; it is hoped this makes
such attacks very difficult.
config CRYPTO_AES_ARM_BS
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
help

View File

@@ -3,15 +3,11 @@
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o

View File

@@ -1,69 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Scalar AES core transform
*
* Copyright (C) 2017 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include "aes-cipher.h"
EXPORT_SYMBOL_GPL(__aes_arm_encrypt);
EXPORT_SYMBOL_GPL(__aes_arm_decrypt);
static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm_encrypt(ctx->key_enc, rounds, in, out);
}
static void aes_arm_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm_decrypt(ctx->key_dec, rounds, in, out);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-arm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
.cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
.cra_cipher.cia_setkey = crypto_aes_set_key,
.cra_cipher.cia_encrypt = aes_arm_encrypt,
.cra_cipher.cia_decrypt = aes_arm_decrypt,
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
.cra_alignmask = 3,
#endif
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Scalar AES cipher for ARM");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");

View File

@@ -1,13 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef ARM_CRYPTO_AES_CIPHER_H
#define ARM_CRYPTO_AES_CIPHER_H
#include <linux/linkage.h>
#include <linux/types.h>
asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds,
const u8 *in, u8 *out);
asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds,
const u8 *in, u8 *out);
#endif /* ARM_CRYPTO_AES_CIPHER_H */

View File

@@ -12,7 +12,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>
#include "aes-cipher.h"
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
@@ -48,13 +47,13 @@ struct aesbs_ctx {
struct aesbs_cbc_ctx {
struct aesbs_ctx key;
struct crypto_aes_ctx fallback;
struct aes_enckey fallback;
};
struct aesbs_xts_ctx {
struct aesbs_ctx key;
struct crypto_aes_ctx fallback;
struct crypto_aes_ctx tweak_key;
struct aes_key fallback;
struct aes_enckey tweak_key;
};
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -122,14 +121,19 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
err = aes_expandkey(&ctx->fallback, in_key, key_len);
err = aes_prepareenckey(&ctx->fallback, in_key, key_len);
if (err)
return err;
ctx->key.rounds = 6 + key_len / 4;
/*
* Note: this assumes that the arm implementation of the AES library
* stores the standard round keys in k.rndkeys.
*/
kernel_neon_begin();
aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
aesbs_convert_key(ctx->key.rk, ctx->fallback.k.rndkeys,
ctx->key.rounds);
kernel_neon_end();
return 0;
@@ -152,8 +156,7 @@ static int cbc_encrypt(struct skcipher_request *req)
do {
crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
__aes_arm_encrypt(ctx->fallback.key_enc,
ctx->key.rounds, dst, dst);
aes_encrypt(&ctx->fallback, dst, dst);
prev = dst;
src += AES_BLOCK_SIZE;
dst += AES_BLOCK_SIZE;
@@ -239,10 +242,10 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return err;
key_len /= 2;
err = aes_expandkey(&ctx->fallback, in_key, key_len);
err = aes_preparekey(&ctx->fallback, in_key, key_len);
if (err)
return err;
err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
err = aes_prepareenckey(&ctx->tweak_key, in_key + key_len, key_len);
if (err)
return err;
@@ -279,7 +282,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
if (err)
return err;
__aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);
aes_encrypt(&ctx->tweak_key, walk.iv, walk.iv);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -311,9 +314,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
if (encrypt)
__aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
aes_encrypt(&ctx->fallback, buf, buf);
else
__aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);
aes_decrypt(&ctx->fallback, buf, buf);
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);

View File

@@ -204,20 +204,24 @@ static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *inkey,
unsigned int keylen)
{
struct gcm_key *ctx = crypto_aead_ctx(tfm);
struct crypto_aes_ctx aes_ctx;
struct aes_enckey aes_key;
be128 h, k;
int ret;
ret = aes_expandkey(&aes_ctx, inkey, keylen);
ret = aes_prepareenckey(&aes_key, inkey, keylen);
if (ret)
return -EINVAL;
aes_encrypt(&aes_ctx, (u8 *)&k, (u8[AES_BLOCK_SIZE]){});
aes_encrypt(&aes_key, (u8 *)&k, (u8[AES_BLOCK_SIZE]){});
memcpy(ctx->rk, aes_ctx.key_enc, sizeof(ctx->rk));
/*
* Note: this assumes that the arm implementation of the AES library
* stores the standard round keys in k.rndkeys.
*/
memcpy(ctx->rk, aes_key.k.rndkeys, sizeof(ctx->rk));
ctx->rounds = 6 + keylen / 4;
memzero_explicit(&aes_ctx, sizeof(aes_ctx));
memzero_explicit(&aes_key, sizeof(aes_key));
ghash_reflect(ctx->h[0], &k);

View File

@@ -1,80 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (NEON accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
kernel_neon_begin();
crypto_nhpoly1305_update_helper(desc, src, n, nh_neon);
kernel_neon_end();
src += n;
srclen -= n;
} while (srclen);
return 0;
}
static int nhpoly1305_neon_digest(struct shash_desc *desc,
const u8 *src, unsigned int srclen, u8 *out)
{
return crypto_nhpoly1305_init(desc) ?:
nhpoly1305_neon_update(desc, src, srclen) ?:
crypto_nhpoly1305_final(desc, out);
}
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-neon",
.base.cra_priority = 200,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_neon_update,
.final = crypto_nhpoly1305_final,
.digest = nhpoly1305_neon_digest,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-neon");

View File

@@ -15,16 +15,6 @@ config CRYPTO_GHASH_ARM64_CE
Architecture: arm64 using:
- ARMv8 Crypto Extensions
config CRYPTO_NHPOLY1305_NEON
tristate "Hash functions: NHPoly1305 (NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_NHPOLY1305
help
NHPoly1305 hash function (Adiantum)
Architecture: arm64 using:
- NEON (Advanced SIMD) extensions
config CRYPTO_SM3_NEON
tristate "Hash functions: SM3 (NEON)"
depends on KERNEL_MODE_NEON
@@ -47,35 +37,12 @@ config CRYPTO_SM3_ARM64_CE
Architecture: arm64 using:
- ARMv8.2 Crypto Extensions
config CRYPTO_AES_ARM64
tristate "Ciphers: AES, modes: ECB, CBC, CTR, CTS, XCTR, XTS"
select CRYPTO_AES
select CRYPTO_LIB_SHA256
help
Block ciphers: AES cipher algorithms (FIPS-197)
Length-preserving ciphers: AES with ECB, CBC, CTR, CTS,
XCTR, and XTS modes
AEAD cipher: AES with CBC, ESSIV, and SHA-256
for fscrypt and dm-crypt
Architecture: arm64
config CRYPTO_AES_ARM64_CE
tristate "Ciphers: AES (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
help
Block ciphers: AES cipher algorithms (FIPS-197)
Architecture: arm64 using:
- ARMv8 Crypto Extensions
config CRYPTO_AES_ARM64_CE_BLK
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_AES_ARM64_CE
select CRYPTO_LIB_AES
select CRYPTO_LIB_SHA256
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -93,6 +60,7 @@ config CRYPTO_AES_ARM64_NEON_BLK
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_LIB_SHA256
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -174,7 +142,6 @@ config CRYPTO_AES_ARM64_CE_CCM
tristate "AEAD cipher: AES in CCM mode (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_AES_ARM64_CE
select CRYPTO_AES_ARM64_CE_BLK
select CRYPTO_AEAD
select CRYPTO_LIB_AES

View File

@@ -29,9 +29,6 @@ sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o
obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
@@ -41,11 +38,5 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
aes-neon-blk-y := aes-glue-neon.o aes-neon.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o
aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o

View File

@@ -17,8 +17,6 @@
#include <asm/simd.h>
#include "aes-ce-setkey.h"
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
static int num_rounds(struct crypto_aes_ctx *ctx)

View File

@@ -1,178 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
*
* Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/module.h>
#include "aes-ce-setkey.h"
MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
struct aes_block {
u8 b[AES_BLOCK_SIZE];
};
asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
asmlinkage u32 __aes_ce_sub(u32 l);
asmlinkage void __aes_ce_invert(struct aes_block *out,
const struct aes_block *in);
static int num_rounds(struct crypto_aes_ctx *ctx)
{
/*
* # of rounds specified by AES:
* 128 bit key 10 rounds
* 192 bit key 12 rounds
* 256 bit key 14 rounds
* => n byte key => 6 + (n/4) rounds
*/
return 6 + ctx->key_length / 4;
}
static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (!crypto_simd_usable()) {
aes_encrypt(ctx, dst, src);
return;
}
scoped_ksimd()
__aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
}
static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (!crypto_simd_usable()) {
aes_decrypt(ctx, dst, src);
return;
}
scoped_ksimd()
__aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
}
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
/*
* The AES key schedule round constants
*/
static u8 const rcon[] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
};
u32 kwords = key_len / sizeof(u32);
struct aes_block *key_enc, *key_dec;
int i, j;
if (key_len != AES_KEYSIZE_128 &&
key_len != AES_KEYSIZE_192 &&
key_len != AES_KEYSIZE_256)
return -EINVAL;
ctx->key_length = key_len;
for (i = 0; i < kwords; i++)
ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
scoped_ksimd() {
for (i = 0; i < sizeof(rcon); i++) {
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^
rcon[i] ^ rki[0];
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];
if (key_len == AES_KEYSIZE_192) {
if (i >= 7)
break;
rko[4] = rko[3] ^ rki[4];
rko[5] = rko[4] ^ rki[5];
} else if (key_len == AES_KEYSIZE_256) {
if (i >= 6)
break;
rko[4] = __aes_ce_sub(rko[3]) ^ rki[4];
rko[5] = rko[4] ^ rki[5];
rko[6] = rko[5] ^ rki[6];
rko[7] = rko[6] ^ rki[7];
}
}
/*
* Generate the decryption keys for the Equivalent Inverse
* Cipher. This involves reversing the order of the round
* keys, and applying the Inverse Mix Columns transformation on
* all but the first and the last one.
*/
key_enc = (struct aes_block *)ctx->key_enc;
key_dec = (struct aes_block *)ctx->key_dec;
j = num_rounds(ctx);
key_dec[0] = key_enc[j];
for (i = 1, j--; j > 0; i++, j--)
__aes_ce_invert(key_dec + i, key_enc + j);
key_dec[i] = key_enc[0];
}
return 0;
}
EXPORT_SYMBOL(ce_aes_expandkey);
int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
return ce_aes_expandkey(ctx, in_key, key_len);
}
EXPORT_SYMBOL(ce_aes_setkey);
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-ce",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = ce_aes_setkey,
.cia_encrypt = aes_cipher_encrypt,
.cia_decrypt = aes_cipher_decrypt
}
};
static int __init aes_mod_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_mod_exit(void)
{
crypto_unregister_alg(&aes_alg);
}
module_cpu_feature_match(AES, aes_mod_init);
module_exit(aes_mod_exit);

View File

@@ -1,6 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len);
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);

View File

@@ -1,63 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Scalar AES core transform
*
* Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
static void aes_arm64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm64_encrypt(ctx->key_enc, out, in, rounds);
}
static void aes_arm64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm64_decrypt(ctx->key_dec, out, in, rounds);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-arm64",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
.cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
.cra_cipher.cia_setkey = crypto_aes_set_key,
.cra_cipher.cia_encrypt = aes_arm64_encrypt,
.cra_cipher.cia_decrypt = aes_arm64_decrypt
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Scalar AES cipher for arm64");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");

View File

@@ -21,8 +21,6 @@
#include <asm/hwcap.h>
#include <asm/simd.h>
#include "aes-ce-setkey.h"
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE "ce"
#define PRIO 300

View File

@@ -40,7 +40,7 @@ struct arm_ghash_desc_ctx {
};
struct gcm_aes_ctx {
struct crypto_aes_ctx aes_key;
struct aes_enckey aes_key;
u8 nonce[RFC4106_NONCE_SIZE];
struct ghash_key ghash_key;
};
@@ -186,18 +186,6 @@ static struct shash_alg ghash_alg = {
.statesize = sizeof(struct ghash_desc_ctx),
};
static int num_rounds(struct crypto_aes_ctx *ctx)
{
/*
* # of rounds specified by AES:
* 128 bit key 10 rounds
* 192 bit key 12 rounds
* 256 bit key 14 rounds
* => n byte key => 6 + (n/4) rounds
*/
return 6 + ctx->key_length / 4;
}
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *inkey,
unsigned int keylen)
{
@@ -206,7 +194,7 @@ static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *inkey,
be128 h;
int ret;
ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
ret = aes_prepareenckey(&ctx->aes_key, inkey, keylen);
if (ret)
return -EINVAL;
@@ -296,7 +284,6 @@ static int gcm_encrypt(struct aead_request *req, char *iv, int assoclen)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
int nrounds = num_rounds(&ctx->aes_key);
struct skcipher_walk walk;
u8 buf[AES_BLOCK_SIZE];
u64 dg[2] = {};
@@ -331,8 +318,8 @@ static int gcm_encrypt(struct aead_request *req, char *iv, int assoclen)
scoped_ksimd()
pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h,
dg, iv, ctx->aes_key.key_enc, nrounds,
tag);
dg, iv, ctx->aes_key.k.rndkeys,
ctx->aes_key.nrounds, tag);
if (unlikely(!nbytes))
break;
@@ -359,7 +346,6 @@ static int gcm_decrypt(struct aead_request *req, char *iv, int assoclen)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
int nrounds = num_rounds(&ctx->aes_key);
struct skcipher_walk walk;
u8 otag[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
@@ -401,8 +387,9 @@ static int gcm_decrypt(struct aead_request *req, char *iv, int assoclen)
scoped_ksimd()
ret = pmull_gcm_decrypt(nbytes, dst, src,
ctx->ghash_key.h,
dg, iv, ctx->aes_key.key_enc,
nrounds, tag, otag, authsize);
dg, iv, ctx->aes_key.k.rndkeys,
ctx->aes_key.nrounds, tag, otag,
authsize);
if (unlikely(!nbytes))
break;

View File

@@ -1,79 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (ARM64 NEON accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
scoped_ksimd()
crypto_nhpoly1305_update_helper(desc, src, n, nh_neon);
src += n;
srclen -= n;
} while (srclen);
return 0;
}
static int nhpoly1305_neon_digest(struct shash_desc *desc,
const u8 *src, unsigned int srclen, u8 *out)
{
return crypto_nhpoly1305_init(desc) ?:
nhpoly1305_neon_update(desc, src, srclen) ?:
crypto_nhpoly1305_final(desc, out);
}
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-neon",
.base.cra_priority = 200,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_neon_update,
.final = crypto_nhpoly1305_final,
.digest = nhpoly1305_neon_digest,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-neon");

View File

@@ -555,7 +555,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -512,7 +512,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -532,7 +532,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -504,7 +504,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -514,7 +514,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -531,7 +531,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -618,7 +618,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -504,7 +504,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -505,7 +505,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -521,7 +521,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -502,7 +502,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -502,7 +502,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -5,9 +5,9 @@ menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
config CRYPTO_AES_PPC_SPE
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
depends on SPE
select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
help
Block ciphers: AES cipher algorithms (FIPS-197)
Length-preserving ciphers: AES with ECB, CBC, CTR, and XTS modes
Architecture: powerpc using:

View File

@@ -9,9 +9,9 @@ obj-$(CONFIG_CRYPTO_AES_PPC_SPE) += aes-ppc-spe.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
aes-ppc-spe-y := aes-spe-glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
vmx-crypto-objs := vmx.o ghashp8-ppc.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
override flavour := linux-ppc64le
@@ -26,15 +26,14 @@ endif
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $< $(flavour) > $@
targets += aesp10-ppc.S ghashp10-ppc.S aesp8-ppc.S ghashp8-ppc.S
targets += aesp10-ppc.S ghashp10-ppc.S ghashp8-ppc.S
$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(call if_changed,perl)
$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(call if_changed,perl)
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y

View File

@@ -44,7 +44,7 @@ asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
unsigned char *aad, unsigned int alen);
asmlinkage void gcm_update(u8 *iv, void *Xi);
struct aes_key {
struct p10_aes_key {
u8 key[AES_MAX_KEYLENGTH];
u64 rounds;
};
@@ -63,7 +63,7 @@ struct Hash_ctx {
};
struct p10_aes_gcm_ctx {
struct aes_key enc_key;
struct p10_aes_key enc_key;
u8 nonce[RFC4106_NONCE_SIZE];
};

View File

@@ -51,30 +51,6 @@ struct ppc_xts_ctx {
u32 rounds;
};
extern void ppc_encrypt_aes(u8 *out, const u8 *in, u32 *key_enc, u32 rounds);
extern void ppc_decrypt_aes(u8 *out, const u8 *in, u32 *key_dec, u32 rounds);
extern void ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
u32 bytes);
extern void ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
u32 bytes);
extern void ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
u32 bytes, u8 *iv);
extern void ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
u32 bytes, u8 *iv);
extern void ppc_crypt_ctr (u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
u32 bytes, u8 *iv);
extern void ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
u32 bytes, u8 *iv, u32 *key_twk);
extern void ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
u32 bytes, u8 *iv, u32 *key_twk);
extern void ppc_expand_key_128(u32 *key_enc, const u8 *key);
extern void ppc_expand_key_192(u32 *key_enc, const u8 *key);
extern void ppc_expand_key_256(u32 *key_enc, const u8 *key);
extern void ppc_generate_decrypt_key(u32 *key_dec,u32 *key_enc,
unsigned int key_len);
static void spe_begin(void)
{
/* disable preemption and save users SPE registers if required */
@@ -89,10 +65,10 @@ static void spe_end(void)
preempt_enable();
}
static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *in_key, unsigned int key_len)
{
struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
@@ -116,12 +92,6 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *in_key, unsigned int key_len)
{
return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
}
static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -159,24 +129,6 @@ static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return 0;
}
static void ppc_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
spe_begin();
ppc_encrypt_aes(out, in, ctx->key_enc, ctx->rounds);
spe_end();
}
static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
spe_begin();
ppc_decrypt_aes(out, in, ctx->key_dec, ctx->rounds);
spe_end();
}
static int ppc_ecb_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -410,26 +362,6 @@ static int ppc_xts_decrypt(struct skcipher_request *req)
* with kmalloc() in the crypto infrastructure
*/
static struct crypto_alg aes_cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-ppc-spe",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ppc_aes_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = ppc_aes_setkey,
.cia_encrypt = ppc_aes_encrypt,
.cia_decrypt = ppc_aes_decrypt
}
}
};
static struct skcipher_alg aes_skcipher_algs[] = {
{
.base.cra_name = "ecb(aes)",
@@ -488,22 +420,12 @@ static struct skcipher_alg aes_skcipher_algs[] = {
static int __init ppc_aes_mod_init(void)
{
int err;
err = crypto_register_alg(&aes_cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(aes_skcipher_algs,
ARRAY_SIZE(aes_skcipher_algs));
if (err)
crypto_unregister_alg(&aes_cipher_alg);
return err;
return crypto_register_skciphers(aes_skcipher_algs,
ARRAY_SIZE(aes_skcipher_algs));
}
static void __exit ppc_aes_mod_fini(void)
{
crypto_unregister_alg(&aes_cipher_alg);
crypto_unregister_skciphers(aes_skcipher_algs,
ARRAY_SIZE(aes_skcipher_algs));
}

View File

@@ -1,134 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* AES routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
*
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include "aesp8-ppc.h"
struct p8_aes_ctx {
struct crypto_cipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
static int p8_aes_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
struct crypto_cipher *fallback;
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
crypto_cipher_set_flags(fallback,
crypto_cipher_get_flags((struct
crypto_cipher *)
tfm));
ctx->fallback = fallback;
return 0;
}
static void p8_aes_exit(struct crypto_tfm *tfm)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
crypto_free_cipher(ctx->fallback);
ctx->fallback = NULL;
}
}
static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
int ret;
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (!crypto_simd_usable()) {
crypto_cipher_encrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_encrypt(src, dst, &ctx->enc_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
}
}
static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (!crypto_simd_usable()) {
crypto_cipher_decrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_decrypt(src, dst, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
}
}
struct crypto_alg p8_aes_alg = {
.cra_name = "aes",
.cra_driver_name = "p8_aes",
.cra_module = THIS_MODULE,
.cra_priority = 1000,
.cra_type = NULL,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
.cra_alignmask = 0,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_aes_ctx),
.cra_init = p8_aes_init,
.cra_exit = p8_aes_exit,
.cra_cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = p8_aes_setkey,
.cia_encrypt = p8_aes_encrypt,
.cia_decrypt = p8_aes_decrypt,
},
};

View File

@@ -21,8 +21,8 @@
struct p8_aes_cbc_ctx {
struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct p8_aes_key enc_key;
struct p8_aes_key dec_key;
};
static int p8_aes_cbc_init(struct crypto_skcipher *tfm)

View File

@@ -21,7 +21,7 @@
struct p8_aes_ctr_ctx {
struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct p8_aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_skcipher *tfm)

View File

@@ -22,9 +22,9 @@
struct p8_aes_xts_ctx {
struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct aes_key tweak_key;
struct p8_aes_key enc_key;
struct p8_aes_key dec_key;
struct p8_aes_key tweak_key;
};
static int p8_aes_xts_init(struct crypto_skcipher *tfm)

View File

@@ -2,29 +2,7 @@
#include <linux/types.h>
#include <crypto/aes.h>
struct aes_key {
u8 key[AES_MAX_KEYLENGTH];
int rounds;
};
extern struct shash_alg p8_ghash_alg;
extern struct crypto_alg p8_aes_alg;
extern struct skcipher_alg p8_aes_cbc_alg;
extern struct skcipher_alg p8_aes_ctr_alg;
extern struct skcipher_alg p8_aes_xts_alg;
int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
struct aes_key *key);
int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
struct aes_key *key);
void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key);
void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key);
void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
const struct aes_key *key, u8 *iv, const int enc);
void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
size_t len, const struct aes_key *key,
const u8 *iv);
void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
const struct aes_key *key1, const struct aes_key *key2, u8 *iv);

View File

@@ -27,13 +27,9 @@ static int __init p8_init(void)
if (ret)
goto err;
ret = crypto_register_alg(&p8_aes_alg);
if (ret)
goto err_unregister_ghash;
ret = crypto_register_skcipher(&p8_aes_cbc_alg);
if (ret)
goto err_unregister_aes;
goto err_unregister_ghash;
ret = crypto_register_skcipher(&p8_aes_ctr_alg);
if (ret)
@@ -49,8 +45,6 @@ err_unregister_aes_ctr:
crypto_unregister_skcipher(&p8_aes_ctr_alg);
err_unregister_aes_cbc:
crypto_unregister_skcipher(&p8_aes_cbc_alg);
err_unregister_aes:
crypto_unregister_alg(&p8_aes_alg);
err_unregister_ghash:
crypto_unregister_shash(&p8_ghash_alg);
err:
@@ -62,7 +56,6 @@ static void __exit p8_exit(void)
crypto_unregister_skcipher(&p8_aes_xts_alg);
crypto_unregister_skcipher(&p8_aes_ctr_alg);
crypto_unregister_skcipher(&p8_aes_cbc_alg);
crypto_unregister_alg(&p8_aes_alg);
crypto_unregister_shash(&p8_ghash_alg);
}
@@ -74,4 +67,3 @@ MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
"support on Power 8");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

View File

@@ -6,11 +6,9 @@ config CRYPTO_AES_RISCV64
tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS"
depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
help
Block cipher: AES cipher algorithms
Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTS
Architecture: riscv64 using:

View File

@@ -51,8 +51,10 @@
// - If AES-256, loads round keys into v1-v15 and continues onwards.
//
// Also sets vl=4 and vtype=e32,m1,ta,ma. Clobbers t0 and t1.
.macro aes_begin keyp, label128, label192
.macro aes_begin keyp, label128, label192, key_len
.ifb \key_len
lwu t0, 480(\keyp) // t0 = key length in bytes
.endif
li t1, 24 // t1 = key length for AES-192
vsetivli zero, 4, e32, m1, ta, ma
vle32.v v1, (\keyp)
@@ -76,12 +78,20 @@
vle32.v v10, (\keyp)
addi \keyp, \keyp, 16
vle32.v v11, (\keyp)
.ifb \key_len
blt t0, t1, \label128 // If AES-128, goto label128.
.else
blt \key_len, t1, \label128 // If AES-128, goto label128.
.endif
addi \keyp, \keyp, 16
vle32.v v12, (\keyp)
addi \keyp, \keyp, 16
vle32.v v13, (\keyp)
.ifb \key_len
beq t0, t1, \label192 // If AES-192, goto label192.
.else
beq \key_len, t1, \label192 // If AES-192, goto label192.
.endif
// Else, it's AES-256.
addi \keyp, \keyp, 16
vle32.v v14, (\keyp)

View File

@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* AES using the RISC-V vector crypto extensions. Includes the bare block
* cipher and the ECB, CBC, CBC-CTS, CTR, and XTS modes.
* AES modes using the RISC-V vector crypto extensions
*
* Copyright (C) 2023 VRULL GmbH
* Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
@@ -15,7 +14,6 @@
#include <asm/simd.h>
#include <asm/vector.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -23,13 +21,6 @@
#include <linux/linkage.h>
#include <linux/module.h>
asmlinkage void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
const u8 in[AES_BLOCK_SIZE],
u8 out[AES_BLOCK_SIZE]);
asmlinkage void aes_decrypt_zvkned(const struct crypto_aes_ctx *key,
const u8 in[AES_BLOCK_SIZE],
u8 out[AES_BLOCK_SIZE]);
asmlinkage void aes_ecb_encrypt_zvkned(const struct crypto_aes_ctx *key,
const u8 *in, u8 *out, size_t len);
asmlinkage void aes_ecb_decrypt_zvkned(const struct crypto_aes_ctx *key,
@@ -86,14 +77,6 @@ static int riscv64_aes_setkey(struct crypto_aes_ctx *ctx,
return aes_expandkey(ctx, key, keylen);
}
static int riscv64_aes_setkey_cipher(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
return riscv64_aes_setkey(ctx, key, keylen);
}
static int riscv64_aes_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -102,34 +85,6 @@ static int riscv64_aes_setkey_skcipher(struct crypto_skcipher *tfm,
return riscv64_aes_setkey(ctx, key, keylen);
}
/* Bare AES, without a mode of operation */
static void riscv64_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (crypto_simd_usable()) {
kernel_vector_begin();
aes_encrypt_zvkned(ctx, src, dst);
kernel_vector_end();
} else {
aes_encrypt(ctx, dst, src);
}
}
static void riscv64_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (crypto_simd_usable()) {
kernel_vector_begin();
aes_decrypt_zvkned(ctx, src, dst);
kernel_vector_end();
} else {
aes_decrypt(ctx, dst, src);
}
}
/* AES-ECB */
static inline int riscv64_aes_ecb_crypt(struct skcipher_request *req, bool enc)
@@ -338,7 +293,7 @@ static int riscv64_aes_ctr_crypt(struct skcipher_request *req)
struct riscv64_aes_xts_ctx {
struct crypto_aes_ctx ctx1;
struct crypto_aes_ctx ctx2;
struct aes_enckey tweak_key;
};
static int riscv64_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -348,7 +303,7 @@ static int riscv64_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
return xts_verify_key(tfm, key, keylen) ?:
riscv64_aes_setkey(&ctx->ctx1, key, keylen / 2) ?:
riscv64_aes_setkey(&ctx->ctx2, key + keylen / 2, keylen / 2);
aes_prepareenckey(&ctx->tweak_key, key + keylen / 2, keylen / 2);
}
static int riscv64_aes_xts_crypt(struct skcipher_request *req, bool enc)
@@ -366,9 +321,7 @@ static int riscv64_aes_xts_crypt(struct skcipher_request *req, bool enc)
return -EINVAL;
/* Encrypt the IV with the tweak key to get the first tweak. */
kernel_vector_begin();
aes_encrypt_zvkned(&ctx->ctx2, req->iv, req->iv);
kernel_vector_end();
aes_encrypt(&ctx->tweak_key, req->iv, req->iv);
err = skcipher_walk_virt(&walk, req, false);
@@ -456,23 +409,6 @@ static int riscv64_aes_xts_decrypt(struct skcipher_request *req)
/* Algorithm definitions */
static struct crypto_alg riscv64_zvkned_aes_cipher_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_priority = 300,
.cra_name = "aes",
.cra_driver_name = "aes-riscv64-zvkned",
.cra_cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = riscv64_aes_setkey_cipher,
.cia_encrypt = riscv64_aes_encrypt,
.cia_decrypt = riscv64_aes_decrypt,
},
.cra_module = THIS_MODULE,
};
static struct skcipher_alg riscv64_zvkned_aes_skcipher_algs[] = {
{
.setkey = riscv64_aes_setkey_skcipher,
@@ -574,15 +510,11 @@ static int __init riscv64_aes_mod_init(void)
if (riscv_isa_extension_available(NULL, ZVKNED) &&
riscv_vector_vlen() >= 128) {
err = crypto_register_alg(&riscv64_zvkned_aes_cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(
riscv64_zvkned_aes_skcipher_algs,
ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
if (err)
goto unregister_zvkned_cipher_alg;
return err;
if (riscv_isa_extension_available(NULL, ZVKB)) {
err = crypto_register_skcipher(
@@ -607,8 +539,6 @@ unregister_zvkned_zvkb_skcipher_alg:
unregister_zvkned_skcipher_algs:
crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
unregister_zvkned_cipher_alg:
crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
return err;
}
@@ -620,7 +550,6 @@ static void __exit riscv64_aes_mod_exit(void)
crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
}
module_init(riscv64_aes_mod_init);

View File

@@ -56,33 +56,6 @@
#define LEN a3
#define IVP a4
.macro __aes_crypt_zvkned enc, keylen
vle32.v v16, (INP)
aes_crypt v16, \enc, \keylen
vse32.v v16, (OUTP)
ret
.endm
.macro aes_crypt_zvkned enc
aes_begin KEYP, 128f, 192f
__aes_crypt_zvkned \enc, 256
128:
__aes_crypt_zvkned \enc, 128
192:
__aes_crypt_zvkned \enc, 192
.endm
// void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
// const u8 in[16], u8 out[16]);
SYM_FUNC_START(aes_encrypt_zvkned)
aes_crypt_zvkned 1
SYM_FUNC_END(aes_encrypt_zvkned)
// Same prototype and calling convention as the encryption function
SYM_FUNC_START(aes_decrypt_zvkned)
aes_crypt_zvkned 0
SYM_FUNC_END(aes_decrypt_zvkned)
.macro __aes_ecb_crypt enc, keylen
srli t0, LEN, 2
// t0 is the remaining length in 32-bit words. It's a multiple of 4.

View File

@@ -771,7 +771,7 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -755,7 +755,7 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m

View File

@@ -14,10 +14,8 @@ config CRYPTO_GHASH_S390
config CRYPTO_AES_S390
tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS, GCM"
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
help
Block cipher: AES cipher algorithms (FIPS 197)
AEAD cipher: AES with GCM
Length-preserving ciphers: AES with ECB, CBC, XTS, and CTR modes

View File

@@ -20,7 +20,6 @@
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
@@ -45,7 +44,6 @@ struct s390_aes_ctx {
unsigned long fc;
union {
struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
};
@@ -72,109 +70,6 @@ struct gcm_sg_walk {
unsigned int nbytes;
};
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
fc = (key_len == 16) ? CPACF_KM_AES_128 :
(key_len == 24) ? CPACF_KM_AES_192 :
(key_len == 32) ? CPACF_KM_AES_256 : 0;
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
if (!sctx->fc)
return setkey_fallback_cip(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(!sctx->fc)) {
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
return;
}
cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}
static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(!sctx->fc)) {
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
return;
}
cpacf_km(sctx->fc | CPACF_DECRYPT,
&sctx->key, out, in, AES_BLOCK_SIZE);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
const char *name = tfm->__crt_alg->cra_name;
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->fallback.cip = crypto_alloc_cipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.cip)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
name);
return PTR_ERR(sctx->fallback.cip);
}
return 0;
}
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(sctx->fallback.cip);
sctx->fallback.cip = NULL;
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_module = THIS_MODULE,
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = crypto_aes_encrypt,
.cia_decrypt = crypto_aes_decrypt,
}
}
};
static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
@@ -1049,7 +944,6 @@ static struct aead_alg gcm_aes_aead = {
},
};
static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
@@ -1066,8 +960,6 @@ static int aes_s390_register_skcipher(struct skcipher_alg *alg)
static void aes_s390_fini(void)
{
if (aes_s390_alg)
crypto_unregister_alg(aes_s390_alg);
while (aes_s390_skciphers_num--)
crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
if (ctrblk)
@@ -1090,10 +982,6 @@ static int __init aes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
ret = crypto_register_alg(&aes_alg);
if (ret)
goto out_err;
aes_s390_alg = &aes_alg;
ret = aes_s390_register_skcipher(&ecb_aes_alg);
if (ret)
goto out_err;
@@ -1156,4 +1044,3 @@ MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

View File

@@ -19,9 +19,9 @@ config CRYPTO_DES_SPARC64
config CRYPTO_AES_SPARC64
tristate "Ciphers: AES, modes: ECB, CBC, CTR"
depends on SPARC64
select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
help
Block ciphers: AES cipher algorithms (FIPS-197)
Length-preseving ciphers: AES with ECB, CBC, and CTR modes
Architecture: sparc64 using crypto instructions

View File

@@ -7,6 +7,6 @@ obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
aes-sparc64-y := aes_asm.o aes_glue.o
aes-sparc64-y := aes_glue.o
des-sparc64-y := des_asm.o des_glue.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o

View File

@@ -32,8 +32,6 @@
#include <asm/elf.h>
struct aes_ops {
void (*encrypt)(const u64 *key, const u32 *input, u32 *output);
void (*decrypt)(const u64 *key, const u32 *input, u32 *output);
void (*load_encrypt_keys)(const u64 *key);
void (*load_decrypt_keys)(const u64 *key);
void (*ecb_encrypt)(const u64 *key, const u64 *input, u64 *output,
@@ -55,79 +53,7 @@ struct crypto_sparc64_aes_ctx {
u32 expanded_key_length;
};
extern void aes_sparc64_encrypt_128(const u64 *key, const u32 *input,
u32 *output);
extern void aes_sparc64_encrypt_192(const u64 *key, const u32 *input,
u32 *output);
extern void aes_sparc64_encrypt_256(const u64 *key, const u32 *input,
u32 *output);
extern void aes_sparc64_decrypt_128(const u64 *key, const u32 *input,
u32 *output);
extern void aes_sparc64_decrypt_192(const u64 *key, const u32 *input,
u32 *output);
extern void aes_sparc64_decrypt_256(const u64 *key, const u32 *input,
u32 *output);
extern void aes_sparc64_load_encrypt_keys_128(const u64 *key);
extern void aes_sparc64_load_encrypt_keys_192(const u64 *key);
extern void aes_sparc64_load_encrypt_keys_256(const u64 *key);
extern void aes_sparc64_load_decrypt_keys_128(const u64 *key);
extern void aes_sparc64_load_decrypt_keys_192(const u64 *key);
extern void aes_sparc64_load_decrypt_keys_256(const u64 *key);
extern void aes_sparc64_ecb_encrypt_128(const u64 *key, const u64 *input,
u64 *output, unsigned int len);
extern void aes_sparc64_ecb_encrypt_192(const u64 *key, const u64 *input,
u64 *output, unsigned int len);
extern void aes_sparc64_ecb_encrypt_256(const u64 *key, const u64 *input,
u64 *output, unsigned int len);
extern void aes_sparc64_ecb_decrypt_128(const u64 *key, const u64 *input,
u64 *output, unsigned int len);
extern void aes_sparc64_ecb_decrypt_192(const u64 *key, const u64 *input,
u64 *output, unsigned int len);
extern void aes_sparc64_ecb_decrypt_256(const u64 *key, const u64 *input,
u64 *output, unsigned int len);
extern void aes_sparc64_cbc_encrypt_128(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_cbc_encrypt_192(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_cbc_encrypt_256(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_cbc_decrypt_128(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_cbc_decrypt_192(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_cbc_decrypt_256(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_ctr_crypt_128(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_ctr_crypt_192(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
static struct aes_ops aes128_ops = {
.encrypt = aes_sparc64_encrypt_128,
.decrypt = aes_sparc64_decrypt_128,
.load_encrypt_keys = aes_sparc64_load_encrypt_keys_128,
.load_decrypt_keys = aes_sparc64_load_decrypt_keys_128,
.ecb_encrypt = aes_sparc64_ecb_encrypt_128,
@@ -138,8 +64,6 @@ static struct aes_ops aes128_ops = {
};
static struct aes_ops aes192_ops = {
.encrypt = aes_sparc64_encrypt_192,
.decrypt = aes_sparc64_decrypt_192,
.load_encrypt_keys = aes_sparc64_load_encrypt_keys_192,
.load_decrypt_keys = aes_sparc64_load_decrypt_keys_192,
.ecb_encrypt = aes_sparc64_ecb_encrypt_192,
@@ -150,8 +74,6 @@ static struct aes_ops aes192_ops = {
};
static struct aes_ops aes256_ops = {
.encrypt = aes_sparc64_encrypt_256,
.decrypt = aes_sparc64_decrypt_256,
.load_encrypt_keys = aes_sparc64_load_encrypt_keys_256,
.load_decrypt_keys = aes_sparc64_load_decrypt_keys_256,
.ecb_encrypt = aes_sparc64_ecb_encrypt_256,
@@ -161,13 +83,10 @@ static struct aes_ops aes256_ops = {
.ctr_crypt = aes_sparc64_ctr_crypt_256,
};
extern void aes_sparc64_key_expand(const u32 *in_key, u64 *output_key,
unsigned int key_len);
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
@@ -195,26 +114,6 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->ops->encrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
}
static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
}
static int ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -358,26 +257,6 @@ static int ctr_crypt(struct skcipher_request *req)
return err;
}
static struct crypto_alg cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = crypto_aes_encrypt,
.cia_decrypt = crypto_aes_decrypt
}
}
};
static struct skcipher_alg skcipher_algs[] = {
{
.base.cra_name = "ecb(aes)",
@@ -440,26 +319,17 @@ static bool __init sparc64_has_aes_opcode(void)
static int __init aes_sparc64_mod_init(void)
{
int err;
if (!sparc64_has_aes_opcode()) {
pr_info("sparc64 aes opcodes not available.\n");
return -ENODEV;
}
pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
err = crypto_register_alg(&cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(skcipher_algs,
ARRAY_SIZE(skcipher_algs));
if (err)
crypto_unregister_alg(&cipher_alg);
return err;
return crypto_register_skciphers(skcipher_algs,
ARRAY_SIZE(skcipher_algs));
}
static void __exit aes_sparc64_mod_fini(void)
{
crypto_unregister_alg(&cipher_alg);
crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
}

View File

@@ -7,10 +7,8 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AEAD
select CRYPTO_LIB_AES
select CRYPTO_LIB_GF128MUL
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
help
Block cipher: AES cipher algorithms
AEAD cipher: AES with GCM
Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XCTR, XTS
@@ -333,26 +331,6 @@ config CRYPTO_AEGIS128_AESNI_SSE2
- AES-NI (AES New Instructions)
- SSE4.1 (Streaming SIMD Extensions 4.1)
config CRYPTO_NHPOLY1305_SSE2
tristate "Hash functions: NHPoly1305 (SSE2)"
depends on 64BIT
select CRYPTO_NHPOLY1305
help
NHPoly1305 hash function for Adiantum
Architecture: x86_64 using:
- SSE2 (Streaming SIMD Extensions 2)
config CRYPTO_NHPOLY1305_AVX2
tristate "Hash functions: NHPoly1305 (AVX2)"
depends on 64BIT
select CRYPTO_NHPOLY1305
help
NHPoly1305 hash function for Adiantum
Architecture: x86_64 using:
- AVX2 (Advanced Vector Extensions 2)
config CRYPTO_SM3_AVX_X86_64
tristate "Hash functions: SM3 (AVX)"
depends on 64BIT

View File

@@ -53,11 +53,6 @@ aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \
obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
obj-$(CONFIG_CRYPTO_SM3_AVX_X86_64) += sm3-avx-x86_64.o
sm3-avx-x86_64-y := sm3-avx-asm_64.o sm3_avx_glue.o

View File

@@ -143,10 +143,11 @@
.octa 0
// Offsets in struct aes_gcm_key_aesni
#define OFFSETOF_AESKEYLEN 480
#define OFFSETOF_H_POWERS 496
#define OFFSETOF_H_POWERS_XORED 624
#define OFFSETOF_H_TIMES_X64 688
#define OFFSETOF_AESKEYLEN 0
#define OFFSETOF_AESROUNDKEYS 16
#define OFFSETOF_H_POWERS 272
#define OFFSETOF_H_POWERS_XORED 400
#define OFFSETOF_H_TIMES_X64 464
.text
@@ -505,9 +506,9 @@
// Encrypt an all-zeroes block to get the raw hash subkey.
movl OFFSETOF_AESKEYLEN(KEY), %eax
lea 6*16(KEY,%rax,4), RNDKEYLAST_PTR
movdqa (KEY), H_POW1 // Zero-th round key XOR all-zeroes block
lea 16(KEY), %rax
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,%rax,4), RNDKEYLAST_PTR
movdqa OFFSETOF_AESROUNDKEYS(KEY), H_POW1
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rax
1:
aesenc (%rax), H_POW1
add $16, %rax
@@ -624,7 +625,7 @@
// the zero-th AES round key. Clobbers TMP0 and TMP1.
.macro _ctr_begin_8x
movq .Lone(%rip), TMP0
movdqa (KEY), TMP1 // zero-th round key
movdqa OFFSETOF_AESROUNDKEYS(KEY), TMP1 // zero-th round key
.irp i, 0,1,2,3,4,5,6,7
_vpshufb BSWAP_MASK, LE_CTR, AESDATA\i
pxor TMP1, AESDATA\i
@@ -726,7 +727,7 @@
movdqu (LE_CTR_PTR), LE_CTR
movl OFFSETOF_AESKEYLEN(KEY), AESKEYLEN
lea 6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
// If there are at least 8*16 bytes of data, then continue into the main
// loop, which processes 8*16 bytes of data per iteration.
@@ -745,7 +746,7 @@
.if \enc
// Encrypt the first 8 plaintext blocks.
_ctr_begin_8x
lea 16(KEY), %rsi
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rsi
.p2align 4
1:
movdqa (%rsi), TMP0
@@ -767,7 +768,7 @@
// Generate the next set of 8 counter blocks and start encrypting them.
_ctr_begin_8x
lea 16(KEY), %rsi
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rsi
// Do a round of AES, and start the GHASH update of 8 ciphertext blocks
// by doing the unreduced multiplication for the first ciphertext block.
@@ -869,7 +870,7 @@
// Encrypt the next counter block.
_vpshufb BSWAP_MASK, LE_CTR, TMP0
paddd ONE, LE_CTR
pxor (KEY), TMP0
pxor OFFSETOF_AESROUNDKEYS(KEY), TMP0
lea -6*16(RNDKEYLAST_PTR), %rsi // Reduce code size
cmp $24, AESKEYLEN
jl 128f // AES-128?
@@ -926,8 +927,8 @@
// Encrypt a counter block for the last time.
pshufb BSWAP_MASK, LE_CTR
pxor (KEY), LE_CTR
lea 16(KEY), %rsi
pxor OFFSETOF_AESROUNDKEYS(KEY), LE_CTR
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rsi
1:
aesenc (%rsi), LE_CTR
add $16, %rsi
@@ -1038,12 +1039,12 @@
// Make %rax point to the 6th from last AES round key. (Using signed
// byte offsets -7*16 through 6*16 decreases code size.)
lea (KEY,AESKEYLEN64,4), %rax
lea OFFSETOF_AESROUNDKEYS(KEY,AESKEYLEN64,4), %rax
// AES-encrypt the counter block and also multiply GHASH_ACC by H^1.
// Interleave the AES and GHASH instructions to improve performance.
pshufb BSWAP_MASK, %xmm0
pxor (KEY), %xmm0
pxor OFFSETOF_AESROUNDKEYS(KEY), %xmm0
cmp $24, AESKEYLEN
jl 128f // AES-128?
je 192f // AES-192?

View File

@@ -122,8 +122,9 @@
.octa 2
// Offsets in struct aes_gcm_key_vaes_avx2
#define OFFSETOF_AESKEYLEN 480
#define OFFSETOF_H_POWERS 512
#define OFFSETOF_AESKEYLEN 0
#define OFFSETOF_AESROUNDKEYS 16
#define OFFSETOF_H_POWERS 288
#define NUM_H_POWERS 8
#define OFFSETOFEND_H_POWERS (OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
#define OFFSETOF_H_POWERS_XORED OFFSETOFEND_H_POWERS
@@ -240,9 +241,9 @@ SYM_FUNC_START(aes_gcm_precompute_vaes_avx2)
// Encrypt an all-zeroes block to get the raw hash subkey.
movl OFFSETOF_AESKEYLEN(KEY), %eax
lea 6*16(KEY,%rax,4), RNDKEYLAST_PTR
vmovdqu (KEY), H_CUR_XMM // Zero-th round key XOR all-zeroes block
lea 16(KEY), %rax
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,%rax,4), RNDKEYLAST_PTR
vmovdqu OFFSETOF_AESROUNDKEYS(KEY), H_CUR_XMM
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rax
1:
vaesenc (%rax), H_CUR_XMM, H_CUR_XMM
add $16, %rax
@@ -635,7 +636,7 @@ SYM_FUNC_END(aes_gcm_aad_update_vaes_avx2)
// the last AES round. Clobbers %rax and TMP0.
.macro _aesenc_loop vecs:vararg
_ctr_begin \vecs
lea 16(KEY), %rax
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rax
.Laesenc_loop\@:
vbroadcasti128 (%rax), TMP0
_vaesenc TMP0, \vecs
@@ -768,8 +769,8 @@ SYM_FUNC_END(aes_gcm_aad_update_vaes_avx2)
// Make RNDKEYLAST_PTR point to the last AES round key. This is the
// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
// respectively. Then load the zero-th and last round keys.
lea 6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
vbroadcasti128 (KEY), RNDKEY0
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
vbroadcasti128 OFFSETOF_AESROUNDKEYS(KEY), RNDKEY0
vbroadcasti128 (RNDKEYLAST_PTR), RNDKEYLAST
// Finish initializing LE_CTR by adding 1 to the second block.
@@ -1069,12 +1070,12 @@ SYM_FUNC_END(aes_gcm_aad_update_vaes_avx2)
.endif
// Make %rax point to the last AES round key for the chosen AES variant.
lea 6*16(KEY,AESKEYLEN64,4), %rax
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,AESKEYLEN64,4), %rax
// Start the AES encryption of the counter block by swapping the counter
// block to big-endian and XOR-ing it with the zero-th AES round key.
vpshufb BSWAP_MASK, LE_CTR, %xmm0
vpxor (KEY), %xmm0, %xmm0
vpxor OFFSETOF_AESROUNDKEYS(KEY), %xmm0, %xmm0
// Complete the AES encryption and multiply GHASH_ACC by H^1.
// Interleave the AES and GHASH instructions to improve performance.

View File

@@ -86,10 +86,13 @@
#define NUM_H_POWERS 16
// Offset to AES key length (in bytes) in the key struct
#define OFFSETOF_AESKEYLEN 480
#define OFFSETOF_AESKEYLEN 0
// Offset to AES round keys in the key struct
#define OFFSETOF_AESROUNDKEYS 16
// Offset to start of hash key powers array in the key struct
#define OFFSETOF_H_POWERS 512
#define OFFSETOF_H_POWERS 320
// Offset to end of hash key powers array in the key struct.
//
@@ -301,9 +304,9 @@ SYM_FUNC_START(aes_gcm_precompute_vaes_avx512)
// Encrypt an all-zeroes block to get the raw hash subkey.
movl OFFSETOF_AESKEYLEN(KEY), %eax
lea 6*16(KEY,%rax,4), RNDKEYLAST_PTR
vmovdqu (KEY), %xmm0 // Zero-th round key XOR all-zeroes block
add $16, KEY
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,%rax,4), RNDKEYLAST_PTR
vmovdqu OFFSETOF_AESROUNDKEYS(KEY), %xmm0
add $OFFSETOF_AESROUNDKEYS+16, KEY
1:
vaesenc (KEY), %xmm0, %xmm0
add $16, KEY
@@ -790,8 +793,8 @@ SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)
// Make RNDKEYLAST_PTR point to the last AES round key. This is the
// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
// respectively. Then load the zero-th and last round keys.
lea 6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
vbroadcasti32x4 (KEY), RNDKEY0
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
vbroadcasti32x4 OFFSETOF_AESROUNDKEYS(KEY), RNDKEY0
vbroadcasti32x4 (RNDKEYLAST_PTR), RNDKEYLAST
// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
@@ -834,7 +837,7 @@ SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)
// Encrypt the first 4 vectors of plaintext blocks. Leave the resulting
// ciphertext in GHASHDATA[0-3] for GHASH.
_ctr_begin_4x
lea 16(KEY), %rax
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rax
1:
vbroadcasti32x4 (%rax), RNDKEY
_vaesenc_4x RNDKEY
@@ -957,7 +960,7 @@ SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)
vpshufb BSWAP_MASK, LE_CTR, %zmm0
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
vpxord RNDKEY0, %zmm0, %zmm0
lea 16(KEY), %rax
lea OFFSETOF_AESROUNDKEYS+16(KEY), %rax
1:
vbroadcasti32x4 (%rax), RNDKEY
vaesenc RNDKEY, %zmm0, %zmm0
@@ -1087,12 +1090,12 @@ SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)
.endif
// Make %rax point to the last AES round key for the chosen AES variant.
lea 6*16(KEY,AESKEYLEN64,4), %rax
lea OFFSETOF_AESROUNDKEYS+6*16(KEY,AESKEYLEN64,4), %rax
// Start the AES encryption of the counter block by swapping the counter
// block to big-endian and XOR-ing it with the zero-th AES round key.
vpshufb BSWAP_MASK, LE_CTR, %xmm0
vpxor (KEY), %xmm0, %xmm0
vpxor OFFSETOF_AESROUNDKEYS(KEY), %xmm0, %xmm0
// Complete the AES encryption and multiply GHASH_ACC by H^1.
// Interleave the AES and GHASH instructions to improve performance.

View File

@@ -436,31 +436,6 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
RET
SYM_FUNC_END(_aesni_enc4)
/*
* void aesni_dec (const void *ctx, u8 *dst, const u8 *src)
*/
SYM_FUNC_START(aesni_dec)
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
pushl KLEN
movl (FRAME_OFFSET+12)(%esp), KEYP # ctx
movl (FRAME_OFFSET+16)(%esp), OUTP # dst
movl (FRAME_OFFSET+20)(%esp), INP # src
#endif
mov 480(KEYP), KLEN # key length
add $240, KEYP
movups (INP), STATE # input
call _aesni_dec1
movups STATE, (OUTP) #output
#ifndef __x86_64__
popl KLEN
popl KEYP
#endif
FRAME_END
RET
SYM_FUNC_END(aesni_dec)
/*
* _aesni_dec1: internal ABI
* input:

View File

@@ -60,7 +60,6 @@ static inline void *aes_align_addr(void *addr)
asmlinkage void aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
@@ -113,39 +112,6 @@ static int aes_set_key_common(struct crypto_aes_ctx *ctx,
return 0;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
return aes_set_key_common(aes_ctx(crypto_tfm_ctx(tfm)), in_key,
key_len);
}
static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
if (!crypto_simd_usable()) {
aes_encrypt(ctx, dst, src);
} else {
kernel_fpu_begin();
aesni_enc(ctx, dst, src);
kernel_fpu_end();
}
}
static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
if (!crypto_simd_usable()) {
aes_decrypt(ctx, dst, src);
} else {
kernel_fpu_begin();
aesni_dec(ctx, dst, src);
kernel_fpu_end();
}
}
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
@@ -544,25 +510,6 @@ static int xts_decrypt_aesni(struct skcipher_request *req)
return xts_crypt(req, aesni_xts_encrypt_iv, aesni_xts_decrypt);
}
static struct crypto_alg aesni_cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-aesni",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = aesni_encrypt,
.cia_decrypt = aesni_decrypt
}
}
};
static struct skcipher_alg aesni_skciphers[] = {
{
.base = {
@@ -833,7 +780,7 @@ DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800);
/* The common part of the x86_64 AES-GCM key struct */
struct aes_gcm_key {
/* Expanded AES key and the AES key length in bytes */
struct crypto_aes_ctx aes_key;
struct aes_enckey aes_key;
/* RFC4106 nonce (used only by the rfc4106 algorithms) */
u32 rfc4106_nonce;
@@ -842,11 +789,10 @@ struct aes_gcm_key {
/* Key struct used by the AES-NI implementations of AES-GCM */
struct aes_gcm_key_aesni {
/*
* Common part of the key. The assembly code requires 16-byte alignment
* for the round keys; we get this by them being located at the start of
* the struct and the whole struct being 16-byte aligned.
* Common part of the key. 16-byte alignment is required by the
* assembly code.
*/
struct aes_gcm_key base;
struct aes_gcm_key base __aligned(16);
/*
* Powers of the hash key H^8 through H^1. These are 128-bit values.
@@ -877,10 +823,9 @@ struct aes_gcm_key_aesni {
struct aes_gcm_key_vaes_avx2 {
/*
* Common part of the key. The assembly code prefers 16-byte alignment
* for the round keys; we get this by them being located at the start of
* the struct and the whole struct being 32-byte aligned.
* for this.
*/
struct aes_gcm_key base;
struct aes_gcm_key base __aligned(16);
/*
* Powers of the hash key H^8 through H^1. These are 128-bit values.
@@ -907,10 +852,9 @@ struct aes_gcm_key_vaes_avx2 {
struct aes_gcm_key_vaes_avx512 {
/*
* Common part of the key. The assembly code prefers 16-byte alignment
* for the round keys; we get this by them being located at the start of
* the struct and the whole struct being 64-byte aligned.
* for this.
*/
struct aes_gcm_key base;
struct aes_gcm_key base __aligned(16);
/*
* Powers of the hash key H^16 through H^1. These are 128-bit values.
@@ -1235,26 +1179,26 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *raw_key,
}
/* The assembly code assumes the following offsets. */
BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, base.aes_key.key_enc) != 0);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, base.aes_key.key_length) != 480);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_powers) != 496);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_powers_xored) != 624);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_times_x64) != 688);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, base.aes_key.key_enc) != 0);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, base.aes_key.key_length) != 480);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, h_powers) != 512);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, h_powers_xored) != 640);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, base.aes_key.key_enc) != 0);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, base.aes_key.key_length) != 480);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, h_powers) != 512);
BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, padding) != 768);
static_assert(offsetof(struct aes_gcm_key_aesni, base.aes_key.len) == 0);
static_assert(offsetof(struct aes_gcm_key_aesni, base.aes_key.k.rndkeys) == 16);
static_assert(offsetof(struct aes_gcm_key_aesni, h_powers) == 272);
static_assert(offsetof(struct aes_gcm_key_aesni, h_powers_xored) == 400);
static_assert(offsetof(struct aes_gcm_key_aesni, h_times_x64) == 464);
static_assert(offsetof(struct aes_gcm_key_vaes_avx2, base.aes_key.len) == 0);
static_assert(offsetof(struct aes_gcm_key_vaes_avx2, base.aes_key.k.rndkeys) == 16);
static_assert(offsetof(struct aes_gcm_key_vaes_avx2, h_powers) == 288);
static_assert(offsetof(struct aes_gcm_key_vaes_avx2, h_powers_xored) == 416);
static_assert(offsetof(struct aes_gcm_key_vaes_avx512, base.aes_key.len) == 0);
static_assert(offsetof(struct aes_gcm_key_vaes_avx512, base.aes_key.k.rndkeys) == 16);
static_assert(offsetof(struct aes_gcm_key_vaes_avx512, h_powers) == 320);
static_assert(offsetof(struct aes_gcm_key_vaes_avx512, padding) == 576);
err = aes_prepareenckey(&key->aes_key, raw_key, keylen);
if (err)
return err;
if (likely(crypto_simd_usable())) {
err = aes_check_keylen(keylen);
if (err)
return err;
kernel_fpu_begin();
aesni_set_key(&key->aes_key, raw_key, keylen);
aes_gcm_precompute(key, flags);
kernel_fpu_end();
} else {
@@ -1268,10 +1212,6 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *raw_key,
be128 h;
int i;
err = aes_expandkey(&key->aes_key, raw_key, keylen);
if (err)
return err;
/* Encrypt the all-zeroes block to get the hash key H^1 */
aes_encrypt(&key->aes_key, (u8 *)&h1, (u8 *)&h1);
@@ -1689,14 +1629,10 @@ static int __init aesni_init(void)
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
err = crypto_register_alg(&aesni_cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers));
if (err)
goto unregister_cipher;
return err;
err = crypto_register_aeads(aes_gcm_algs_aesni,
ARRAY_SIZE(aes_gcm_algs_aesni));
@@ -1716,8 +1652,6 @@ unregister_avx:
unregister_skciphers:
crypto_unregister_skciphers(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers));
unregister_cipher:
crypto_unregister_alg(&aesni_cipher_alg);
return err;
}
@@ -1727,7 +1661,6 @@ static void __exit aesni_exit(void)
ARRAY_SIZE(aes_gcm_algs_aesni));
crypto_unregister_skciphers(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers));
crypto_unregister_alg(&aesni_cipher_alg);
unregister_avx_algs();
}

View File

@@ -1,81 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (AVX2 accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
static int nhpoly1305_avx2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
kernel_fpu_begin();
crypto_nhpoly1305_update_helper(desc, src, n, nh_avx2);
kernel_fpu_end();
src += n;
srclen -= n;
} while (srclen);
return 0;
}
static int nhpoly1305_avx2_digest(struct shash_desc *desc,
const u8 *src, unsigned int srclen, u8 *out)
{
return crypto_nhpoly1305_init(desc) ?:
nhpoly1305_avx2_update(desc, src, srclen) ?:
crypto_nhpoly1305_final(desc, out);
}
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-avx2",
.base.cra_priority = 300,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_avx2_update,
.final = crypto_nhpoly1305_final,
.digest = nhpoly1305_avx2_digest,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_AVX2) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (AVX2-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-avx2");

View File

@@ -1,80 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (SSE2 accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
static int nhpoly1305_sse2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
kernel_fpu_begin();
crypto_nhpoly1305_update_helper(desc, src, n, nh_sse2);
kernel_fpu_end();
src += n;
srclen -= n;
} while (srclen);
return 0;
}
static int nhpoly1305_sse2_digest(struct shash_desc *desc,
const u8 *src, unsigned int srclen, u8 *out)
{
return crypto_nhpoly1305_init(desc) ?:
nhpoly1305_sse2_update(desc, src, srclen) ?:
crypto_nhpoly1305_final(desc, out);
}
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-sse2",
.base.cra_priority = 200,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_sse2_update,
.final = crypto_nhpoly1305_final,
.digest = nhpoly1305_sse2_digest,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (SSE2-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-sse2");

View File

@@ -366,27 +366,6 @@ config CRYPTO_AES
The AES specifies three key sizes: 128, 192 and 256 bits
config CRYPTO_AES_TI
tristate "AES (Advanced Encryption Standard) (fixed time)"
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
help
AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3)
This is a generic implementation of AES that attempts to eliminate
data dependent latencies as much as possible without affecting
performance too much. It is intended for use by the generic CCM
and GCM drivers, and other CTR or CMAC/XCBC based modes that rely
solely on encryption (although decryption is supported as well, but
with a more dramatic performance hit)
Instead of using 16 lookup tables of 1 KB each, (8 for encryption and
8 for decryption), this implementation only uses just two S-boxes of
256 bytes each, and attempts to eliminate data dependent latencies by
prefetching the entire table into the cache at the start of each
block. Interrupts are also disabled to avoid races where cachelines
are evicted when the CPU is interrupted to do something else.
config CRYPTO_ANUBIS
tristate "Anubis"
depends on CRYPTO_USER_API_ENABLE_OBSOLETE
@@ -601,9 +580,9 @@ menu "Length-preserving ciphers and modes"
config CRYPTO_ADIANTUM
tristate "Adiantum"
select CRYPTO_CHACHA20
select CRYPTO_LIB_NH
select CRYPTO_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC
select CRYPTO_NHPOLY1305
select CRYPTO_MANAGER
help
Adiantum tweakable, length-preserving encryption mode
@@ -759,12 +738,6 @@ config CRYPTO_XTS
implementation currently can't handle a sectorsize which is not a
multiple of 16 bytes.
config CRYPTO_NHPOLY1305
tristate
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC
endmenu
menu "AEAD (authenticated encryption with associated data) ciphers"
@@ -772,7 +745,7 @@ menu "AEAD (authenticated encryption with associated data) ciphers"
config CRYPTO_AEGIS128
tristate "AEGIS-128"
select CRYPTO_AEAD
select CRYPTO_AES # for AES S-box tables
select CRYPTO_LIB_AES # for AES S-box tables
help
AEGIS-128 AEAD algorithm

View File

@@ -94,7 +94,6 @@ obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o
@@ -131,11 +130,9 @@ obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
obj-$(CONFIG_CRYPTO_AES) += aes.o
obj-$(CONFIG_CRYPTO_SM4) += sm4.o
obj-$(CONFIG_CRYPTO_SM4_GENERIC) += sm4_generic.o
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o

View File

@@ -20,23 +20,14 @@
*
* - Stream cipher: XChaCha12 or XChaCha20
* - Block cipher: any with a 128-bit block size and 256-bit key
*
* This implementation doesn't currently allow other ε-∆U hash functions, i.e.
* HPolyC is not supported. This is because Adiantum is ~20% faster than HPolyC
* but still provably as secure, and also the ε-∆U hash function of HBSH is
* formally defined to take two inputs (tweak, message) which makes it difficult
* to wrap with the crypto_shash API. Rather, some details need to be handled
* here. Nevertheless, if needed in the future, support for other ε-U hash
* functions could be added here.
*/
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/nh.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -50,7 +41,7 @@
#define BLOCKCIPHER_KEY_SIZE 32
/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE (POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)
#define HASH_KEY_SIZE (2 * POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
/*
* The specification allows variable-length tweaks, but Linux's crypto API
@@ -64,43 +55,40 @@
struct adiantum_instance_ctx {
struct crypto_skcipher_spawn streamcipher_spawn;
struct crypto_cipher_spawn blockcipher_spawn;
struct crypto_shash_spawn hash_spawn;
};
struct adiantum_tfm_ctx {
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
struct crypto_shash *hash;
struct poly1305_core_key header_hash_key;
struct poly1305_core_key msg_poly_key;
u32 nh_key[NH_KEY_WORDS];
};
struct nhpoly1305_ctx {
/* Running total of polynomial evaluation */
struct poly1305_state poly_state;
/* Partial block buffer */
u8 buffer[NH_MESSAGE_UNIT];
unsigned int buflen;
/*
* Number of bytes remaining until the current NH message reaches
* NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
*/
unsigned int nh_remaining;
__le64 nh_hash[NH_NUM_PASSES];
};
struct adiantum_request_ctx {
/*
* Buffer for right-hand part of data, i.e.
*
* P_L => P_M => C_M => C_R when encrypting, or
* C_R => C_M => P_M => P_L when decrypting.
*
* Also used to build the IV for the stream cipher.
* skcipher sub-request size is unknown at compile-time, so it needs to
* go after the members with known sizes.
*/
union {
u8 bytes[XCHACHA_IV_SIZE];
__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
le128 bignum; /* interpret as element of Z/(2^{128}Z) */
} rbuf;
bool enc; /* true if encrypting, false if decrypting */
/*
* The result of the Poly1305 ε-∆U hash function applied to
* (bulk length, tweak)
*/
le128 header_hash;
/* Sub-requests, must be last */
union {
struct shash_desc hash_desc;
struct nhpoly1305_ctx hash_ctx;
struct skcipher_request streamcipher_req;
} u;
};
@@ -170,12 +158,11 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
/* Set the hash key (K_H) */
poly1305_core_setkey(&tctx->header_hash_key, keyp);
keyp += POLY1305_BLOCK_SIZE;
crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
keyp += NHPOLY1305_KEY_SIZE;
poly1305_core_setkey(&tctx->msg_poly_key, keyp);
keyp += POLY1305_BLOCK_SIZE;
for (int i = 0; i < NH_KEY_WORDS; i++)
tctx->nh_key[i] = get_unaligned_le32(&keyp[i * 4]);
keyp += NH_KEY_BYTES;
WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
kfree_sensitive(data);
@@ -206,7 +193,7 @@ static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
/*
* Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
* result to rctx->header_hash. This is the calculation
* result to @out. This is the calculation
*
* H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
*
@@ -216,11 +203,10 @@ static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
* inputs only) taken over the left-hand part (the "bulk") of the message, to
* give the overall Adiantum hash of the (tweak, left-hand part) pair.
*/
static void adiantum_hash_header(struct skcipher_request *req)
static void adiantum_hash_header(struct skcipher_request *req, le128 *out)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct {
__le64 message_bits;
@@ -240,99 +226,143 @@ static void adiantum_hash_header(struct skcipher_request *req)
poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
poly1305_core_emit(&state, NULL, &rctx->header_hash);
poly1305_core_emit(&state, NULL, out);
}
/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
struct scatterlist *sgl, unsigned int nents,
le128 *digest)
/* Pass the next NH hash value through Poly1305 */
static void process_nh_hash_value(struct nhpoly1305_ctx *ctx,
const struct adiantum_tfm_ctx *key)
{
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct shash_desc *hash_desc = &rctx->u.hash_desc;
struct sg_mapping_iter miter;
unsigned int i, n;
int err;
static_assert(NH_HASH_BYTES % POLY1305_BLOCK_SIZE == 0);
err = crypto_shash_init(hash_desc);
if (err)
return err;
sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
err = crypto_shash_update(hash_desc, miter.addr, n);
if (err)
break;
}
sg_miter_stop(&miter);
if (err)
return err;
return crypto_shash_final(hash_desc, (u8 *)digest);
poly1305_core_blocks(&ctx->poly_state, &key->msg_poly_key, ctx->nh_hash,
NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1);
}
/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
/*
* Feed the next portion of the message data, as a whole number of 16-byte
* "NH message units", through NH and Poly1305. Each NH hash is taken over
* 1024 bytes, except possibly the final one which is taken over a multiple of
* 16 bytes up to 1024. Also, in the case where data is passed in misaligned
* chunks, we combine partial hashes; the end result is the same either way.
*/
static void nhpoly1305_units(struct nhpoly1305_ctx *ctx,
const struct adiantum_tfm_ctx *key,
const u8 *data, size_t len)
{
do {
unsigned int bytes;
if (ctx->nh_remaining == 0) {
/* Starting a new NH message */
bytes = min(len, NH_MESSAGE_BYTES);
nh(key->nh_key, data, bytes, ctx->nh_hash);
ctx->nh_remaining = NH_MESSAGE_BYTES - bytes;
} else {
/* Continuing a previous NH message */
__le64 tmp_hash[NH_NUM_PASSES];
unsigned int pos;
pos = NH_MESSAGE_BYTES - ctx->nh_remaining;
bytes = min(len, ctx->nh_remaining);
nh(&key->nh_key[pos / 4], data, bytes, tmp_hash);
for (int i = 0; i < NH_NUM_PASSES; i++)
le64_add_cpu(&ctx->nh_hash[i],
le64_to_cpu(tmp_hash[i]));
ctx->nh_remaining -= bytes;
}
if (ctx->nh_remaining == 0)
process_nh_hash_value(ctx, key);
data += bytes;
len -= bytes;
} while (len);
}
static void nhpoly1305_init(struct nhpoly1305_ctx *ctx)
{
poly1305_core_init(&ctx->poly_state);
ctx->buflen = 0;
ctx->nh_remaining = 0;
}
static void nhpoly1305_update(struct nhpoly1305_ctx *ctx,
const struct adiantum_tfm_ctx *key,
const u8 *data, size_t len)
{
unsigned int bytes;
if (ctx->buflen) {
bytes = min(len, (int)NH_MESSAGE_UNIT - ctx->buflen);
memcpy(&ctx->buffer[ctx->buflen], data, bytes);
ctx->buflen += bytes;
if (ctx->buflen < NH_MESSAGE_UNIT)
return;
nhpoly1305_units(ctx, key, ctx->buffer, NH_MESSAGE_UNIT);
ctx->buflen = 0;
data += bytes;
len -= bytes;
}
if (len >= NH_MESSAGE_UNIT) {
bytes = round_down(len, NH_MESSAGE_UNIT);
nhpoly1305_units(ctx, key, data, bytes);
data += bytes;
len -= bytes;
}
if (len) {
memcpy(ctx->buffer, data, len);
ctx->buflen = len;
}
}
static void nhpoly1305_final(struct nhpoly1305_ctx *ctx,
const struct adiantum_tfm_ctx *key, le128 *out)
{
if (ctx->buflen) {
memset(&ctx->buffer[ctx->buflen], 0,
NH_MESSAGE_UNIT - ctx->buflen);
nhpoly1305_units(ctx, key, ctx->buffer, NH_MESSAGE_UNIT);
}
if (ctx->nh_remaining)
process_nh_hash_value(ctx, key);
poly1305_core_emit(&ctx->poly_state, NULL, out);
}
/*
* Hash the left-hand part (the "bulk") of the message as follows:
*
* H_L ← Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
*
* See section 6.4 of the Adiantum paper. This is an ε-almost-∆-universal
* (ε-∆U) hash function for equal-length inputs over Z/(2^{128}Z), where the "∆"
* operation is addition. It hashes 1024-byte chunks of the input with the NH
* hash function, reducing the input length by 32x. The resulting NH hashes are
* evaluated as a polynomial in GF(2^{130}-5), like in the Poly1305 MAC. Note
* that the polynomial evaluation by itself would suffice to achieve the ε-U
* property; NH is used for performance since it's much faster than Poly1305.
*/
static void adiantum_hash_message(struct skcipher_request *req,
struct scatterlist *sgl, le128 *out)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct scatterlist *dst = req->dst;
const unsigned int dst_nents = sg_nents(dst);
le128 digest;
int err;
unsigned int len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct scatter_walk walk;
/* If decrypting, decrypt C_M with the block cipher to get P_M */
if (!rctx->enc)
crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
nhpoly1305_init(&rctx->u.hash_ctx);
scatterwalk_start(&walk, sgl);
while (len) {
unsigned int n = scatterwalk_next(&walk, len);
/*
* Second hash step
* enc: C_R = C_M - H_{K_H}(T, C_L)
* dec: P_R = P_M - H_{K_H}(T, P_L)
*/
rctx->u.hash_desc.tfm = tctx->hash;
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
/* Fast path for single-page destination */
struct page *page = sg_page(dst);
void *virt = kmap_local_page(page) + dst->offset;
err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
(u8 *)&digest);
if (err) {
kunmap_local(virt);
return err;
}
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
flush_dcache_page(page);
kunmap_local(virt);
} else {
/* Slow path that works for any destination scatterlist */
err = adiantum_hash_message(req, dst, dst_nents, &digest);
if (err)
return err;
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
bulk_len, sizeof(le128), 1);
nhpoly1305_update(&rctx->u.hash_ctx, tctx, walk.addr, n);
scatterwalk_done_src(&walk, n);
len -= n;
}
return 0;
}
static void adiantum_streamcipher_done(void *data, int err)
{
struct skcipher_request *req = data;
if (!err)
err = adiantum_finish(req);
skcipher_request_complete(req, err);
nhpoly1305_final(&rctx->u.hash_ctx, tctx, out);
}
static int adiantum_crypt(struct skcipher_request *req, bool enc)
@@ -341,55 +371,63 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct scatterlist *src = req->src;
const unsigned int src_nents = sg_nents(src);
struct scatterlist *src = req->src, *dst = req->dst;
/*
* Buffer for right-hand part of data, i.e.
*
* P_L => P_M => C_M => C_R when encrypting, or
* C_R => C_M => P_M => P_L when decrypting.
*
* Also used to build the IV for the stream cipher.
*/
union {
u8 bytes[XCHACHA_IV_SIZE];
__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
le128 bignum; /* interpret as element of Z/(2^{128}Z) */
} rbuf;
le128 header_hash, msg_hash;
unsigned int stream_len;
le128 digest;
int err;
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
return -EINVAL;
rctx->enc = enc;
/*
* First hash step
* enc: P_M = P_R + H_{K_H}(T, P_L)
* dec: C_M = C_R + H_{K_H}(T, C_L)
*/
adiantum_hash_header(req);
rctx->u.hash_desc.tfm = tctx->hash;
if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
adiantum_hash_header(req, &header_hash);
if (src->length >= req->cryptlen &&
src->offset + req->cryptlen <= PAGE_SIZE) {
/* Fast path for single-page source */
void *virt = kmap_local_page(sg_page(src)) + src->offset;
err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
(u8 *)&digest);
memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
nhpoly1305_init(&rctx->u.hash_ctx);
nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
memcpy(&rbuf.bignum, virt + bulk_len, sizeof(le128));
kunmap_local(virt);
} else {
/* Slow path that works for any source scatterlist */
err = adiantum_hash_message(req, src, src_nents, &digest);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
bulk_len, sizeof(le128), 0);
adiantum_hash_message(req, src, &msg_hash);
memcpy_from_sglist(&rbuf.bignum, src, bulk_len, sizeof(le128));
}
if (err)
return err;
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
le128_add(&rbuf.bignum, &rbuf.bignum, &header_hash);
le128_add(&rbuf.bignum, &rbuf.bignum, &msg_hash);
/* If encrypting, encrypt P_M with the block cipher to get C_M */
if (enc)
crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
crypto_cipher_encrypt_one(tctx->blockcipher, rbuf.bytes,
rbuf.bytes);
/* Initialize the rest of the XChaCha IV (first part is C_M) */
BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
BUILD_BUG_ON(XCHACHA_IV_SIZE != 32); /* nonce || stream position */
rctx->rbuf.words[4] = cpu_to_le32(1);
rctx->rbuf.words[5] = 0;
rctx->rbuf.words[6] = 0;
rctx->rbuf.words[7] = 0;
rbuf.words[4] = cpu_to_le32(1);
rbuf.words[5] = 0;
rbuf.words[6] = 0;
rbuf.words[7] = 0;
/*
* XChaCha needs to be done on all the data except the last 16 bytes;
@@ -406,12 +444,44 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
req->dst, stream_len, &rctx->rbuf);
req->dst, stream_len, &rbuf);
skcipher_request_set_callback(&rctx->u.streamcipher_req,
req->base.flags,
adiantum_streamcipher_done, req);
return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
adiantum_finish(req);
req->base.flags, NULL, NULL);
err = crypto_skcipher_encrypt(&rctx->u.streamcipher_req);
if (err)
return err;
/* If decrypting, decrypt C_M with the block cipher to get P_M */
if (!enc)
crypto_cipher_decrypt_one(tctx->blockcipher, rbuf.bytes,
rbuf.bytes);
/*
* Second hash step
* enc: C_R = C_M - H_{K_H}(T, C_L)
* dec: P_R = P_M - H_{K_H}(T, P_L)
*/
le128_sub(&rbuf.bignum, &rbuf.bignum, &header_hash);
if (dst->length >= req->cryptlen &&
dst->offset + req->cryptlen <= PAGE_SIZE) {
/* Fast path for single-page destination */
struct page *page = sg_page(dst);
void *virt = kmap_local_page(page) + dst->offset;
nhpoly1305_init(&rctx->u.hash_ctx);
nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
memcpy(virt + bulk_len, &rbuf.bignum, sizeof(le128));
flush_dcache_page(page);
kunmap_local(virt);
} else {
/* Slow path that works for any destination scatterlist */
adiantum_hash_message(req, dst, &msg_hash);
le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
memcpy_to_sglist(dst, bulk_len, &rbuf.bignum, sizeof(le128));
}
return 0;
}
static int adiantum_encrypt(struct skcipher_request *req)
@@ -431,8 +501,6 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
struct crypto_shash *hash;
unsigned int subreq_size;
int err;
streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
@@ -445,32 +513,18 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
goto err_free_streamcipher;
}
hash = crypto_spawn_shash(&ictx->hash_spawn);
if (IS_ERR(hash)) {
err = PTR_ERR(hash);
goto err_free_blockcipher;
}
tctx->streamcipher = streamcipher;
tctx->blockcipher = blockcipher;
tctx->hash = hash;
BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
sizeof(struct adiantum_request_ctx));
subreq_size = max(sizeof_field(struct adiantum_request_ctx,
u.hash_desc) +
crypto_shash_descsize(hash),
sizeof_field(struct adiantum_request_ctx,
u.streamcipher_req) +
crypto_skcipher_reqsize(streamcipher));
crypto_skcipher_set_reqsize(tfm,
offsetof(struct adiantum_request_ctx, u) +
subreq_size);
crypto_skcipher_set_reqsize(
tfm, max(sizeof(struct adiantum_request_ctx),
offsetofend(struct adiantum_request_ctx,
u.streamcipher_req) +
crypto_skcipher_reqsize(streamcipher)));
return 0;
err_free_blockcipher:
crypto_free_cipher(blockcipher);
err_free_streamcipher:
crypto_free_skcipher(streamcipher);
return err;
@@ -482,7 +536,6 @@ static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(tctx->streamcipher);
crypto_free_cipher(tctx->blockcipher);
crypto_free_shash(tctx->hash);
}
static void adiantum_free_instance(struct skcipher_instance *inst)
@@ -491,7 +544,6 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
crypto_drop_skcipher(&ictx->streamcipher_spawn);
crypto_drop_cipher(&ictx->blockcipher_spawn);
crypto_drop_shash(&ictx->hash_spawn);
kfree(inst);
}
@@ -499,9 +551,9 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
* Check for a supported set of inner algorithms.
* See the comment at the beginning of this file.
*/
static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
struct crypto_alg *blockcipher_alg,
struct shash_alg *hash_alg)
static bool
adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
struct crypto_alg *blockcipher_alg)
{
if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
@@ -513,21 +565,16 @@ static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamciph
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
return false;
if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
return false;
return true;
}
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
u32 mask;
const char *nhpoly1305_name;
struct skcipher_instance *inst;
struct adiantum_instance_ctx *ictx;
struct skcipher_alg_common *streamcipher_alg;
struct crypto_alg *blockcipher_alg;
struct shash_alg *hash_alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
@@ -542,7 +589,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Stream cipher, e.g. "xchacha12" */
err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
skcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
crypto_attr_alg_name(tb[1]), 0,
mask | CRYPTO_ALG_ASYNC /* sync only */);
if (err)
goto err_free_inst;
streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);
@@ -555,23 +603,21 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
goto err_free_inst;
blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
/* NHPoly1305 ε-∆U hash function */
nhpoly1305_name = crypto_attr_alg_name(tb[3]);
if (nhpoly1305_name == ERR_PTR(-ENOENT))
nhpoly1305_name = "nhpoly1305";
err = crypto_grab_shash(&ictx->hash_spawn,
skcipher_crypto_instance(inst),
nhpoly1305_name, 0, mask);
if (err)
/*
* Originally there was an optional third parameter, for requesting a
* specific implementation of "nhpoly1305" for message hashing. This is
* no longer supported. The best implementation is just always used.
*/
if (crypto_attr_alg_name(tb[3]) != ERR_PTR(-ENOENT)) {
err = -ENOENT;
goto err_free_inst;
hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);
}
/* Check the set of algorithms */
if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
hash_alg)) {
pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg)) {
pr_warn("Unsupported Adiantum instantiation: (%s,%s)\n",
streamcipher_alg->base.cra_name,
blockcipher_alg->cra_name, hash_alg->base.cra_name);
blockcipher_alg->cra_name);
err = -EINVAL;
goto err_free_inst;
}
@@ -584,10 +630,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"adiantum(%s,%s,%s)",
streamcipher_alg->base.cra_driver_name,
blockcipher_alg->cra_driver_name,
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
"adiantum(%s,%s)", streamcipher_alg->base.cra_driver_name,
blockcipher_alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
@@ -596,12 +640,12 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
/*
* The block cipher is only invoked once per message, so for long
* messages (e.g. sectors for disk encryption) its performance doesn't
* matter as much as that of the stream cipher and hash function. Thus,
* weigh the block cipher's ->cra_priority less.
* matter as much as that of the stream cipher. Thus, weigh the block
* cipher's ->cra_priority less.
*/
inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
2 * hash_alg->base.cra_priority +
blockcipher_alg->cra_priority) / 7;
blockcipher_alg->cra_priority) /
5;
inst->alg.setkey = adiantum_setkey;
inst->alg.encrypt = adiantum_encrypt;
@@ -622,7 +666,7 @@ err_free_inst:
return err;
}
/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
/* adiantum(streamcipher_name, blockcipher_name) */
static struct crypto_template adiantum_tmpl = {
.name = "adiantum",
.create = adiantum_create,

View File

@@ -62,7 +62,7 @@ static __always_inline void crypto_aegis_aesenc(union aegis_block *dst,
const union aegis_block *key)
{
const u8 *s = src->bytes;
const u32 *t = crypto_ft_tab[0];
const u32 *t = aes_enc_tab;
u32 d0, d1, d2, d3;
d0 = t[s[ 0]] ^ rol32(t[s[ 5]], 8) ^ rol32(t[s[10]], 16) ^ rol32(t[s[15]], 24);

66
crypto/aes.c Normal file
View File

@@ -0,0 +1,66 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Crypto API support for AES block cipher
*
* Copyright 2026 Google LLC
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
static_assert(__alignof__(struct aes_key) <= CRYPTO_MINALIGN);
static int crypto_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aes_key *key = crypto_tfm_ctx(tfm);
return aes_preparekey(key, in_key, key_len);
}
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct aes_key *key = crypto_tfm_ctx(tfm);
aes_encrypt(key, out, in);
}
static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct aes_key *key = crypto_tfm_ctx(tfm);
aes_decrypt(key, out, in);
}
static struct crypto_alg alg = {
.cra_name = "aes",
.cra_driver_name = "aes-lib",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_key),
.cra_module = THIS_MODULE,
.cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = crypto_aes_setkey,
.cia_encrypt = crypto_aes_encrypt,
.cia_decrypt = crypto_aes_decrypt } }
};
static int __init crypto_aes_mod_init(void)
{
return crypto_register_alg(&alg);
}
module_init(crypto_aes_mod_init);
static void __exit crypto_aes_mod_exit(void)
{
crypto_unregister_alg(&alg);
}
module_exit(crypto_aes_mod_exit);
MODULE_DESCRIPTION("Crypto API support for AES block cipher");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("aes-lib");

File diff suppressed because it is too large Load Diff

View File

@@ -1,83 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Scalar fixed time AES core transform
*
* Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
return aes_expandkey(ctx, in_key, key_len);
}
static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags;
/*
* Temporarily disable interrupts to avoid races where cachelines are
* evicted when the CPU is interrupted to do something else.
*/
local_irq_save(flags);
aes_encrypt(ctx, out, in);
local_irq_restore(flags);
}
static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags;
/*
* Temporarily disable interrupts to avoid races where cachelines are
* evicted when the CPU is interrupted to do something else.
*/
local_irq_save(flags);
aes_decrypt(ctx, out, in);
local_irq_restore(flags);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-fixed-time",
.cra_priority = 100 + 1,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
.cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
.cra_cipher.cia_setkey = aesti_set_key,
.cra_cipher.cia_encrypt = aesti_encrypt,
.cra_cipher.cia_decrypt = aesti_decrypt
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Generic fixed time AES");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

View File

@@ -293,7 +293,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!alg)
return -ENOENT;
/* We can not unregister core algorithms such as aes-generic.
/* We can not unregister core algorithms such as aes.
* We would loose the reference in the crypto_alg_list to this algorithm
* if we try to unregister. Unregistering such an algorithm without
* removing the module is not possible, so we restrict to crypto

View File

@@ -14,27 +14,17 @@
#include <crypto/df_sp80090a.h>
#include <crypto/internal/drbg.h>
static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
const unsigned char *key,
u8 keylen);
static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
const unsigned char *key, u8 keylen)
{
aes_expandkey(aesctx, key, keylen);
}
static void drbg_kcapi_sym(struct crypto_aes_ctx *aesctx,
unsigned char *outval,
static void drbg_kcapi_sym(struct aes_enckey *aeskey, unsigned char *outval,
const struct drbg_string *in, u8 blocklen_bytes)
{
/* there is only component in *in */
BUG_ON(in->len < blocklen_bytes);
aes_encrypt(aesctx, outval, in->buf);
aes_encrypt(aeskey, outval, in->buf);
}
/* BCC function for CTR DRBG as defined in 10.4.3 */
static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
static void drbg_ctr_bcc(struct aes_enckey *aeskey,
unsigned char *out, const unsigned char *key,
struct list_head *in,
u8 blocklen_bytes,
@@ -47,7 +37,7 @@ static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
drbg_string_fill(&data, out, blocklen_bytes);
/* 10.4.3 step 2 / 4 */
drbg_kcapi_symsetkey(aesctx, key, keylen);
aes_prepareenckey(aeskey, key, keylen);
list_for_each_entry(curr, in, list) {
const unsigned char *pos = curr->buf;
size_t len = curr->len;
@@ -56,7 +46,7 @@ static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
/* 10.4.3 step 4.2 */
if (blocklen_bytes == cnt) {
cnt = 0;
drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
drbg_kcapi_sym(aeskey, out, &data, blocklen_bytes);
}
out[cnt] ^= *pos;
pos++;
@@ -66,7 +56,7 @@ static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
}
/* 10.4.3 step 4.2 for last block */
if (cnt)
drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
drbg_kcapi_sym(aeskey, out, &data, blocklen_bytes);
}
/*
@@ -110,7 +100,7 @@ static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
*/
/* Derivation Function for CTR DRBG as defined in 10.4.2 */
int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx,
int crypto_drbg_ctr_df(struct aes_enckey *aeskey,
unsigned char *df_data, size_t bytes_to_return,
struct list_head *seedlist,
u8 blocklen_bytes,
@@ -187,7 +177,7 @@ int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx,
*/
drbg_cpu_to_be32(i, iv);
/* 10.4.2 step 9.2 -- BCC and concatenation with temp */
drbg_ctr_bcc(aesctx, temp + templen, K, &bcc_list,
drbg_ctr_bcc(aeskey, temp + templen, K, &bcc_list,
blocklen_bytes, keylen);
/* 10.4.2 step 9.3 */
i++;
@@ -201,7 +191,7 @@ int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx,
/* 10.4.2 step 12: overwriting of outval is implemented in next step */
/* 10.4.2 step 13 */
drbg_kcapi_symsetkey(aesctx, temp, keylen);
aes_prepareenckey(aeskey, temp, keylen);
while (generated_len < bytes_to_return) {
short blocklen = 0;
/*
@@ -209,7 +199,7 @@ int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx,
* implicit as the key is only drbg_blocklen in size based on
* the implementation of the cipher function callback
*/
drbg_kcapi_sym(aesctx, X, &cipherin, blocklen_bytes);
drbg_kcapi_sym(aeskey, X, &cipherin, blocklen_bytes);
blocklen = (blocklen_bytes <
(bytes_to_return - generated_len)) ?
blocklen_bytes :

View File

@@ -1505,9 +1505,9 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
#ifdef CONFIG_CRYPTO_DRBG_CTR
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
struct crypto_aes_ctx *aesctx = (struct crypto_aes_ctx *)drbg->priv_data;
struct aes_enckey *aeskey = drbg->priv_data;
kfree(aesctx);
kfree(aeskey);
drbg->priv_data = NULL;
if (drbg->ctr_handle)
@@ -1526,16 +1526,16 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
struct crypto_aes_ctx *aesctx;
struct aes_enckey *aeskey;
struct crypto_skcipher *sk_tfm;
struct skcipher_request *req;
unsigned int alignmask;
char ctr_name[CRYPTO_MAX_ALG_NAME];
aesctx = kzalloc(sizeof(*aesctx), GFP_KERNEL);
if (!aesctx)
aeskey = kzalloc(sizeof(*aeskey), GFP_KERNEL);
if (!aeskey)
return -ENOMEM;
drbg->priv_data = aesctx;
drbg->priv_data = aeskey;
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {

View File

@@ -1,255 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost--universal hash function for Adiantum
*
* Copyright 2018 Google LLC
*/
/*
* "NHPoly1305" is the main component of Adiantum hashing.
* Specifically, it is the calculation
*
* H_L Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
*
* from the procedure in section 6.4 of the Adiantum paper [1]. It is an
* ε-almost--universal (ε-U) hash function for equal-length inputs over
* Z/(2^{128}Z), where the "" operation is addition. It hashes 1024-byte
* chunks of the input with the NH hash function [2], reducing the input length
* by 32x. The resulting NH digests are evaluated as a polynomial in
* GF(2^{130}-5), like in the Poly1305 MAC [3]. Note that the polynomial
* evaluation by itself would suffice to achieve the ε-U property; NH is used
* for performance since it's over twice as fast as Poly1305.
*
* This is *not* a cryptographic hash function; do not use it as such!
*
* [1] Adiantum: length-preserving encryption for entry-level processors
* (https://eprint.iacr.org/2018/720.pdf)
* [2] UMAC: Fast and Secure Message Authentication
* (https://fastcrypto.org/umac/umac_proc.pdf)
* [3] The Poly1305-AES message-authentication code
* (https://cr.yp.to/mac/poly1305-20050329.pdf)
*/
#include <linux/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/nhpoly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
static void nh_generic(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES])
{
u64 sums[4] = { 0, 0, 0, 0 };
BUILD_BUG_ON(NH_PAIR_STRIDE != 2);
BUILD_BUG_ON(NH_NUM_PASSES != 4);
while (message_len) {
u32 m0 = get_unaligned_le32(message + 0);
u32 m1 = get_unaligned_le32(message + 4);
u32 m2 = get_unaligned_le32(message + 8);
u32 m3 = get_unaligned_le32(message + 12);
sums[0] += (u64)(u32)(m0 + key[ 0]) * (u32)(m2 + key[ 2]);
sums[1] += (u64)(u32)(m0 + key[ 4]) * (u32)(m2 + key[ 6]);
sums[2] += (u64)(u32)(m0 + key[ 8]) * (u32)(m2 + key[10]);
sums[3] += (u64)(u32)(m0 + key[12]) * (u32)(m2 + key[14]);
sums[0] += (u64)(u32)(m1 + key[ 1]) * (u32)(m3 + key[ 3]);
sums[1] += (u64)(u32)(m1 + key[ 5]) * (u32)(m3 + key[ 7]);
sums[2] += (u64)(u32)(m1 + key[ 9]) * (u32)(m3 + key[11]);
sums[3] += (u64)(u32)(m1 + key[13]) * (u32)(m3 + key[15]);
key += NH_MESSAGE_UNIT / sizeof(key[0]);
message += NH_MESSAGE_UNIT;
message_len -= NH_MESSAGE_UNIT;
}
hash[0] = cpu_to_le64(sums[0]);
hash[1] = cpu_to_le64(sums[1]);
hash[2] = cpu_to_le64(sums[2]);
hash[3] = cpu_to_le64(sums[3]);
}
/* Pass the next NH hash value through Poly1305 */
static void process_nh_hash_value(struct nhpoly1305_state *state,
const struct nhpoly1305_key *key)
{
BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash,
NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1);
}
/*
* Feed the next portion of the source data, as a whole number of 16-byte
* "NH message units", through NH and Poly1305. Each NH hash is taken over
* 1024 bytes, except possibly the final one which is taken over a multiple of
* 16 bytes up to 1024. Also, in the case where data is passed in misaligned
* chunks, we combine partial hashes; the end result is the same either way.
*/
static void nhpoly1305_units(struct nhpoly1305_state *state,
const struct nhpoly1305_key *key,
const u8 *src, unsigned int srclen, nh_t nh_fn)
{
do {
unsigned int bytes;
if (state->nh_remaining == 0) {
/* Starting a new NH message */
bytes = min_t(unsigned int, srclen, NH_MESSAGE_BYTES);
nh_fn(key->nh_key, src, bytes, state->nh_hash);
state->nh_remaining = NH_MESSAGE_BYTES - bytes;
} else {
/* Continuing a previous NH message */
__le64 tmp_hash[NH_NUM_PASSES];
unsigned int pos;
int i;
pos = NH_MESSAGE_BYTES - state->nh_remaining;
bytes = min(srclen, state->nh_remaining);
nh_fn(&key->nh_key[pos / 4], src, bytes, tmp_hash);
for (i = 0; i < NH_NUM_PASSES; i++)
le64_add_cpu(&state->nh_hash[i],
le64_to_cpu(tmp_hash[i]));
state->nh_remaining -= bytes;
}
if (state->nh_remaining == 0)
process_nh_hash_value(state, key);
src += bytes;
srclen -= bytes;
} while (srclen);
}
int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct nhpoly1305_key *ctx = crypto_shash_ctx(tfm);
int i;
if (keylen != NHPOLY1305_KEY_SIZE)
return -EINVAL;
poly1305_core_setkey(&ctx->poly_key, key);
key += POLY1305_BLOCK_SIZE;
for (i = 0; i < NH_KEY_WORDS; i++)
ctx->nh_key[i] = get_unaligned_le32(key + i * sizeof(u32));
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_setkey);
int crypto_nhpoly1305_init(struct shash_desc *desc)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
poly1305_core_init(&state->poly_state);
state->buflen = 0;
state->nh_remaining = 0;
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_init);
int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
const u8 *src, unsigned int srclen,
nh_t nh_fn)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
unsigned int bytes;
if (state->buflen) {
bytes = min(srclen, (int)NH_MESSAGE_UNIT - state->buflen);
memcpy(&state->buffer[state->buflen], src, bytes);
state->buflen += bytes;
if (state->buflen < NH_MESSAGE_UNIT)
return 0;
nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
nh_fn);
state->buflen = 0;
src += bytes;
srclen -= bytes;
}
if (srclen >= NH_MESSAGE_UNIT) {
bytes = round_down(srclen, NH_MESSAGE_UNIT);
nhpoly1305_units(state, key, src, bytes, nh_fn);
src += bytes;
srclen -= bytes;
}
if (srclen) {
memcpy(state->buffer, src, srclen);
state->buflen = srclen;
}
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_update_helper);
int crypto_nhpoly1305_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
return crypto_nhpoly1305_update_helper(desc, src, srclen, nh_generic);
}
EXPORT_SYMBOL(crypto_nhpoly1305_update);
int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, nh_t nh_fn)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
if (state->buflen) {
memset(&state->buffer[state->buflen], 0,
NH_MESSAGE_UNIT - state->buflen);
nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
nh_fn);
}
if (state->nh_remaining)
process_nh_hash_value(state, key);
poly1305_core_emit(&state->poly_state, NULL, dst);
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_final_helper);
int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst)
{
return crypto_nhpoly1305_final_helper(desc, dst, nh_generic);
}
EXPORT_SYMBOL(crypto_nhpoly1305_final);
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-generic",
.base.cra_priority = 100,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = crypto_nhpoly1305_update,
.final = crypto_nhpoly1305_final,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-generic");

View File

@@ -4061,14 +4061,14 @@ static int alg_test_null(const struct alg_test_desc *desc,
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "adiantum(xchacha12,aes)",
.generic_driver = "adiantum(xchacha12-lib,aes-generic,nhpoly1305-generic)",
.generic_driver = "adiantum(xchacha12-lib,aes-lib)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
},
}, {
.alg = "adiantum(xchacha20,aes)",
.generic_driver = "adiantum(xchacha20-lib,aes-generic,nhpoly1305-generic)",
.generic_driver = "adiantum(xchacha20-lib,aes-lib)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
@@ -4088,7 +4088,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.generic_driver = "authenc(hmac-sha1-lib,cbc(aes-generic))",
.generic_driver = "authenc(hmac-sha1-lib,cbc(aes-lib))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4139,7 +4139,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.generic_driver = "authenc(hmac-sha256-lib,cbc(aes-generic))",
.generic_driver = "authenc(hmac-sha256-lib,cbc(aes-lib))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4165,7 +4165,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha256),cts(cbc(aes)))",
.generic_driver = "authenc(hmac-sha256-lib,cts(cbc(aes-generic)))",
.generic_driver = "authenc(hmac-sha256-lib,cts(cbc(aes-lib)))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(krb5_test_aes128_cts_hmac_sha256_128)
@@ -4194,7 +4194,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha384),cts(cbc(aes)))",
.generic_driver = "authenc(hmac-sha384-lib,cts(cbc(aes-generic)))",
.generic_driver = "authenc(hmac-sha384-lib,cts(cbc(aes-lib)))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(krb5_test_aes256_cts_hmac_sha384_192)
@@ -4205,7 +4205,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),cbc(aes))",
.generic_driver = "authenc(hmac-sha512-lib,cbc(aes-generic))",
.generic_driver = "authenc(hmac-sha512-lib,cbc(aes-lib))",
.fips_allowed = 1,
.test = alg_test_aead,
.suite = {
@@ -4267,6 +4267,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "cbc(aes)",
.generic_driver = "cbc(aes-lib)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -4362,6 +4363,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
#endif
.alg = "cbcmac(aes)",
.generic_driver = "cbcmac(aes-lib)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_cbcmac_tv_template)
@@ -4374,7 +4376,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ccm(aes)",
.generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
.generic_driver = "ccm_base(ctr(aes-lib),cbcmac(aes-lib))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4402,6 +4404,7 @@ static const struct alg_test_desc alg_test_descs[] = {
},
}, {
.alg = "cmac(aes)",
.generic_driver = "cmac(aes-lib)",
.fips_allowed = 1,
.test = alg_test_hash,
.suite = {
@@ -4443,6 +4446,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ctr(aes)",
.generic_driver = "ctr(aes-lib)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -4533,6 +4537,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
#endif
.alg = "cts(cbc(aes))",
.generic_driver = "cts(cbc(aes-lib))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -4689,6 +4694,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
}, {
.alg = "ecb(aes)",
.generic_driver = "ecb(aes-lib)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -4881,7 +4887,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)",
.generic_driver = "essiv(authenc(hmac-sha256-lib,cbc(aes-generic)),sha256-lib)",
.generic_driver = "essiv(authenc(hmac-sha256-lib,cbc(aes-lib)),sha256-lib)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4889,7 +4895,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "essiv(cbc(aes),sha256)",
.generic_driver = "essiv(cbc(aes-generic),sha256-lib)",
.generic_driver = "essiv(cbc(aes-lib),sha256-lib)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -4934,7 +4940,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
.alg = "gcm(aes)",
.generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
.generic_driver = "gcm_base(ctr(aes-lib),ghash-generic)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4962,7 +4968,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hctr2(aes)",
.generic_driver = "hctr2_base(xctr(aes-generic),polyval-lib)",
.generic_driver = "hctr2_base(xctr(aes-lib),polyval-lib)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_hctr2_tv_template)
@@ -5080,7 +5086,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite.aead = __VECS(krb5_test_camellia_cts_cmac)
}, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
.generic_driver = "lrw(ecb(aes-lib))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_lrw_tv_template)
@@ -5172,12 +5178,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(michael_mic_tv_template)
}
}, {
.alg = "nhpoly1305",
.test = alg_test_hash,
.suite = {
.hash = __VECS(nhpoly1305_tv_template)
}
}, {
.alg = "p1363(ecdsa-nist-p192)",
.test = alg_test_null,
@@ -5275,6 +5275,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "rfc3686(ctr(aes))",
.generic_driver = "rfc3686(ctr(aes-lib))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -5288,7 +5289,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4106(gcm(aes))",
.generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
.generic_driver = "rfc4106(gcm_base(ctr(aes-lib),ghash-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -5300,7 +5301,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4309(ccm(aes))",
.generic_driver = "rfc4309(ccm_base(ctr(aes-generic),cbcmac(aes-generic)))",
.generic_driver = "rfc4309(ccm_base(ctr(aes-lib),cbcmac(aes-lib)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -5312,7 +5313,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4543(gcm(aes))",
.generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
.generic_driver = "rfc4543(gcm_base(ctr(aes-lib),ghash-generic))",
.test = alg_test_aead,
.suite = {
.aead = {
@@ -5489,6 +5490,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "xcbc(aes)",
.generic_driver = "xcbc(aes-lib)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_xcbc128_tv_template)
@@ -5515,13 +5517,14 @@ static const struct alg_test_desc alg_test_descs[] = {
},
}, {
.alg = "xctr(aes)",
.generic_driver = "xctr(aes-lib)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_xctr_tv_template)
}
}, {
.alg = "xts(aes)",
.generic_driver = "xts(ecb(aes-generic))",
.generic_driver = "xts(ecb(aes-lib))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {

File diff suppressed because it is too large Load Diff

View File

@@ -126,7 +126,7 @@ struct tpm2_auth {
u8 session_key[SHA256_DIGEST_SIZE];
u8 passphrase[SHA256_DIGEST_SIZE];
int passphrase_len;
struct crypto_aes_ctx aes_ctx;
struct aes_enckey aes_key;
/* saved session attributes: */
u8 attrs;
__be32 ordinal;
@@ -677,8 +677,8 @@ int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
auth->scratch);
len = tpm_buf_read_u16(buf, &offset_p);
aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES);
aescfb_encrypt(&auth->aes_ctx, &buf->data[offset_p],
aes_prepareenckey(&auth->aes_key, auth->scratch, AES_KEY_BYTES);
aescfb_encrypt(&auth->aes_key, &buf->data[offset_p],
&buf->data[offset_p], len,
auth->scratch + AES_KEY_BYTES);
/* reset p to beginning of parameters for HMAC */
@@ -858,8 +858,8 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
auth->scratch);
len = tpm_buf_read_u16(buf, &offset_p);
aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES);
aescfb_decrypt(&auth->aes_ctx, &buf->data[offset_p],
aes_prepareenckey(&auth->aes_key, auth->scratch, AES_KEY_BYTES);
aescfb_decrypt(&auth->aes_key, &buf->data[offset_p],
&buf->data[offset_p], len,
auth->scratch + AES_KEY_BYTES);
}

View File

@@ -491,19 +491,19 @@ static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
unsigned int keylen)
{
struct crypto_aes_ctx ctx;
struct aes_enckey aes;
uint8_t src[16] = { 0 };
int rc;
rc = aes_expandkey(&ctx, key, keylen);
rc = aes_prepareenckey(&aes, key, keylen);
if (rc) {
pr_err("aes_expandkey() failed: %d\n", rc);
pr_err("aes_prepareenckey() failed: %d\n", rc);
return rc;
}
aes_encrypt(&ctx, src, src);
aes_encrypt(&aes, src, src);
crypto4xx_memcpy_to_le32(hash_start, src, 16);
memzero_explicit(&ctx, sizeof(ctx));
memzero_explicit(&aes, sizeof(aes));
return 0;
}

View File

@@ -261,7 +261,7 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
u64 rb_hi = 0x00, rb_lo = 0x87;
struct crypto_aes_ctx aes;
struct aes_enckey aes;
__be64 *gk;
int ret;
@@ -284,7 +284,7 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.aes.key_len = 0;
/* Set the key for the AES cipher used to generate the keys */
ret = aes_expandkey(&aes, key, key_len);
ret = aes_prepareenckey(&aes, key, key_len);
if (ret)
return ret;

View File

@@ -1028,7 +1028,7 @@ static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_aes_ctx aes;
struct aes_key aes;
int ret, i;
u8 *key;
unsigned int keylen;
@@ -1044,9 +1044,9 @@ static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
*/
if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
ret = aes_expandkey(&aes, key, keylen - 8);
ret = aes_preparekey(&aes, key, keylen - 8);
else
ret = aes_expandkey(&aes, key, keylen);
ret = aes_preparekey(&aes, key, keylen);
if (ret)
return ret;
aes_encrypt(&aes, iv, iv);
@@ -3406,7 +3406,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
unsigned int ck_size;
int ret = 0, key_ctx_size = 0;
struct crypto_aes_ctx aes;
struct aes_enckey aes;
aeadctx->enckey_len = 0;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -3444,7 +3444,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
*/
ret = aes_expandkey(&aes, key, keylen);
ret = aes_prepareenckey(&aes, key, keylen);
if (ret) {
aeadctx->enckey_len = 0;
goto out;

View File

@@ -2507,19 +2507,17 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
struct aes_enckey aes;
u32 hashkey[AES_BLOCK_SIZE >> 2];
int ret, i;
ret = aes_expandkey(&aes, key, len);
if (ret) {
memzero_explicit(&aes, sizeof(aes));
ret = aes_prepareenckey(&aes, key, len);
if (ret)
return ret;
}
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
if (ctx->key[i] != get_unaligned((__le32 *)key + i)) {
ctx->base.needs_inv = true;
break;
}
@@ -2527,7 +2525,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
}
for (i = 0; i < len / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
ctx->key[i] = get_unaligned((__le32 *)key + i);
ctx->key_len = len;

View File

@@ -30,7 +30,7 @@ struct safexcel_ahash_ctx {
bool fb_init_done;
bool fb_do_setkey;
struct crypto_aes_ctx *aes;
struct aes_enckey *aes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
struct shash_desc *shdesc;
@@ -1976,7 +1976,7 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
int ret, i;
ret = aes_expandkey(ctx->aes, key, len);
ret = aes_prepareenckey(ctx->aes, key, len);
if (ret)
return ret;
@@ -1990,9 +1990,9 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
ctx->base.ipad.word[i] = swab32(key_tmp[i]);
ret = aes_expandkey(ctx->aes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
ret = aes_prepareenckey(ctx->aes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
if (ret)
return ret;
@@ -2062,12 +2062,12 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
/* precompute the CMAC key material */
ret = aes_expandkey(ctx->aes, key, len);
ret = aes_prepareenckey(ctx->aes, key, len);
if (ret)
return ret;
for (i = 0; i < len / sizeof(u32); i++)
ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]);
ctx->base.ipad.word[i + 8] = get_unaligned_be32(&key[4 * i]);
/* code below borrowed from crypto/cmac.c */
/* encrypt the zero block */

View File

@@ -177,7 +177,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
aes_encrypt(&ctx->akey, (u8 *)tag, (const u8 *)iv);
return 0;
}
@@ -314,7 +314,7 @@ int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
ret = aes_expandkey(&ctx->actx, key, keylen);
ret = aes_prepareenckey(&ctx->akey, key, keylen);
if (ret)
return ret;
@@ -334,7 +334,7 @@ int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
return -EINVAL;
keylen -= 4;
ret = aes_expandkey(&ctx->actx, key, keylen);
ret = aes_prepareenckey(&ctx->akey, key, keylen);
if (ret)
return ret;

View File

@@ -98,7 +98,7 @@ struct omap_aes_ctx {
struct omap_aes_gcm_ctx {
struct omap_aes_ctx octx;
struct crypto_aes_ctx actx;
struct aes_enckey akey;
};
struct omap_aes_reqctx {

View File

@@ -983,27 +983,27 @@ static int starfive_aes_ccm_decrypt(struct aead_request *req)
static int starfive_aes_ecb_init_tfm(struct crypto_skcipher *tfm)
{
return starfive_aes_init_tfm(tfm, "ecb(aes-generic)");
return starfive_aes_init_tfm(tfm, "ecb(aes-lib)");
}
static int starfive_aes_cbc_init_tfm(struct crypto_skcipher *tfm)
{
return starfive_aes_init_tfm(tfm, "cbc(aes-generic)");
return starfive_aes_init_tfm(tfm, "cbc(aes-lib)");
}
static int starfive_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
return starfive_aes_init_tfm(tfm, "ctr(aes-generic)");
return starfive_aes_init_tfm(tfm, "ctr(aes-lib)");
}
static int starfive_aes_ccm_init_tfm(struct crypto_aead *tfm)
{
return starfive_aes_aead_init_tfm(tfm, "ccm_base(ctr(aes-generic),cbcmac(aes-generic))");
return starfive_aes_aead_init_tfm(tfm, "ccm_base(ctr(aes-lib),cbcmac(aes-lib))");
}
static int starfive_aes_gcm_init_tfm(struct crypto_aead *tfm)
{
return starfive_aes_aead_init_tfm(tfm, "gcm_base(ctr(aes-generic),ghash-generic)");
return starfive_aes_aead_init_tfm(tfm, "gcm_base(ctr(aes-lib),ghash-generic)");
}
static struct skcipher_engine_alg skcipher_algs[] = {

View File

@@ -60,7 +60,7 @@ struct xilinx_rng {
void __iomem *rng_base;
struct device *dev;
unsigned char *scratchpadbuf;
struct crypto_aes_ctx *aesctx;
struct aes_enckey *aeskey;
struct mutex lock; /* Protect access to TRNG device */
struct hwrng trng;
};
@@ -198,7 +198,7 @@ static int xtrng_reseed_internal(struct xilinx_rng *rng)
ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true);
if (ret != TRNG_SEED_LEN_BYTES)
return -EINVAL;
ret = crypto_drbg_ctr_df(rng->aesctx, rng->scratchpadbuf,
ret = crypto_drbg_ctr_df(rng->aeskey, rng->scratchpadbuf,
TRNG_SEED_LEN_BYTES, &seedlist, AES_BLOCK_SIZE,
TRNG_SEED_LEN_BYTES);
if (ret)
@@ -349,8 +349,8 @@ static int xtrng_probe(struct platform_device *pdev)
return PTR_ERR(rng->rng_base);
}
rng->aesctx = devm_kzalloc(&pdev->dev, sizeof(*rng->aesctx), GFP_KERNEL);
if (!rng->aesctx)
rng->aeskey = devm_kzalloc(&pdev->dev, sizeof(*rng->aeskey), GFP_KERNEL);
if (!rng->aeskey)
return -ENOMEM;
sb_size = crypto_drbg_ctr_df_datalen(TRNG_SEED_LEN_BYTES, AES_BLOCK_SIZE);

View File

@@ -170,7 +170,7 @@ static int ch_ipsec_setkey(struct xfrm_state *x,
unsigned char *key = x->aead->alg_key;
int ck_size, key_ctx_size = 0;
unsigned char ghash_h[AEAD_H_SIZE];
struct crypto_aes_ctx aes;
struct aes_enckey aes;
int ret = 0;
if (keylen > 3) {
@@ -204,7 +204,7 @@ static int ch_ipsec_setkey(struct xfrm_state *x,
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
*/
ret = aes_expandkey(&aes, key, keylen);
ret = aes_prepareenckey(&aes, key, keylen);
if (ret) {
sa_entry->enckey_len = 0;
goto out;

View File

@@ -76,7 +76,7 @@ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
struct ktls_key_ctx *kctx = &tx_info->key_ctx;
struct crypto_aes_ctx aes_ctx;
struct aes_enckey aes;
unsigned char *key, *salt;
switch (crypto_info->cipher_type) {
@@ -138,13 +138,13 @@ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
* It will go in key context
*/
ret = aes_expandkey(&aes_ctx, key, keylen);
ret = aes_prepareenckey(&aes, key, keylen);
if (ret)
goto out;
memset(ghash_h, 0, ghash_size);
aes_encrypt(&aes_ctx, ghash_h, ghash_h);
memzero_explicit(&aes_ctx, sizeof(aes_ctx));
aes_encrypt(&aes, ghash_h, ghash_h);
memzero_explicit(&aes, sizeof(aes));
/* fill the Key context */
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {

View File

@@ -247,7 +247,7 @@ static int chtls_key_info(struct chtls_sock *csk,
unsigned char *key_p, *salt;
unsigned char ghash_h[AEAD_H_SIZE];
int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
struct crypto_aes_ctx aes;
struct aes_enckey aes;
int ret;
key_ctx_size = sizeof(struct _key_ctx) +
@@ -291,7 +291,7 @@ static int chtls_key_info(struct chtls_sock *csk,
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
*/
ret = aes_expandkey(&aes, key, keylen);
ret = aes_prepareenckey(&aes, key, keylen);
if (ret)
return ret;

View File

@@ -504,15 +504,15 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
static int vsc8584_macsec_derive_key(const u8 *key, u16 key_len, u8 hkey[16])
{
const u8 input[AES_BLOCK_SIZE] = {0};
struct crypto_aes_ctx ctx;
struct aes_enckey aes;
int ret;
ret = aes_expandkey(&ctx, key, key_len);
ret = aes_prepareenckey(&aes, key, key_len);
if (ret)
return ret;
aes_encrypt(&ctx, hkey, input);
memzero_explicit(&ctx, sizeof(ctx));
aes_encrypt(&aes, hkey, input);
memzero_explicit(&aes, sizeof(aes));
return 0;
}

View File

@@ -637,11 +637,11 @@ exit:
/****************************************/
static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
{
struct crypto_aes_ctx ctx;
struct aes_enckey aes;
aes_expandkey(&ctx, key, 16);
aes_encrypt(&ctx, ciphertext, data);
memzero_explicit(&ctx, sizeof(ctx));
aes_prepareenckey(&aes, key, 16);
aes_encrypt(&aes, ciphertext, data);
memzero_explicit(&aes, sizeof(aes));
}
/************************************************/
@@ -1406,13 +1406,13 @@ static void gf_mulx(u8 *pad)
static int omac1_aes_128_vector(u8 *key, size_t num_elem,
u8 *addr[], size_t *len, u8 *mac)
{
struct crypto_aes_ctx ctx;
struct aes_enckey aes;
u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
u8 *pos, *end;
size_t i, e, left, total_len;
int ret;
ret = aes_expandkey(&ctx, key, 16);
ret = aes_prepareenckey(&aes, key, 16);
if (ret)
return -1;
memset(cbc, 0, AES_BLOCK_SIZE);
@@ -1436,12 +1436,12 @@ static int omac1_aes_128_vector(u8 *key, size_t num_elem,
}
}
if (left > AES_BLOCK_SIZE)
aes_encrypt(&ctx, cbc, cbc);
aes_encrypt(&aes, cbc, cbc);
left -= AES_BLOCK_SIZE;
}
memset(pad, 0, AES_BLOCK_SIZE);
aes_encrypt(&ctx, pad, pad);
aes_encrypt(&aes, pad, pad);
gf_mulx(pad);
if (left || total_len == 0) {
@@ -1459,8 +1459,8 @@ static int omac1_aes_128_vector(u8 *key, size_t num_elem,
for (i = 0; i < AES_BLOCK_SIZE; i++)
pad[i] ^= cbc[i];
aes_encrypt(&ctx, pad, mac);
memzero_explicit(&ctx, sizeof(ctx));
aes_encrypt(&aes, pad, mac);
memzero_explicit(&aes, sizeof(aes));
return 0;
}

View File

@@ -18,6 +18,103 @@
#define AES_MAX_KEYLENGTH (15 * 16)
#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
/*
* The POWER8 VSX optimized AES assembly code is borrowed from OpenSSL and
* inherits OpenSSL's AES_KEY format, which stores the number of rounds after
* the round keys. That assembly code is difficult to change. So for
* compatibility purposes we reserve space for the extra nrounds field on PPC64.
*
* Note: when prepared for decryption, the round keys are just the reversed
* standard round keys, not the round keys for the Equivalent Inverse Cipher.
*/
struct p8_aes_key {
u32 rndkeys[AES_MAX_KEYLENGTH_U32];
int nrounds;
};
union aes_enckey_arch {
u32 rndkeys[AES_MAX_KEYLENGTH_U32];
#ifdef CONFIG_CRYPTO_LIB_AES_ARCH
#if defined(CONFIG_PPC) && defined(CONFIG_SPE)
/* Used unconditionally (when SPE AES code is enabled in kconfig) */
u32 spe_enc_key[AES_MAX_KEYLENGTH_U32] __aligned(8);
#elif defined(CONFIG_PPC)
/*
* Kernels that include the POWER8 VSX optimized AES code use this field
* when that code is usable at key preparation time. Otherwise they
* fall back to rndkeys. In the latter case, p8.nrounds (which doesn't
* overlap rndkeys) is set to 0 to differentiate the two formats.
*/
struct p8_aes_key p8;
#elif defined(CONFIG_S390)
/* Used when the CPU supports CPACF AES for this key's length */
u8 raw_key[AES_MAX_KEY_SIZE];
#elif defined(CONFIG_SPARC64)
/* Used when the CPU supports the SPARC64 AES opcodes */
u64 sparc_rndkeys[AES_MAX_KEYLENGTH / sizeof(u64)];
#endif
#endif /* CONFIG_CRYPTO_LIB_AES_ARCH */
};
union aes_invkey_arch {
u32 inv_rndkeys[AES_MAX_KEYLENGTH_U32];
#ifdef CONFIG_CRYPTO_LIB_AES_ARCH
#if defined(CONFIG_PPC) && defined(CONFIG_SPE)
/* Used unconditionally (when SPE AES code is enabled in kconfig) */
u32 spe_dec_key[AES_MAX_KEYLENGTH_U32] __aligned(8);
#elif defined(CONFIG_PPC)
/* Used conditionally, analogous to aes_enckey_arch::p8 */
struct p8_aes_key p8;
#endif
#endif /* CONFIG_CRYPTO_LIB_AES_ARCH */
};
/**
* struct aes_enckey - An AES key prepared for encryption
* @len: Key length in bytes: 16 for AES-128, 24 for AES-192, 32 for AES-256.
* @nrounds: Number of rounds: 10 for AES-128, 12 for AES-192, 14 for AES-256.
* This is '6 + @len / 4' and is cached so that AES implementations
* that need it don't have to recompute it for each en/decryption.
* @padding: Padding to make offsetof(@k) be a multiple of 16, so that aligning
* this struct to a 16-byte boundary results in @k also being 16-byte
* aligned. Users aren't required to align this struct to 16 bytes,
* but it may slightly improve performance.
* @k: This typically contains the AES round keys as an array of '@nrounds + 1'
* groups of four u32 words. However, architecture-specific implementations
* of AES may store something else here, e.g. just the raw key if it's all
* they need.
*
* Note that this struct is about half the size of struct aes_key. This is
* separate from struct aes_key so that modes that need only AES encryption
* (e.g. AES-GCM, AES-CTR, AES-CMAC, tweak key in AES-XTS) don't incur the time
* and space overhead of computing and caching the decryption round keys.
*
* Note that there's no decryption-only equivalent (i.e. "struct aes_deckey"),
* since (a) it's rare that modes need decryption-only, and (b) some AES
* implementations use the same @k for both encryption and decryption, either
* always or conditionally; in the latter case both @k and @inv_k are needed.
*/
struct aes_enckey {
u32 len;
u32 nrounds;
u32 padding[2];
union aes_enckey_arch k;
};
/**
* struct aes_key - An AES key prepared for encryption and decryption
* @aes_enckey: Common fields and the key prepared for encryption
* @inv_k: This generally contains the round keys for the AES Equivalent
* Inverse Cipher, as an array of '@nrounds + 1' groups of four u32
* words. However, architecture-specific implementations of AES may
* store something else here. For example, they may leave this field
* uninitialized if they use @k for both encryption and decryption.
*/
struct aes_key {
struct aes_enckey; /* Include all fields of aes_enckey. */
union aes_invkey_arch inv_k;
};
/*
* Please ensure that the first two fields are 16-byte aligned
* relative to the start of the structure, i.e., don't move them!
@@ -28,13 +125,10 @@ struct crypto_aes_ctx {
u32 key_length;
};
extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
/*
* validate key length for AES algorithms
*/
static inline int aes_check_keylen(unsigned int keylen)
static inline int aes_check_keylen(size_t keylen)
{
switch (keylen) {
case AES_KEYSIZE_128:
@@ -48,9 +142,6 @@ static inline int aes_check_keylen(unsigned int keylen)
return 0;
}
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len);
/**
* aes_expandkey - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
@@ -68,28 +159,177 @@ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
/**
* aes_encrypt - Encrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the ciphertext
* @in: Buffer containing the plaintext
/*
* The following functions are temporarily exported for use by the AES mode
* implementations in arch/$(SRCARCH)/crypto/. These exports will go away when
* that code is migrated into lib/crypto/.
*/
void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
#ifdef CONFIG_ARM64
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
#elif defined(CONFIG_PPC)
void ppc_expand_key_128(u32 *key_enc, const u8 *key);
void ppc_expand_key_192(u32 *key_enc, const u8 *key);
void ppc_expand_key_256(u32 *key_enc, const u8 *key);
void ppc_generate_decrypt_key(u32 *key_dec, u32 *key_enc, unsigned int key_len);
void ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
u32 bytes);
void ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
u32 bytes);
void ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
u8 *iv);
void ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes,
u8 *iv);
void ppc_crypt_ctr(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
u8 *iv);
void ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
u8 *iv, u32 *key_twk);
void ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes,
u8 *iv, u32 *key_twk);
int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
struct p8_aes_key *key);
int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
struct p8_aes_key *key);
void aes_p8_encrypt(const u8 *in, u8 *out, const struct p8_aes_key *key);
void aes_p8_decrypt(const u8 *in, u8 *out, const struct p8_aes_key *key);
void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
const struct p8_aes_key *key, u8 *iv, const int enc);
void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, size_t len,
const struct p8_aes_key *key, const u8 *iv);
void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
const struct p8_aes_key *key1,
const struct p8_aes_key *key2, u8 *iv);
void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
const struct p8_aes_key *key1,
const struct p8_aes_key *key2, u8 *iv);
#elif defined(CONFIG_SPARC64)
void aes_sparc64_key_expand(const u32 *in_key, u64 *output_key,
unsigned int key_len);
void aes_sparc64_load_encrypt_keys_128(const u64 *key);
void aes_sparc64_load_encrypt_keys_192(const u64 *key);
void aes_sparc64_load_encrypt_keys_256(const u64 *key);
void aes_sparc64_load_decrypt_keys_128(const u64 *key);
void aes_sparc64_load_decrypt_keys_192(const u64 *key);
void aes_sparc64_load_decrypt_keys_256(const u64 *key);
void aes_sparc64_ecb_encrypt_128(const u64 *key, const u64 *input, u64 *output,
unsigned int len);
void aes_sparc64_ecb_encrypt_192(const u64 *key, const u64 *input, u64 *output,
unsigned int len);
void aes_sparc64_ecb_encrypt_256(const u64 *key, const u64 *input, u64 *output,
unsigned int len);
void aes_sparc64_ecb_decrypt_128(const u64 *key, const u64 *input, u64 *output,
unsigned int len);
void aes_sparc64_ecb_decrypt_192(const u64 *key, const u64 *input, u64 *output,
unsigned int len);
void aes_sparc64_ecb_decrypt_256(const u64 *key, const u64 *input, u64 *output,
unsigned int len);
void aes_sparc64_cbc_encrypt_128(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_cbc_encrypt_192(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_cbc_encrypt_256(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_cbc_decrypt_128(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_cbc_decrypt_192(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_cbc_decrypt_256(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_ctr_crypt_128(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_ctr_crypt_192(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input, u64 *output,
unsigned int len, u64 *iv);
#endif
/**
* aes_decrypt - Decrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the plaintext
* @in: Buffer containing the ciphertext
* aes_preparekey() - Prepare an AES key for encryption and decryption
* @key: (output) The key structure to initialize
* @in_key: The raw AES key
* @key_len: Length of the raw key in bytes. Should be either AES_KEYSIZE_128,
* AES_KEYSIZE_192, or AES_KEYSIZE_256.
*
* This prepares an AES key for both the encryption and decryption directions of
* the block cipher. Typically this involves expanding the raw key into both
* the standard round keys and the Equivalent Inverse Cipher round keys, but
* some architecture-specific implementations don't do the full expansion here.
*
* The caller is responsible for zeroizing both the struct aes_key and the raw
* key once they are no longer needed.
*
* If you don't need decryption support, use aes_prepareenckey() instead.
*
* Return: 0 on success or -EINVAL if the given key length is invalid. No other
* errors are possible, so callers that always pass a valid key length
* don't need to check for errors.
*
* Context: Any context.
*/
void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
int aes_preparekey(struct aes_key *key, const u8 *in_key, size_t key_len);
/**
* aes_prepareenckey() - Prepare an AES key for encryption-only
* @key: (output) The key structure to initialize
* @in_key: The raw AES key
* @key_len: Length of the raw key in bytes. Should be either AES_KEYSIZE_128,
* AES_KEYSIZE_192, or AES_KEYSIZE_256.
*
* This prepares an AES key for only the encryption direction of the block
* cipher. Typically this involves expanding the raw key into only the standard
* round keys, resulting in a struct about half the size of struct aes_key.
*
* The caller is responsible for zeroizing both the struct aes_enckey and the
* raw key once they are no longer needed.
*
* Note that while the resulting prepared key supports only AES encryption, it
* can still be used for decrypting in a mode of operation that uses AES in only
* the encryption (forward) direction, for example counter mode.
*
* Return: 0 on success or -EINVAL if the given key length is invalid. No other
* errors are possible, so callers that always pass a valid key length
* don't need to check for errors.
*
* Context: Any context.
*/
int aes_prepareenckey(struct aes_enckey *key, const u8 *in_key, size_t key_len);
typedef union {
const struct aes_enckey *enc_key;
const struct aes_key *full_key;
} aes_encrypt_arg __attribute__ ((__transparent_union__));
/**
* aes_encrypt() - Encrypt a single AES block
* @key: The AES key, as a pointer to either an encryption-only key
* (struct aes_enckey) or a full, bidirectional key (struct aes_key).
* @out: Buffer to store the ciphertext block
* @in: Buffer containing the plaintext block
*
* Context: Any context.
*/
void aes_encrypt(aes_encrypt_arg key, u8 out[at_least AES_BLOCK_SIZE],
const u8 in[at_least AES_BLOCK_SIZE]);
/**
* aes_decrypt() - Decrypt a single AES block
* @key: The AES key, previously initialized by aes_preparekey()
* @out: Buffer to store the plaintext block
* @in: Buffer containing the ciphertext block
*
* Context: Any context.
*/
void aes_decrypt(const struct aes_key *key, u8 out[at_least AES_BLOCK_SIZE],
const u8 in[at_least AES_BLOCK_SIZE]);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
extern const u32 aes_enc_tab[256];
extern const u32 aes_dec_tab[256];
void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
void aescfb_encrypt(const struct aes_enckey *key, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
void aescfb_decrypt(const struct aes_enckey *key, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
#endif

View File

@@ -18,7 +18,7 @@ static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
statelen + blocklen; /* temp */
}
int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
int crypto_drbg_ctr_df(struct aes_enckey *aes,
unsigned char *df_data,
size_t bytes_to_return,
struct list_head *seedlist,

View File

@@ -66,7 +66,7 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
struct aesgcm_ctx {
be128 ghash_key;
struct crypto_aes_ctx aes_ctx;
struct aes_enckey aes_key;
unsigned int authsize;
};

62
include/crypto/mldsa.h Normal file
View File

@@ -0,0 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Support for verifying ML-DSA signatures
*
* Copyright 2025 Google LLC
*/
#ifndef _CRYPTO_MLDSA_H
#define _CRYPTO_MLDSA_H
#include <linux/types.h>
/* Identifier for an ML-DSA parameter set */
enum mldsa_alg {
MLDSA44, /* ML-DSA-44 */
MLDSA65, /* ML-DSA-65 */
MLDSA87, /* ML-DSA-87 */
};
/*
 * Lengths of ML-DSA public keys and signatures in bytes.
 * NOTE(review): values presumably match FIPS 204 Table 2 for the three
 * parameter sets above — confirm against the spec.
 */
#define MLDSA44_PUBLIC_KEY_SIZE 1312
#define MLDSA65_PUBLIC_KEY_SIZE 1952
#define MLDSA87_PUBLIC_KEY_SIZE 2592
#define MLDSA44_SIGNATURE_SIZE 2420
#define MLDSA65_SIGNATURE_SIZE 3309
#define MLDSA87_SIGNATURE_SIZE 4627
/**
 * mldsa_verify() - Verify an ML-DSA signature
 * @alg: The ML-DSA parameter set to use
 * @sig: The signature
 * @sig_len: Length of the signature in bytes. Should match the
 * MLDSA*_SIGNATURE_SIZE constant associated with @alg,
 * otherwise -EBADMSG will be returned.
 * @msg: The message
 * @msg_len: Length of the message in bytes
 * @pk: The public key
 * @pk_len: Length of the public key in bytes. Should match the
 * MLDSA*_PUBLIC_KEY_SIZE constant associated with @alg,
 * otherwise -EBADMSG will be returned.
 *
 * This verifies a signature using pure ML-DSA with the specified parameter set.
 * The context string is assumed to be empty. This corresponds to FIPS 204
 * Algorithm 3 "ML-DSA.Verify" with the ctx parameter set to the empty string
 * and the lengths of the signature and key given explicitly by the caller.
 *
 * Context: Might sleep
 *
 * Return:
 * * 0 if the signature is valid
 * * -EBADMSG if the signature and/or public key is malformed
 * * -EKEYREJECTED if the signature is invalid but otherwise well-formed
 * * -ENOMEM if out of memory so the validity of the signature is unknown
 */
int mldsa_verify(enum mldsa_alg alg, const u8 *sig, size_t sig_len,
const u8 *msg, size_t msg_len, const u8 *pk, size_t pk_len);
#if IS_ENABLED(CONFIG_CRYPTO_LIB_MLDSA_KUNIT_TEST)
/* Internal function, exposed only for unit testing; not part of the API. */
s32 mldsa_use_hint(u8 h, s32 r, s32 gamma2);
#endif
#endif /* _CRYPTO_MLDSA_H */

52
include/crypto/nh.h Normal file
View File

@@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* NH hash function for Adiantum
*/
#ifndef _CRYPTO_NH_H
#define _CRYPTO_NH_H
#include <linux/types.h>
/* NH parameterization: */
/* Endianness: little */
/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
#define NH_PAIR_STRIDE 2
#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
#define NH_NUM_PASSES 4
/* Each pass produces one u64 output word. */
#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
/* Max message size: 1024 bytes (32x compression factor) */
#define NH_NUM_STRIDES 64
#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
/*
 * One key word per message word, plus NH_PAIR_STRIDE * 2 extra words for each
 * Toeplitz pass after the first (each pass shifts the key by one stride).
 */
#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
/**
 * nh() - NH hash function for Adiantum
 * @key: The key. @message_len + 48 bytes of it are used. This is NH_KEY_BYTES
 * if @message_len has its maximum length of NH_MESSAGE_BYTES.
 * @message: The message
 * @message_len: The message length in bytes. Must be a multiple of 16
 * (NH_MESSAGE_UNIT) and at most 1024 (NH_MESSAGE_BYTES).
 * @hash: (output) The resulting hash value
 *
 * Note: the pseudocode for NH in the Adiantum paper iterates over 1024-byte
 * segments of the message, computes a 32-byte hash for each, and returns all
 * the hashes concatenated together. In contrast, this function just hashes one
 * segment and returns one hash. It's the caller's responsibility to call this
 * function for each 1024-byte segment and collect all the hashes.
 *
 * Context: Any context.
 */
void nh(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
#endif /* _CRYPTO_NH_H */

View File

@@ -1,74 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common values and helper functions for the NHPoly1305 hash function.
*/
#ifndef _NHPOLY1305_H
#define _NHPOLY1305_H
#include <crypto/hash.h>
#include <crypto/internal/poly1305.h>
/* NH parameterization: */
/* Endianness: little */
/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
#define NH_PAIR_STRIDE 2
#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
#define NH_NUM_PASSES 4
#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
/* Max message size: 1024 bytes (32x compression factor) */
#define NH_NUM_STRIDES 64
#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
struct nhpoly1305_key {
struct poly1305_core_key poly_key;
u32 nh_key[NH_KEY_WORDS];
};
struct nhpoly1305_state {
/* Running total of polynomial evaluation */
struct poly1305_state poly_state;
/* Partial block buffer */
u8 buffer[NH_MESSAGE_UNIT];
unsigned int buflen;
/*
* Number of bytes remaining until the current NH message reaches
* NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
*/
unsigned int nh_remaining;
__le64 nh_hash[NH_NUM_PASSES];
};
typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen);
int crypto_nhpoly1305_init(struct shash_desc *desc);
int crypto_nhpoly1305_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen);
int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
const u8 *src, unsigned int srclen,
nh_t nh_fn);
int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst);
int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst,
nh_t nh_fn);
#endif /* _NHPOLY1305_H */

View File

@@ -11,6 +11,18 @@ config CRYPTO_LIB_UTILS
config CRYPTO_LIB_AES
tristate
config CRYPTO_LIB_AES_ARCH
bool
depends on CRYPTO_LIB_AES && !UML && !KMSAN
default y if ARM
default y if ARM64
default y if PPC && (SPE || (PPC64 && VSX))
default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
default y if S390
default y if SPARC64
default y if X86
config CRYPTO_LIB_AESCFB
tristate
select CRYPTO_LIB_AES
@@ -101,6 +113,26 @@ config CRYPTO_LIB_MD5_ARCH
default y if PPC
default y if SPARC64
config CRYPTO_LIB_MLDSA
tristate
select CRYPTO_LIB_SHA3
help
The ML-DSA library functions. Select this if your module uses any of
the functions from <crypto/mldsa.h>.
config CRYPTO_LIB_NH
tristate
help
Implementation of the NH almost-universal hash function, specifically
the variant of NH used in Adiantum.
config CRYPTO_LIB_NH_ARCH
bool
depends on CRYPTO_LIB_NH && !UML && !KMSAN
default y if ARM && KERNEL_MODE_NEON
default y if ARM64 && KERNEL_MODE_NEON
default y if X86_64
config CRYPTO_LIB_POLY1305
tristate
help

View File

@@ -15,8 +15,47 @@ obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o
libcryptoutils-y := memneq.o utils.o
obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
libaes-y := aes.o
################################################################################
obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
libaes-y := aes.o
ifeq ($(CONFIG_CRYPTO_LIB_AES_ARCH),y)
CFLAGS_aes.o += -I$(src)/$(SRCARCH)
libaes-$(CONFIG_ARM) += arm/aes-cipher-core.o
ifeq ($(CONFIG_ARM64),y)
libaes-y += arm64/aes-cipher-core.o
libaes-$(CONFIG_KERNEL_MODE_NEON) += arm64/aes-ce-core.o
endif
ifeq ($(CONFIG_PPC),y)
ifeq ($(CONFIG_SPE),y)
libaes-y += powerpc/aes-spe-core.o \
powerpc/aes-spe-keys.o \
powerpc/aes-spe-modes.o \
powerpc/aes-tab-4k.o
else
libaes-y += powerpc/aesp8-ppc.o
aes-perlasm-flavour-y := linux-ppc64
aes-perlasm-flavour-$(CONFIG_PPC64_ELF_ABI_V2) := linux-ppc64-elfv2
aes-perlasm-flavour-$(CONFIG_CPU_LITTLE_ENDIAN) := linux-ppc64le
quiet_cmd_perlasm_aes = PERLASM $@
cmd_perlasm_aes = $(PERL) $< $(aes-perlasm-flavour-y) $@
# Use if_changed instead of cmd, in case the flavour changed.
$(obj)/powerpc/aesp8-ppc.S: $(src)/powerpc/aesp8-ppc.pl FORCE
$(call if_changed,perlasm_aes)
targets += powerpc/aesp8-ppc.S
OBJECT_FILES_NON_STANDARD_powerpc/aesp8-ppc.o := y
endif # !CONFIG_SPE
endif # CONFIG_PPC
libaes-$(CONFIG_RISCV) += riscv/aes-riscv64-zvkned.o
libaes-$(CONFIG_SPARC) += sparc/aes_asm.o
libaes-$(CONFIG_X86) += x86/aes-aesni.o
endif # CONFIG_CRYPTO_LIB_AES_ARCH
################################################################################
obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o
libaescfb-y := aescfb.o
@@ -126,6 +165,22 @@ endif # CONFIG_CRYPTO_LIB_MD5_ARCH
################################################################################
obj-$(CONFIG_CRYPTO_LIB_MLDSA) += libmldsa.o
libmldsa-y := mldsa.o
################################################################################
obj-$(CONFIG_CRYPTO_LIB_NH) += libnh.o
libnh-y := nh.o
ifeq ($(CONFIG_CRYPTO_LIB_NH_ARCH),y)
CFLAGS_nh.o += -I$(src)/$(SRCARCH)
libnh-$(CONFIG_ARM) += arm/nh-neon-core.o
libnh-$(CONFIG_ARM64) += arm64/nh-neon-core.o
libnh-$(CONFIG_X86) += x86/nh-sse2.o x86/nh-avx2.o
endif
################################################################################
obj-$(CONFIG_CRYPTO_LIB_POLY1305) += libpoly1305.o
libpoly1305-y := poly1305.o
ifeq ($(CONFIG_ARCH_SUPPORTS_INT128),y)

View File

@@ -1,19 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2019 Linaro Ltd <ard.biesheuvel@linaro.org>
* Copyright 2026 Google LLC
*/
#include <crypto/aes.h>
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/unaligned.h>
/*
* Emit the sbox as volatile const to prevent the compiler from doing
* constant folding on sbox references involving fixed indexes.
*/
static volatile const u8 ____cacheline_aligned aes_sbox[] = {
static const u8 ____cacheline_aligned aes_sbox[] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
@@ -48,7 +46,7 @@ static volatile const u8 ____cacheline_aligned aes_sbox[] = {
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
};
static volatile const u8 ____cacheline_aligned aes_inv_sbox[] = {
static const u8 ____cacheline_aligned aes_inv_sbox[] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
@@ -89,6 +87,110 @@ extern const u8 crypto_aes_inv_sbox[256] __alias(aes_inv_sbox);
EXPORT_SYMBOL(crypto_aes_sbox);
EXPORT_SYMBOL(crypto_aes_inv_sbox);
/* aes_enc_tab[i] contains MixColumn([SubByte(i), 0, 0, 0]). */
const u32 ____cacheline_aligned aes_enc_tab[256] = {
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6, 0x0df2f2ff, 0xbd6b6bd6,
0xb16f6fde, 0x54c5c591, 0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56,
0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec, 0x45caca8f, 0x9d82821f,
0x40c9c989, 0x877d7dfa, 0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb,
0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45, 0xbf9c9c23, 0xf7a4a453,
0x967272e4, 0x5bc0c09b, 0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c,
0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83, 0x5c343468, 0xf4a5a551,
0x34e5e5d1, 0x08f1f1f9, 0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a,
0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d, 0x28181830, 0xa1969637,
0x0f05050a, 0xb59a9a2f, 0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df,
0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea, 0x1b090912, 0x9e83831d,
0x742c2c58, 0x2e1a1a34, 0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b,
0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d, 0x7b292952, 0x3ee3e3dd,
0x712f2f5e, 0x97848413, 0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1,
0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6, 0xbe6a6ad4, 0x46cbcb8d,
0xd9bebe67, 0x4b393972, 0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85,
0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed, 0xc5434386, 0xd74d4d9a,
0x55333366, 0x94858511, 0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe,
0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b, 0xf35151a2, 0xfea3a35d,
0xc0404080, 0x8a8f8f05, 0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1,
0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142, 0x30101020, 0x1affffe5,
0x0ef3f3fd, 0x6dd2d2bf, 0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3,
0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e, 0x57c4c493, 0xf2a7a755,
0x827e7efc, 0x473d3d7a, 0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6,
0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3, 0x66222244, 0x7e2a2a54,
0xab90903b, 0x8388880b, 0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428,
0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad, 0x3be0e0db, 0x56323264,
0x4e3a3a74, 0x1e0a0a14, 0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8,
0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4, 0xa8919139, 0xa4959531,
0x37e4e4d3, 0x8b7979f2, 0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda,
0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949, 0xb46c6cd8, 0xfa5656ac,
0x07f4f4f3, 0x25eaeacf, 0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810,
0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c, 0x241c1c38, 0xf1a6a657,
0xc7b4b473, 0x51c6c697, 0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e,
0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f, 0x907070e0, 0x423e3e7c,
0xc4b5b571, 0xaa6666cc, 0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c,
0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969, 0x91868617, 0x58c1c199,
0x271d1d3a, 0xb99e9e27, 0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122,
0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433, 0xb69b9b2d, 0x221e1e3c,
0x92878715, 0x20e9e9c9, 0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5,
0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a, 0xdabfbf65, 0x31e6e6d7,
0xc6424284, 0xb86868d0, 0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e,
0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c,
};
EXPORT_SYMBOL(aes_enc_tab);
/* aes_dec_tab[i] contains InvMixColumn([InvSubByte(i), 0, 0, 0]). */
const u32 ____cacheline_aligned aes_dec_tab[256] = {
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a, 0xcb6bab3b, 0xf1459d1f,
0xab58faac, 0x9303e34b, 0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5,
0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5, 0x495ab1de, 0x671bba25,
0x980eea45, 0xe1c0fe5d, 0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b,
0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295, 0x2d83bed4, 0xd3217458,
0x2969e049, 0x44c8c98e, 0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927,
0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d, 0x184adf63, 0x82311ae5,
0x60335197, 0x457f5362, 0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9,
0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52, 0x23d373ab, 0xe2024b72,
0x578f1fe3, 0x2aab5566, 0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3,
0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed, 0x2b1ccf8a, 0x92b479a7,
0xf0f207f3, 0xa1e2694e, 0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4,
0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4, 0x39ec830b, 0xaaef6040,
0x069f715e, 0x51106ebd, 0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d,
0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060, 0x24fb9819, 0x97e9bdd6,
0xcc434089, 0x779ed967, 0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879,
0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000, 0x83868009, 0x48ed2b32,
0xac70111e, 0x4e725a6c, 0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36,
0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624, 0xb1670a0c, 0x0fe75793,
0xd296eeb4, 0x9e919b1b, 0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c,
0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12, 0x0b0d090e, 0xadc78bf2,
0xb9a8b62d, 0xc8a91e14, 0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3,
0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b, 0x7629438b, 0xdcc623cb,
0x68fcedb6, 0x63f1e4b8, 0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684,
0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7, 0x4b2f9e1d, 0xf330b2dc,
0xec52860d, 0xd0e3c177, 0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947,
0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322, 0xc74e4987, 0xc1d138d9,
0xfea2ca8c, 0x360bd498, 0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f,
0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54, 0xc2138df6, 0xe8b8d890,
0x5ef7392e, 0xf5afc382, 0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf,
0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb, 0x097826cd, 0xf418596e,
0x01b79aec, 0xa89a4f83, 0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef,
0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029, 0xafb2a431, 0x31233f2a,
0x3094a5c6, 0xc066a235, 0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733,
0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117, 0x8dd64d76, 0x4db0ef43,
0x544daacc, 0xdf0496e4, 0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546,
0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb, 0x5a1d67b3, 0x52d2db92,
0x335610e9, 0x1347d66d, 0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb,
0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a, 0x59dfd29c, 0x3f73f255,
0x79ce1418, 0xbf37c773, 0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478,
0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2, 0x72c31d16, 0x0c25e2bc,
0x8b493c28, 0x41950dff, 0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664,
0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0,
};
EXPORT_SYMBOL(aes_dec_tab);
/* Prefetch data into L1 cache. @mem should be cacheline-aligned. */
static __always_inline void aes_prefetch(const void *mem, size_t len)
{
for (size_t i = 0; i < len; i += L1_CACHE_BYTES)
*(volatile const u8 *)(mem + i);
barrier();
}
static u32 mul_by_x(u32 w)
{
u32 x = w & 0x7f7f7f7f;
@@ -145,22 +247,6 @@ static u32 inv_mix_columns(u32 x)
return mix_columns(x ^ y ^ ror32(y, 16));
}
static __always_inline u32 subshift(u32 in[], int pos)
{
return (aes_sbox[in[pos] & 0xff]) ^
(aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
(aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
(aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
}
static __always_inline u32 inv_subshift(u32 in[], int pos)
{
return (aes_inv_sbox[in[pos] & 0xff]) ^
(aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
(aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
(aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
}
static u32 subw(u32 in)
{
return (aes_sbox[in & 0xff]) ^
@@ -169,38 +255,17 @@ static u32 subw(u32 in)
(aes_sbox[(in >> 24) & 0xff] << 24);
}
/**
* aes_expandkey - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
* @in_key: The supplied key.
* @key_len: The length of the supplied key.
*
* Returns 0 on success. The function fails only if an invalid key size (or
* pointer) is supplied.
* The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes
* key schedule plus a 16 bytes key which is used before the first round).
* The decryption key is prepared for the "Equivalent Inverse Cipher" as
* described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
* for the initial combination, the second slot for the first round and so on.
*/
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
static void aes_expandkey_generic(u32 rndkeys[], u32 *inv_rndkeys,
const u8 *in_key, int key_len)
{
u32 kwords = key_len / sizeof(u32);
u32 rc, i, j;
int err;
err = aes_check_keylen(key_len);
if (err)
return err;
ctx->key_length = key_len;
for (i = 0; i < kwords; i++)
ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
rndkeys[i] = get_unaligned_le32(&in_key[i * sizeof(u32)]);
for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) {
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rki = &rndkeys[i * kwords];
u32 *rko = rki + kwords;
rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0];
@@ -229,129 +294,239 @@ int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
* the Inverse Mix Columns transformation to all but the first and
* the last one.
*/
ctx->key_dec[0] = ctx->key_enc[key_len + 24];
ctx->key_dec[1] = ctx->key_enc[key_len + 25];
ctx->key_dec[2] = ctx->key_enc[key_len + 26];
ctx->key_dec[3] = ctx->key_enc[key_len + 27];
if (inv_rndkeys) {
inv_rndkeys[0] = rndkeys[key_len + 24];
inv_rndkeys[1] = rndkeys[key_len + 25];
inv_rndkeys[2] = rndkeys[key_len + 26];
inv_rndkeys[3] = rndkeys[key_len + 27];
for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]);
ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]);
ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]);
ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]);
for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
inv_rndkeys[i] = inv_mix_columns(rndkeys[j]);
inv_rndkeys[i + 1] = inv_mix_columns(rndkeys[j + 1]);
inv_rndkeys[i + 2] = inv_mix_columns(rndkeys[j + 2]);
inv_rndkeys[i + 3] = inv_mix_columns(rndkeys[j + 3]);
}
inv_rndkeys[i] = rndkeys[0];
inv_rndkeys[i + 1] = rndkeys[1];
inv_rndkeys[i + 2] = rndkeys[2];
inv_rndkeys[i + 3] = rndkeys[3];
}
}
ctx->key_dec[i] = ctx->key_enc[0];
ctx->key_dec[i + 1] = ctx->key_enc[1];
ctx->key_dec[i + 2] = ctx->key_enc[2];
ctx->key_dec[i + 3] = ctx->key_enc[3];
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
if (aes_check_keylen(key_len) != 0)
return -EINVAL;
ctx->key_length = key_len;
aes_expandkey_generic(ctx->key_enc, ctx->key_dec, in_key, key_len);
return 0;
}
EXPORT_SYMBOL(aes_expandkey);
/**
* aes_encrypt - Encrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the ciphertext
* @in: Buffer containing the plaintext
*/
void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
static __always_inline u32 enc_quarterround(const u32 w[4], int i, u32 rk)
{
const u32 *rkp = ctx->key_enc + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
int round;
return rk ^ aes_enc_tab[(u8)w[i]] ^
rol32(aes_enc_tab[(u8)(w[(i + 1) % 4] >> 8)], 8) ^
rol32(aes_enc_tab[(u8)(w[(i + 2) % 4] >> 16)], 16) ^
rol32(aes_enc_tab[(u8)(w[(i + 3) % 4] >> 24)], 24);
}
st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
static __always_inline u32 enclast_quarterround(const u32 w[4], int i, u32 rk)
{
return rk ^ ((aes_enc_tab[(u8)w[i]] & 0x0000ff00) >> 8) ^
(aes_enc_tab[(u8)(w[(i + 1) % 4] >> 8)] & 0x0000ff00) ^
((aes_enc_tab[(u8)(w[(i + 2) % 4] >> 16)] & 0x0000ff00) << 8) ^
((aes_enc_tab[(u8)(w[(i + 3) % 4] >> 24)] & 0x0000ff00) << 16);
}
static void __maybe_unused aes_encrypt_generic(const u32 rndkeys[], int nrounds,
u8 out[AES_BLOCK_SIZE],
const u8 in[AES_BLOCK_SIZE])
{
const u32 *rkp = rndkeys;
int n = nrounds - 1;
u32 w[4];
w[0] = get_unaligned_le32(&in[0]) ^ *rkp++;
w[1] = get_unaligned_le32(&in[4]) ^ *rkp++;
w[2] = get_unaligned_le32(&in[8]) ^ *rkp++;
w[3] = get_unaligned_le32(&in[12]) ^ *rkp++;
/*
* Force the compiler to emit data independent Sbox references,
* by xoring the input with Sbox values that are known to add up
* to zero. This pulls the entire Sbox into the D-cache before any
* data dependent lookups are done.
* Prefetch the table before doing data and key-dependent loads from it.
*
* This is intended only as a basic constant-time hardening measure that
* avoids interfering with performance too much. Its effectiveness is
* not guaranteed. For proper constant-time AES, a CPU that supports
* AES instructions should be used instead.
*/
st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];
aes_prefetch(aes_enc_tab, sizeof(aes_enc_tab));
for (round = 0;; round += 2, rkp += 8) {
st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
do {
u32 w0 = enc_quarterround(w, 0, *rkp++);
u32 w1 = enc_quarterround(w, 1, *rkp++);
u32 w2 = enc_quarterround(w, 2, *rkp++);
u32 w3 = enc_quarterround(w, 3, *rkp++);
if (round == rounds - 2)
break;
w[0] = w0;
w[1] = w1;
w[2] = w2;
w[3] = w3;
} while (--n);
st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
}
put_unaligned_le32(enclast_quarterround(w, 0, *rkp++), &out[0]);
put_unaligned_le32(enclast_quarterround(w, 1, *rkp++), &out[4]);
put_unaligned_le32(enclast_quarterround(w, 2, *rkp++), &out[8]);
put_unaligned_le32(enclast_quarterround(w, 3, *rkp++), &out[12]);
}
put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
static __always_inline u32 dec_quarterround(const u32 w[4], int i, u32 rk)
{
return rk ^ aes_dec_tab[(u8)w[i]] ^
rol32(aes_dec_tab[(u8)(w[(i + 3) % 4] >> 8)], 8) ^
rol32(aes_dec_tab[(u8)(w[(i + 2) % 4] >> 16)], 16) ^
rol32(aes_dec_tab[(u8)(w[(i + 1) % 4] >> 24)], 24);
}
static __always_inline u32 declast_quarterround(const u32 w[4], int i, u32 rk)
{
return rk ^ aes_inv_sbox[(u8)w[i]] ^
((u32)aes_inv_sbox[(u8)(w[(i + 3) % 4] >> 8)] << 8) ^
((u32)aes_inv_sbox[(u8)(w[(i + 2) % 4] >> 16)] << 16) ^
((u32)aes_inv_sbox[(u8)(w[(i + 1) % 4] >> 24)] << 24);
}
static void __maybe_unused aes_decrypt_generic(const u32 inv_rndkeys[],
int nrounds,
u8 out[AES_BLOCK_SIZE],
const u8 in[AES_BLOCK_SIZE])
{
const u32 *rkp = inv_rndkeys;
int n = nrounds - 1;
u32 w[4];
w[0] = get_unaligned_le32(&in[0]) ^ *rkp++;
w[1] = get_unaligned_le32(&in[4]) ^ *rkp++;
w[2] = get_unaligned_le32(&in[8]) ^ *rkp++;
w[3] = get_unaligned_le32(&in[12]) ^ *rkp++;
aes_prefetch(aes_dec_tab, sizeof(aes_dec_tab));
do {
u32 w0 = dec_quarterround(w, 0, *rkp++);
u32 w1 = dec_quarterround(w, 1, *rkp++);
u32 w2 = dec_quarterround(w, 2, *rkp++);
u32 w3 = dec_quarterround(w, 3, *rkp++);
w[0] = w0;
w[1] = w1;
w[2] = w2;
w[3] = w3;
} while (--n);
aes_prefetch(aes_inv_sbox, sizeof(aes_inv_sbox));
put_unaligned_le32(declast_quarterround(w, 0, *rkp++), &out[0]);
put_unaligned_le32(declast_quarterround(w, 1, *rkp++), &out[4]);
put_unaligned_le32(declast_quarterround(w, 2, *rkp++), &out[8]);
put_unaligned_le32(declast_quarterround(w, 3, *rkp++), &out[12]);
}
/*
* Note: the aes_prepare*key_* names reflect the fact that the implementation
* might not actually expand the key. (The s390 code for example doesn't.)
* Where the key is expanded we use the more specific names aes_expandkey_*.
*
* aes_preparekey_arch() is passed an optional pointer 'inv_k' which points to
* the area to store the prepared decryption key. It will be NULL if the user
* is requesting encryption-only. aes_preparekey_arch() is also passed a valid
* 'key_len' and 'nrounds', corresponding to AES-128, AES-192, or AES-256.
*/
#ifdef CONFIG_CRYPTO_LIB_AES_ARCH
/* An arch-specific implementation of AES is available. Include it. */
#include "aes.h" /* $(SRCARCH)/aes.h */
#else
/* No arch-specific implementation of AES is available. Use generic code. */
static void aes_preparekey_arch(union aes_enckey_arch *k,
union aes_invkey_arch *inv_k,
const u8 *in_key, int key_len, int nrounds)
{
aes_expandkey_generic(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL,
in_key, key_len);
}
static void aes_encrypt_arch(const struct aes_enckey *key,
u8 out[AES_BLOCK_SIZE],
const u8 in[AES_BLOCK_SIZE])
{
aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
}
static void aes_decrypt_arch(const struct aes_key *key,
u8 out[AES_BLOCK_SIZE],
const u8 in[AES_BLOCK_SIZE])
{
aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds, out, in);
}
#endif
static int __aes_preparekey(struct aes_enckey *enc_key,
union aes_invkey_arch *inv_k,
const u8 *in_key, size_t key_len)
{
if (aes_check_keylen(key_len) != 0)
return -EINVAL;
enc_key->len = key_len;
enc_key->nrounds = 6 + key_len / 4;
aes_preparekey_arch(&enc_key->k, inv_k, in_key, key_len,
enc_key->nrounds);
return 0;
}
int aes_preparekey(struct aes_key *key, const u8 *in_key, size_t key_len)
{
return __aes_preparekey((struct aes_enckey *)key, &key->inv_k,
in_key, key_len);
}
EXPORT_SYMBOL(aes_preparekey);
int aes_prepareenckey(struct aes_enckey *key, const u8 *in_key, size_t key_len)
{
return __aes_preparekey(key, NULL, in_key, key_len);
}
EXPORT_SYMBOL(aes_prepareenckey);
void aes_encrypt(aes_encrypt_arg key, u8 out[AES_BLOCK_SIZE],
const u8 in[AES_BLOCK_SIZE])
{
aes_encrypt_arch(key.enc_key, out, in);
}
EXPORT_SYMBOL(aes_encrypt);
/**
* aes_decrypt - Decrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the plaintext
* @in: Buffer containing the ciphertext
*/
void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
void aes_decrypt(const struct aes_key *key, u8 out[AES_BLOCK_SIZE],
const u8 in[AES_BLOCK_SIZE])
{
const u32 *rkp = ctx->key_dec + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
int round;
st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
/*
* Force the compiler to emit data independent Sbox references,
* by xoring the input with Sbox values that are known to add up
* to zero. This pulls the entire Sbox into the D-cache before any
* data dependent lookups are done.
*/
st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];
for (round = 0;; round += 2, rkp += 8) {
st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
if (round == rounds - 2)
break;
st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
}
put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
aes_decrypt_arch(key, out, in);
}
EXPORT_SYMBOL(aes_decrypt);
MODULE_DESCRIPTION("Generic AES library");
#ifdef aes_mod_init_arch
static int __init aes_mod_init(void)
{
aes_mod_init_arch();
return 0;
}
subsys_initcall(aes_mod_init);
static void __exit aes_mod_exit(void)
{
}
module_exit(aes_mod_exit);
#endif
MODULE_DESCRIPTION("AES block cipher");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_AUTHOR("Eric Biggers <ebiggers@kernel.org>");
MODULE_LICENSE("GPL v2");

Some files were not shown because too many files have changed in this diff Show More