linux/include/crypto/internal/sha2.h

commit 64f7548aad ("lib/crypto: sha256: Mark sha256_choose_blocks as __always_inline")
Author: Arnd Bergmann <arnd@arndb.de>
Date:   2025-06-20 13:22:03 -07:00

When the compiler chooses not to inline sha256_choose_blocks() in
the purgatory code, the link fails because the out-of-line copy still
references the CPU-specific implementations, which are not built into
the purgatory:

x86_64-linux-ld: arch/x86/purgatory/purgatory.ro: in function `sha256_choose_blocks.part.0':
sha256.c:(.text+0x6a6): undefined reference to `irq_fpu_usable'
sha256.c:(.text+0x6c7): undefined reference to `sha256_blocks_arch'
sha256.c:(.text+0x6cc): undefined reference to `sha256_blocks_simd'

Mark this function __always_inline to prevent this, as was already done
for sha256_finup().

Fixes: 5b90a779bc ("crypto: lib/sha256 - Add helpers for block-based shash")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20250620191952.1867578-1-arnd@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
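
To illustrate the failure mode, here is a minimal self-contained sketch
(choose_blocks(), blocks_arch(), blocks_generic(), and purgatory_caller()
are hypothetical stand-ins, not the actual purgatory code):

#include <stdbool.h>

#define __always_inline inline __attribute__((__always_inline__))

bool irq_fpu_usable(void);      /* linked into the kernel, not the purgatory */
void blocks_arch(void);         /* ditto */
void blocks_generic(void);      /* available everywhere */

/*
 * With plain "static inline", the compiler may still emit an out-of-line
 * copy (the sha256_choose_blocks.part.0 clone in the errors above), and
 * that copy keeps its calls to the kernel-only symbols, breaking the
 * purgatory link.  Forced inlining plus a constant argument lets the dead
 * branches fold away at every call site instead.
 */
static __always_inline void choose_blocks(bool force_generic)
{
        if (force_generic)
                blocks_generic();
        else if (irq_fpu_usable())
                blocks_arch();
        else
                blocks_generic();
}

void purgatory_caller(void)
{
        choose_blocks(true);    /* only blocks_generic() is referenced */
}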


/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _CRYPTO_INTERNAL_SHA2_H
#define _CRYPTO_INTERNAL_SHA2_H

#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <linux/compiler_attributes.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/unaligned.h>
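
/*
 * Reports whether an architecture-optimized SHA-256 block function is
 * available; a constant-false stub is used when no arch implementation
 * is configured.
 */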
#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256)
bool sha256_is_arch_optimized(void);
#else
static inline bool sha256_is_arch_optimized(void)
{
        return false;
}
#endif

void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
                           const u8 *data, size_t nblocks);
void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
                        const u8 *data, size_t nblocks);
void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
                        const u8 *data, size_t nblocks);
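
/*
 * Dispatch nblocks of input to the chosen SHA-256 block function.  When
 * force_generic/force_simd are compile-time constants, forced inlining
 * lets the unreachable branches fold away, so environments such as the
 * purgatory never pull in references to sha256_blocks_arch() or
 * sha256_blocks_simd() (see the commit message above).
 */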
static __always_inline void sha256_choose_blocks(
        u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
        bool force_generic, bool force_simd)
{
        if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
                sha256_blocks_generic(state, data, nblocks);
        else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
                 (force_simd || crypto_simd_usable()))
                sha256_blocks_simd(state, data, nblocks);
        else
                sha256_blocks_arch(state, data, nblocks);
}
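
/*
 * Finalize the hash: append the 0x80 terminator, zero-pad up to the
 * final 8 bytes of the last block, store the message length in bits
 * big-endian, process the resulting block(s), and write the first
 * digest_size bytes of the state to out in big-endian order.
 */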
static __always_inline void sha256_finup(
        struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE],
        size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size,
        bool force_generic, bool force_simd)
{
        const size_t bit_offset = SHA256_BLOCK_SIZE - 8;
        __be64 *bits = (__be64 *)&buf[bit_offset];
        int i;

        buf[len++] = 0x80;
        if (len > bit_offset) {
                memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
                sha256_choose_blocks(sctx->state, buf, 1, force_generic,
                                     force_simd);
                len = 0;
        }

        memset(&buf[len], 0, bit_offset - len);
        *bits = cpu_to_be64(sctx->count << 3);
        sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);

        for (i = 0; i < digest_size; i += 4)
                put_unaligned_be32(sctx->state[i / 4], out + i);
}

#endif /* _CRYPTO_INTERNAL_SHA2_H */
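
As a usage illustration, here is a hedged sketch of how a caller could drive
these helpers to hash a flat buffer in one shot. sha256_oneshot_sketch() is a
hypothetical name; struct crypto_sha256_state and the SHA256_H0..SHA256_H7
initial-state constants come from <crypto/sha2.h>, and the generic
implementation is forced so the sketch does not depend on arch or SIMD
support.

#include <crypto/internal/sha2.h>

static void sha256_oneshot_sketch(const u8 *data, size_t len,
                                  u8 out[SHA256_DIGEST_SIZE])
{
        struct crypto_sha256_state sctx = {
                .state = {
                        SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
                        SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
                },
                .count = len,   /* total bytes; sha256_finup() scales to bits */
        };
        u8 buf[SHA256_BLOCK_SIZE];
        size_t nblocks = len / SHA256_BLOCK_SIZE;
        size_t partial = len % SHA256_BLOCK_SIZE;

        /* Hash every full input block, forcing the generic implementation. */
        if (nblocks)
                sha256_choose_blocks(sctx.state, data, nblocks, true, false);

        /* Copy the trailing partial block; sha256_finup() pads and finalizes. */
        memcpy(buf, data + nblocks * SHA256_BLOCK_SIZE, partial);
        sha256_finup(&sctx, buf, partial, out, SHA256_DIGEST_SIZE, true, false);
}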