mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00

Make the architecture-optimized CRC code do its CPU feature checks in subsys_initcalls instead of arch_initcalls. This makes it consistent with arch/*/lib/crypto/ and ensures that it runs after initcalls that possibly could be a prerequisite for kernel-mode FPU, such as x86's xfd_update_static_branch() and loongarch's init_euen_mask().

Note: as far as I can tell, x86's xfd_update_static_branch() isn't *actually* needed for kernel-mode FPU. loongarch's init_euen_mask() is needed to enable save/restore of the vector registers, but loongarch doesn't yet have any CRC or crypto code that uses vector registers anyway. Regardless, let's be consistent with arch/*/lib/crypto/ and robust against any potential future dependency on an arch_initcall.

Link: https://lore.kernel.org/r/20250510035959.87995-1-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
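In practice, the change described above amounts to swapping the initcall registration macro used for the CPU feature probe. Below is a minimal sketch of that pattern; crc32_foo_init() is a hypothetical stand-in for a per-architecture probe routine (the sparc64 file on this page registers crc32_sparc_init() this way).

#include <linux/init.h>

/* Hedged sketch of the initcall-level change described in the commit
 * message above.  crc32_foo_init() is a hypothetical per-arch CPU
 * feature probe; the real sparc64 one is crc32_sparc_init() below.
 */
static int __init crc32_foo_init(void)
{
        /* Probe CPU features and flip a static key if supported. */
        return 0;
}

/* Before: registered as an arch_initcall. */
/* arch_initcall(crc32_foo_init); */

/* After: registered as a subsys_initcall, which runs after all
 * arch_initcalls, e.g. x86's xfd_update_static_branch() and
 * loongarch's init_euen_mask().
 */
subsys_initcall(crc32_foo_init);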
94 lines
2.1 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/* CRC32c (Castagnoli), sparc64 crc32c opcode accelerated
 *
 * This is based largely upon arch/x86/crypto/crc32c-intel.c
 *
 * Copyright (C) 2008 Intel Corporation
 * Authors: Austin Zhang <austin_zhang@linux.intel.com>
 *          Kent Liu <kent.liu@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/crc32.h>
#include <asm/pstate.h>
#include <asm/elf.h>

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32c_opcode);

u32 crc32_le_arch(u32 crc, const u8 *data, size_t len)
{
        return crc32_le_base(crc, data, len);
}
EXPORT_SYMBOL(crc32_le_arch);

void crc32c_sparc64(u32 *crcp, const u64 *data, size_t len);

u32 crc32c_arch(u32 crc, const u8 *data, size_t len)
{
        size_t n = -(uintptr_t)data & 7;

        if (!static_branch_likely(&have_crc32c_opcode))
                return crc32c_base(crc, data, len);

        if (n) {
                /* Data isn't 8-byte aligned. Align it. */
                n = min(n, len);
                crc = crc32c_base(crc, data, n);
                data += n;
                len -= n;
        }
        n = len & ~7U;
        if (n) {
                crc32c_sparc64(&crc, (const u64 *)data, n);
                data += n;
                len -= n;
        }
        if (len)
                crc = crc32c_base(crc, data, len);
        return crc;
}
EXPORT_SYMBOL(crc32c_arch);

u32 crc32_be_arch(u32 crc, const u8 *data, size_t len)
{
        return crc32_be_base(crc, data, len);
}
EXPORT_SYMBOL(crc32_be_arch);

static int __init crc32_sparc_init(void)
{
        unsigned long cfr;

        if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
                return 0;

        __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
        if (!(cfr & CFR_CRC32C))
                return 0;

        static_branch_enable(&have_crc32c_opcode);
        pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n");
        return 0;
}
subsys_initcall(crc32_sparc_init);

static void __exit crc32_sparc_exit(void)
{
}
module_exit(crc32_sparc_exit);

u32 crc32_optimizations(void)
{
        if (static_key_enabled(&have_crc32c_opcode))
                return CRC32C_OPTIMIZATION;
        return 0;
}
EXPORT_SYMBOL(crc32_optimizations);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");