From 9f77b4c5c38c5734c45fe6223d57e48715dbffb8 Mon Sep 17 00:00:00 2001 From: Florian Schmaus Date: Sun, 25 Jan 2026 21:09:52 -0700 Subject: [PATCH 01/42] riscv: mm: define copy_user_page() as copy_page() Currently, the implementation of copy_user_page() is identical to copy_page(). Align riscv with other architectures (alpha, arc, arm64, hexagon, longarch, m68k, openrisc, s390, um, xtensa) and map copy_user_page() to copy_page() given that their implementation is identical. In addition to following a common pattern, this centralizes the implementation. Any changes to the underlying page copy logic (e.g., for CHERI) will now automatically propagate to copy_user_page(). Signed-off-by: Florian Schmaus Link: https://patch.msgid.link/20260113134025.905627-1-florian.schmaus@codasip.com Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/page.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index ffe213ad65a4..3c517bc9eac5 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -51,8 +51,7 @@ void clear_page(void *page); #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) #define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr) -#define copy_user_page(vto, vfrom, vaddr, topg) \ - memcpy((vto), (vfrom), PAGE_SIZE) +#define copy_user_page(vto, vfrom, vaddr, topg) copy_page(vto, vfrom) /* * Use struct definitions to apply C type checking From 5bfb287891d455a6ac9209d58873092044b0871c Mon Sep 17 00:00:00 2001 From: Javier Carrasco Date: Sun, 25 Jan 2026 21:09:52 -0700 Subject: [PATCH 02/42] riscv: defconfig: enable NLS_ISO8859_1 NLS_ISO8859_1 was enabled as a module with commit efe1e08bca9a ("riscv: defconfig: enable NLS_CODEPAGE_437, NLS_ISO8859_1"), but the NLS_CODEPAGE_437 counterpart is selected as built-in. The commit does not explain the reason behind, and it is not consistent with the defconfig for ARM64 that also enables these modules to mount EFI system partitions. Select NLS_ISO8859_1 as built-in to provide both requirements within the kernel image. Signed-off-by: Javier Carrasco Link: https://patch.msgid.link/20260111-nls_iso8859_1_y_riscv-v1-1-2c992bb2c00d@gmail.com Signed-off-by: Paul Walmsley --- arch/riscv/configs/defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index cd736a1d657e..5ca534a51809 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -295,7 +295,7 @@ CONFIG_NFS_V4_2=y CONFIG_ROOT_NFS=y CONFIG_9P_FS=y CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_1=y CONFIG_SECURITY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_APPARMOR=y From 3127718ad9552c9ca90e6eab37cd504c6806bfe4 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 25 Jan 2026 21:09:52 -0700 Subject: [PATCH 03/42] iommu/riscv: Simplify maximum determination in riscv_iommu_init_check() Reduce nested max() calls by a single max3() call in this function implementation. This issue was detected by using the Coccinelle software. 
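For reference, max3() from include/linux/minmax.h is simply a flat spelling of a nested max(), so the innermost pair of comparisons collapses into a single call. Illustrative only, not part of the diff:

  max(max(a, b), c)   /* before: two nested calls */
  max3(a, b, c)       /* after: equivalent result, one level less nesting */
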
Signed-off-by: Markus Elfring Link: https://patch.msgid.link/d1a384c9-f154-4537-94d6-c3613f4167bc@web.de Signed-off-by: Paul Walmsley --- drivers/iommu/riscv/iommu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c index d9429097a2b5..2d8fb0859f30 100644 --- a/drivers/iommu/riscv/iommu.c +++ b/drivers/iommu/riscv/iommu.c @@ -1593,10 +1593,10 @@ static int riscv_iommu_init_check(struct riscv_iommu_device *iommu) FIELD_PREP(RISCV_IOMMU_ICVEC_PMIV, 3 % iommu->irqs_count); riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_ICVEC, iommu->icvec); iommu->icvec = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_ICVEC); - if (max(max(FIELD_GET(RISCV_IOMMU_ICVEC_CIV, iommu->icvec), - FIELD_GET(RISCV_IOMMU_ICVEC_FIV, iommu->icvec)), - max(FIELD_GET(RISCV_IOMMU_ICVEC_PIV, iommu->icvec), - FIELD_GET(RISCV_IOMMU_ICVEC_PMIV, iommu->icvec))) >= iommu->irqs_count) + if (max3(FIELD_GET(RISCV_IOMMU_ICVEC_CIV, iommu->icvec), + FIELD_GET(RISCV_IOMMU_ICVEC_FIV, iommu->icvec), + max(FIELD_GET(RISCV_IOMMU_ICVEC_PIV, iommu->icvec), + FIELD_GET(RISCV_IOMMU_ICVEC_PMIV, iommu->icvec))) >= iommu->irqs_count) return -EINVAL; return 0; From 98036587a475f371005e42d3fff9feb9cf713880 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:52 -0700 Subject: [PATCH 04/42] mm: add VM_SHADOW_STACK definition for riscv VM_HIGH_ARCH_5 is used for riscv. Reviewed-by: Zong Li Reviewed-by: Alexandre Ghiti Acked-by: David Hildenbrand Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-1-b55691eacf4f@rivosinc.com [pjw@kernel.org: clarify subject; update to apply] Signed-off-by: Paul Walmsley --- include/linux/mm.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index f0d5be9dc736..26c8a0c3fbaa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -359,7 +359,7 @@ enum { DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2), DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3), DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4), -#if defined(CONFIG_X86_USER_SHADOW_STACK) +#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_RISCV_USER_CFI) /* * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of * support core mm. @@ -460,7 +460,8 @@ enum { #define VM_PKEY_BIT4 VM_NONE #endif /* CONFIG_ARCH_PKEY_BITS > 4 */ #endif /* CONFIG_ARCH_HAS_PKEYS */ -#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) +#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \ + defined(CONFIG_RISCV_USER_CFI) #define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK) #else #define VM_SHADOW_STACK VM_NONE From f94645fc03b864cf0823548c87a455346e87c22c Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:53 -0700 Subject: [PATCH 05/42] dt-bindings: riscv: document zicfilp and zicfiss in extensions.yaml Make an entry for cfi extensions in extensions.yaml. 
Signed-off-by: Deepak Gupta Acked-by: Rob Herring (Arm) Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-2-b55691eacf4f@rivosinc.com [pjw@kernel.org: updated subject] Signed-off-by: Paul Walmsley --- .../devicetree/bindings/riscv/extensions.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml index 5bab356addc8..1aeff17c7f24 100644 --- a/Documentation/devicetree/bindings/riscv/extensions.yaml +++ b/Documentation/devicetree/bindings/riscv/extensions.yaml @@ -469,6 +469,20 @@ properties: The standard Zicboz extension for cache-block zeroing as ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs. + - const: zicfilp + description: | + The standard Zicfilp extension for enforcing forward edge + control-flow integrity as ratified in commit 3f8e450 ("merge + pull request #227 from ved-rivos/0709") of riscv-cfi + github repo. + + - const: zicfiss + description: | + The standard Zicfiss extension for enforcing backward edge + control-flow integrity as ratified in commit 3f8e450 ("merge + pull request #227 from ved-rivos/0709") of riscv-cfi + github repo. + - const: zicntr description: The standard Zicntr extension for base counters and timers, as From df11708566d7458d1ce11eb28a59ef6a42ee5236 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:53 -0700 Subject: [PATCH 06/42] riscv: zicfiss / zicfilp enumeration This patch adds support for detecting the RISC-V ISA extensions Zicfiss and Zicfilp. Zicfiss and Zicfilp stand for the unprivileged integer spec extensions for shadow stack and indirect branch tracking, respectively. This patch looks for Zicfiss and Zicfilp in the device tree and accordingly lights up the corresponding bits in the cpu feature bitmap. Furthermore this patch adds detection utility functions to return whether shadow stack or landing pads are supported by the cpu. 
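For illustration, a minimal sketch of how a caller is expected to use these helpers; the function name below is made up for the example, but the guard pattern (the Kconfig gate and the ISA-extension check folded into one helper) is what the later patches in this series rely on:

  /* hypothetical caller: bail out early when user CFI cannot be supported */
  static int user_cfi_available(void)
  {
          if (!cpu_supports_shadow_stack())
                  return -EOPNOTSUPP;     /* Zicfiss absent or CONFIG_RISCV_USER_CFI=n */

          if (!cpu_supports_indirect_br_lp_instr())
                  pr_info("Zicfilp not present; landing pads unavailable\n");

          return 0;
  }
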
Reviewed-by: Zong Li Reviewed-by: Alexandre Ghiti Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-3-b55691eacf4f@rivosinc.com [pjw@kernel.org: updated to apply; cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/cpufeature.h | 12 ++++++++++++ arch/riscv/include/asm/hwcap.h | 2 ++ arch/riscv/kernel/cpufeature.c | 22 ++++++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 62837fa981e8..739fcc84bf7b 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -152,4 +152,16 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); } +static inline bool cpu_supports_shadow_stack(void) +{ + return (IS_ENABLED(CONFIG_RISCV_USER_CFI) && + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICFISS)); +} + +static inline bool cpu_supports_indirect_br_lp_instr(void) +{ + return (IS_ENABLED(CONFIG_RISCV_USER_CFI) && + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICFILP)); +} + #endif diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 4369a2338541..7ef8e5f55c8d 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -110,6 +110,8 @@ #define RISCV_ISA_EXT_ZALASR 101 #define RISCV_ISA_EXT_ZILSD 102 #define RISCV_ISA_EXT_ZCLSD 103 +#define RISCV_ISA_EXT_ZICFILP 104 +#define RISCV_ISA_EXT_ZICFISS 105 #define RISCV_ISA_EXT_XLINUXENVCFG 127 diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index fa591aff9d33..6827a577f870 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -296,6 +296,24 @@ static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data, return 0; } +static int riscv_cfilp_validate(const struct riscv_isa_ext_data *data, + const unsigned long *isa_bitmap) +{ + if (!IS_ENABLED(CONFIG_RISCV_USER_CFI)) + return -EINVAL; + + return 0; +} + +static int riscv_cfiss_validate(const struct riscv_isa_ext_data *data, + const unsigned long *isa_bitmap) +{ + if (!IS_ENABLED(CONFIG_RISCV_USER_CFI)) + return -EINVAL; + + return 0; +} + static const unsigned int riscv_a_exts[] = { RISCV_ISA_EXT_ZAAMO, RISCV_ISA_EXT_ZALRSC, @@ -482,6 +500,10 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA_VALIDATE(zicbop, RISCV_ISA_EXT_ZICBOP, riscv_ext_zicbop_validate), __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, riscv_ext_zicboz_validate), __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), + __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicfilp, RISCV_ISA_EXT_ZICFILP, riscv_xlinuxenvcfg_exts, + riscv_cfilp_validate), + __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicfiss, RISCV_ISA_EXT_ZICFISS, riscv_xlinuxenvcfg_exts, + riscv_cfiss_validate), __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), From 41a2452c99f327f2b57811e63f6d2497a4a96a9f Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:53 -0700 Subject: [PATCH 07/42] riscv: add Zicfiss / Zicfilp extension CSR and bit definitions The Zicfiss and Zicfilp extensions are enabled via b3 and b2 in *envcfg CSRs. menvcfg controls enabling for S/HS mode. henvcfg controls enabling for VS. 
senvcfg controls enabling for U/VU mode. The Zicfilp extension extends *status CSRs to hold an 'expected landing pad' bit. A trap or interrupt can occur between an indirect jmp/call and target instruction. The 'expected landing pad' bit from the CPU is recorded into the xstatus CSR so that when the supervisor performs xret, the 'expected landing pad' state of the CPU can be restored. Zicfiss adds one new CSR, CSR_SSP, which contains the current shadow stack pointer. Signed-off-by: Deepak Gupta Reviewed-by: Charlie Jenkins Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-4-b55691eacf4f@rivosinc.com [pjw@kernel.org: grouped CSR_SSP macro with the other CSR macros; clarified patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/csr.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 4a37a98398ad..55a07e8722ff 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -18,6 +18,15 @@ #define SR_MPP _AC(0x00001800, UL) /* Previously Machine */ #define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */ +/* zicfilp landing pad status bit */ +#define SR_SPELP _AC(0x00800000, UL) +#define SR_MPELP _AC(0x020000000000, UL) +#ifdef CONFIG_RISCV_M_MODE +#define SR_ELP SR_MPELP +#else +#define SR_ELP SR_SPELP +#endif + #define SR_FS _AC(0x00006000, UL) /* Floating-point Status */ #define SR_FS_OFF _AC(0x00000000, UL) #define SR_FS_INITIAL _AC(0x00002000, UL) @@ -212,6 +221,8 @@ #define ENVCFG_PMM_PMLEN_16 (_AC(0x3, ULL) << 32) #define ENVCFG_CBZE (_AC(1, UL) << 7) #define ENVCFG_CBCFE (_AC(1, UL) << 6) +#define ENVCFG_LPE (_AC(1, UL) << 2) +#define ENVCFG_SSE (_AC(1, UL) << 3) #define ENVCFG_CBIE_SHIFT 4 #define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT) #define ENVCFG_CBIE_ILL _AC(0x0, UL) @@ -321,6 +332,9 @@ #define CSR_STIMECMP 0x14D #define CSR_STIMECMPH 0x15D +/* zicfiss user mode csr. CSR_SSP holds current shadow stack pointer */ +#define CSR_SSP 0x011 + /* xtheadvector symbolic CSR names */ #define CSR_VXSAT 0x9 #define CSR_VXRM 0xa From 79dd4f2f40d0414aab670c46b801b11850306448 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:53 -0700 Subject: [PATCH 08/42] riscv: Add usercfi state for task and save/restore of CSR_SSP on trap entry/exit Carve out space in the RISC-V architecture-specific thread struct for cfi status and shadow stack in usermode. This patch: - defines a new structure cfi_status with status bit for cfi feature - defines shadow stack pointer, base and size in cfi_status structure - defines offsets to new member fields in thread in asm-offsets.c - saves and restores shadow stack pointer on trap entry (U --> S) and exit (S --> U) Shadow stack save/restore is gated on feature availability and is implemented using alternatives. CSR_SSP can be context-switched in 'switch_to' as well, but as soon as kernel shadow stack support gets rolled in, the shadow stack pointer will need to be switched at trap entry/exit point (much like 'sp'). It can be argued that a kernel using a shadow stack deployment scenario may not be as prevalent as user mode using this feature. But even if there is some minimal deployment of kernel shadow stack, that means that it needs to be supported. Thus save/restore of shadow stack pointer is implemented in entry.S instead of in 'switch_to.h'. 
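As a reading aid, a rough C-level paraphrase of what the save_userssp/restore_userssp macros below do; the real code is alternatives-patched assembly in entry.S and only runs when Zicfiss is available:

  /* trap entry: only if the trap came from U-mode (SR_SPP clear in status) */
  current->thread_info.user_cfi_state.user_shdw_stk = csr_swap(CSR_SSP, 0);

  /* trap exit: only when returning to U-mode */
  csr_write(CSR_SSP, current->thread_info.user_cfi_state.user_shdw_stk);
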
Reviewed-by: Charlie Jenkins Reviewed-by: Zong Li Reviewed-by: Alexandre Ghiti Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-5-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/processor.h | 1 + arch/riscv/include/asm/thread_info.h | 3 +++ arch/riscv/include/asm/usercfi.h | 23 +++++++++++++++++++++ arch/riscv/kernel/asm-offsets.c | 4 ++++ arch/riscv/kernel/entry.S | 31 ++++++++++++++++++++++++++++ 5 files changed, 62 insertions(+) create mode 100644 arch/riscv/include/asm/usercfi.h diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index da5426122d28..4c3dd94d0f63 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -16,6 +16,7 @@ #include #include #include +#include #define arch_get_mmap_end(addr, len, flags) \ ({ \ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 836d80dd2921..36918c9200c9 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -73,6 +73,9 @@ struct thread_info { */ unsigned long a0, a1, a2; #endif +#ifdef CONFIG_RISCV_USER_CFI + struct cfi_state user_cfi_state; +#endif }; #ifdef CONFIG_SHADOW_CALL_STACK diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h new file mode 100644 index 000000000000..4c5233e8f3f9 --- /dev/null +++ b/arch/riscv/include/asm/usercfi.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (C) 2024 Rivos, Inc. + * Deepak Gupta + */ +#ifndef _ASM_RISCV_USERCFI_H +#define _ASM_RISCV_USERCFI_H + +#ifndef __ASSEMBLER__ +#include + +#ifdef CONFIG_RISCV_USER_CFI +struct cfi_state { + unsigned long ubcfi_en : 1; /* Enable for backward cfi. */ + unsigned long user_shdw_stk; /* Current user shadow stack pointer */ + unsigned long shdw_stk_base; /* Base address of shadow stack */ + unsigned long shdw_stk_size; /* size of shadow stack */ +}; + +#endif /* CONFIG_RISCV_USER_CFI */ + +#endif /* __ASSEMBLER__ */ + +#endif /* _ASM_RISCV_USERCFI_H */ diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 7d42d3b8a32a..8a2b2656cb2f 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -51,6 +51,10 @@ void asm_offsets(void) #endif OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu); +#ifdef CONFIG_RISCV_USER_CFI + OFFSET(TASK_TI_CFI_STATE, task_struct, thread_info.user_cfi_state); + OFFSET(TASK_TI_USER_SSP, task_struct, thread_info.user_cfi_state.user_shdw_stk); +#endif OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]); OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]); OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]); diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 9b9dec6893b8..0f1394d71720 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -92,6 +92,35 @@ REG_L a0, TASK_TI_A0(tp) .endm +/* + * If previous mode was U, capture shadow stack pointer and save it away + * Zero CSR_SSP at the same time for sanitization. 
+ */ +.macro save_userssp tmp, status + ALTERNATIVE("nops(4)", + __stringify( \ + andi \tmp, \status, SR_SPP; \ + bnez \tmp, skip_ssp_save; \ + csrrw \tmp, CSR_SSP, x0; \ + REG_S \tmp, TASK_TI_USER_SSP(tp); \ + skip_ssp_save:), + 0, + RISCV_ISA_EXT_ZICFISS, + CONFIG_RISCV_USER_CFI) +.endm + +.macro restore_userssp tmp, status + ALTERNATIVE("nops(4)", + __stringify( \ + andi \tmp, \status, SR_SPP; \ + bnez \tmp, skip_ssp_restore; \ + REG_L \tmp, TASK_TI_USER_SSP(tp); \ + csrw CSR_SSP, \tmp; \ + skip_ssp_restore:), + 0, + RISCV_ISA_EXT_ZICFISS, + CONFIG_RISCV_USER_CFI) +.endm SYM_CODE_START(handle_exception) /* @@ -148,6 +177,7 @@ SYM_CODE_START(handle_exception) REG_L s0, TASK_TI_USER_SP(tp) csrrc s1, CSR_STATUS, t0 + save_userssp s2, s1 csrr s2, CSR_EPC csrr s3, CSR_TVAL csrr s4, CSR_CAUSE @@ -243,6 +273,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception) call riscv_v_context_nesting_end #endif REG_L a0, PT_STATUS(sp) + restore_userssp s3, a0 /* * The current load reservation is effectively part of the processor's * state, in the sense that load reservations cannot be shared between From 6c7559f22b6fcb2d4b52c445434c71ade36b004c Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:53 -0700 Subject: [PATCH 09/42] riscv/mm: ensure PROT_WRITE leads to VM_READ | VM_WRITE 'arch_calc_vm_prot_bits' is implemented on risc-v to return VM_READ | VM_WRITE if PROT_WRITE is specified. Similarly 'riscv_sys_mmap' is updated to convert all incoming PROT_WRITE to (PROT_WRITE | PROT_READ). This is to make sure that any existing apps using PROT_WRITE still work. Earlier 'protection_map[VM_WRITE]' used to pick read-write PTE encodings. Now 'protection_map[VM_WRITE]' will always pick PAGE_SHADOWSTACK PTE encodings for shadow stack. The above changes ensure that existing apps continue to work because underneath, the kernel will be picking 'protection_map[VM_WRITE|VM_READ]' PTE encodings. Reviewed-by: Zong Li Reviewed-by: Alexandre Ghiti Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-6-b55691eacf4f@rivosinc.com Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/mman.h | 26 ++++++++++++++++++++++++++ arch/riscv/include/asm/pgtable.h | 1 + arch/riscv/kernel/sys_riscv.c | 10 ++++++++++ arch/riscv/mm/init.c | 2 +- 4 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 arch/riscv/include/asm/mman.h diff --git a/arch/riscv/include/asm/mman.h b/arch/riscv/include/asm/mman.h new file mode 100644 index 000000000000..0ad1d19832eb --- /dev/null +++ b/arch/riscv/include/asm/mman.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MMAN_H__ +#define __ASM_MMAN_H__ + +#include +#include +#include +#include + +static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, + unsigned long pkey __always_unused) +{ + unsigned long ret = 0; + + /* + * If PROT_WRITE was specified, force it to VM_READ | VM_WRITE. + * Only VM_WRITE means shadow stack. + */ + if (prot & PROT_WRITE) + ret = (VM_READ | VM_WRITE); + return ret; +} + +#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) + +#endif /* ! 
__ASM_MMAN_H__ */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 9acd58a67123..b384a1a9327e 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -178,6 +178,7 @@ extern struct pt_alloc_ops pt_ops __meminitdata; #define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC) #define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \ _PAGE_EXEC | _PAGE_WRITE) +#define PAGE_SHADOWSTACK __pgprot(_PAGE_BASE | _PAGE_WRITE) #define PAGE_COPY PAGE_READ #define PAGE_COPY_EXEC PAGE_READ_EXEC diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 795b2e815ac9..22fc9b3268be 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -7,6 +7,7 @@ #include #include +#include static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, @@ -16,6 +17,15 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) return -EINVAL; + /* + * If PROT_WRITE is specified then extend that to PROT_READ + * protection_map[VM_WRITE] is now going to select shadow stack encodings. + * So specifying PROT_WRITE actually should select protection_map [VM_WRITE | VM_READ] + * If user wants to create shadow stack then they should use `map_shadow_stack` syscall. + */ + if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ))) + prot |= PROT_READ; + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - page_shift_offset)); } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index addb8a9305be..25a8f693a765 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -376,7 +376,7 @@ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); static const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READ, - [VM_WRITE] = PAGE_COPY, + [VM_WRITE] = PAGE_SHADOWSTACK, [VM_WRITE | VM_READ] = PAGE_COPY, [VM_EXEC] = PAGE_EXEC, [VM_EXEC | VM_READ] = PAGE_READ_EXEC, From f56ffb8ada46aae61580905d93c31e4006572240 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:53 -0700 Subject: [PATCH 10/42] riscv/mm: manufacture shadow stack ptes This patch implements the creation of a shadow stack pte on riscv. Creating shadow stack PTE on riscv means clearing RWX and then setting W=1. 
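In terms of the XWR permission bits, this gives the following (a worked illustration; XWR=010 is a reserved encoding without Zicfiss and is repurposed by the extension for shadow stack memory):

  regular writable page:  XWR = 011  (_PAGE_READ | _PAGE_WRITE)
  shadow stack page:      XWR = 010  (_PAGE_WRITE only)
  pte_mkwrite_shstk():    clears the R/W/X bits (_PAGE_LEAF), then sets _PAGE_WRITE
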
Reviewed-by: Alexandre Ghiti Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-7-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/pgtable.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index b384a1a9327e..153a692a5640 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -456,6 +456,11 @@ static inline pte_t pte_mkwrite_novma(pte_t pte) return __pte(pte_val(pte) | _PAGE_WRITE); } +static inline pte_t pte_mkwrite_shstk(pte_t pte) +{ + return __pte((pte_val(pte) & ~(_PAGE_LEAF)) | _PAGE_WRITE); +} + /* static inline pte_t pte_mkexec(pte_t pte) */ static inline pte_t pte_mkdirty(pte_t pte) @@ -839,6 +844,11 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))); } +static inline pmd_t pmd_mkwrite_shstk(pmd_t pte) +{ + return __pmd((pmd_val(pte) & ~(_PAGE_LEAF)) | _PAGE_WRITE); +} + static inline pmd_t pmd_wrprotect(pmd_t pmd) { return pte_pmd(pte_wrprotect(pmd_pte(pmd))); From c68c2ef9d64169317a6e0e6f0506953637760409 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:53 -0700 Subject: [PATCH 11/42] riscv/mm: teach pte_mkwrite to manufacture shadow stack PTEs pte_mkwrite() creates PTEs with WRITE encodings for the underlying architecture. The underlying architecture can have two types of writeable mappings: one that can be written using regular store instructions, and another one that can only be written using specialized store instructions (like shadow stack stores). pte_mkwrite can select write PTE encoding based on VMA range (i.e. 
VM_SHADOW_STACK) Reviewed-by: Alexandre Ghiti Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-8-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/pgtable.h | 7 +++++++ arch/riscv/mm/pgtable.c | 16 ++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 153a692a5640..7f81eecac243 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -451,6 +451,10 @@ static inline pte_t pte_swp_clear_uffd_wp(pte_t pte) /* static inline pte_t pte_mkread(pte_t pte) */ +struct vm_area_struct; +pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma); +#define pte_mkwrite pte_mkwrite + static inline pte_t pte_mkwrite_novma(pte_t pte) { return __pte(pte_val(pte) | _PAGE_WRITE); @@ -839,6 +843,9 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd) return pte_pmd(pte_mkyoung(pmd_pte(pmd))); } +pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma); +#define pmd_mkwrite pmd_mkwrite + static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) { return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))); diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c index 807c0a0de182..e0ab04d721cc 100644 --- a/arch/riscv/mm/pgtable.c +++ b/arch/riscv/mm/pgtable.c @@ -163,3 +163,19 @@ pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, return old; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_SHADOW_STACK) + return pte_mkwrite_shstk(pte); + + return pte_mkwrite_novma(pte); +} + +pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_SHADOW_STACK) + return pmd_mkwrite_shstk(pmd); + + return pmd_mkwrite_novma(pmd); +} From 540de7ade1e1327119e367ec5f662a627b3fe9f9 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:54 -0700 Subject: [PATCH 12/42] riscv/mm: update write protect to work on shadow stacks 'fork' implements copy-on-write (COW) by making pages readonly in both child and parent. ptep_set_wrprotect() and pte_wrprotect() clear _PAGE_WRITE in PTE. The assumption is that the page is readable and, on a fault, copy-on-write happens. To implement COW on shadow stack pages, clearing the W bit makes them XWR = 000. This will result in the wrong PTE setting, which allows no permissions, but with V=1 and the PFN field pointing to the final page. Instead, the desired behavior is to turn it into a readable page, take an access (load/store) fault on sspush/sspop (shadow stack) and then perform COW on such pages. This way regular reads would still be allowed and not lead to COW maintaining current behavior of COW on non-shadow stack but writeable memory. On the other hand, this doesn't interfere with existing COW for read-write memory. The assumption is always that _PAGE_READ must have been set, and thus, setting _PAGE_READ is harmless. 
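A worked example of the encodings involved, using the values described above:

  shadow stack PTE:                  XWR = 010
    old wrprotect (clear W):         XWR = 000  -> wrong setting: no permissions but V=1
    new wrprotect (clear W, set R):  XWR = 001  -> read-only; sspush/sspop fault and trigger COW

  regular read-write PTE:            XWR = 011
    new wrprotect:                   XWR = 001  -> same result as before, since R was already set
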
Reviewed-by: Alexandre Ghiti Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-9-b55691eacf4f@rivosinc.com [pjw@kernel.org: clarify patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/pgtable.h | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 7f81eecac243..1340aa398a74 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -411,7 +411,7 @@ static inline int pte_special(pte_t pte) static inline pte_t pte_wrprotect(pte_t pte) { - return __pte(pte_val(pte) & ~(_PAGE_WRITE)); + return __pte((pte_val(pte) & ~(_PAGE_WRITE)) | (_PAGE_READ)); } #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP @@ -683,7 +683,15 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { - atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep); + pte_t read_pte = READ_ONCE(*ptep); + /* + * ptep_set_wrprotect can be called for shadow stack ranges too. + * shadow stack memory is XWR = 010 and thus clearing _PAGE_WRITE will lead to + * encoding 000b which is wrong encoding with V = 1. This should lead to page fault + * but we dont want this wrong configuration to be set in page tables. + */ + atomic_long_set((atomic_long_t *)ptep, + ((pte_val(read_pte) & ~(unsigned long)_PAGE_WRITE) | _PAGE_READ)); } #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH From c70772afd5cc93c28f83b53d33ce9fbcd8d015da Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:54 -0700 Subject: [PATCH 13/42] riscv/mm: Implement map_shadow_stack() syscall As discussed extensively in the changelog for the addition of this syscall on x86 ("x86/shstk: Introduce map_shadow_stack syscall") the existing mmap() and madvise() syscalls do not map entirely well onto the security requirements for shadow stack memory since they lead to windows where memory is allocated but not yet protected or stacks which are not properly and safely initialised. Instead a new syscall map_shadow_stack() has been defined which allocates and initialises a shadow stack page. This patch implements this syscall for riscv. riscv doesn't require tokens to be setup by kernel because user mode can do that by itself. However to provide compatibility and portability with other architectues, user mode can specify token set flag. 
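A minimal userspace sketch of using the new syscall (illustrative only; it assumes uapi headers new enough to provide __NR_map_shadow_stack and SHADOW_STACK_SET_TOKEN, with fallback defines added just for the example):

  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  #ifndef __NR_map_shadow_stack
  #define __NR_map_shadow_stack 453              /* asm-generic syscall number */
  #endif
  #ifndef SHADOW_STACK_SET_TOKEN
  #define SHADOW_STACK_SET_TOKEN (1ULL << 0)     /* ask for a restore token */
  #endif

  int main(void)
  {
          /* let the kernel pick the address; restore token written near the top */
          long ss = syscall(__NR_map_shadow_stack, 0UL, 4 * 4096UL,
                            SHADOW_STACK_SET_TOKEN);
          if (ss == -1) {
                  perror("map_shadow_stack");
                  return 1;
          }
          printf("shadow stack mapped at %#lx\n", ss);
          return 0;
  }
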
Signed-off-by: Deepak Gupta Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-10-b55691eacf4f@rivosinc.com Link: https://lore.kernel.org/linux-riscv/aXfRPJvoSsOW8AwM@debug.ba.rivosinc.com/ [pjw@kernel.org: added allocate_shadow_stack() fix per Deepak; fixed bug found by sparse] Signed-off-by: Paul Walmsley --- arch/riscv/kernel/Makefile | 1 + arch/riscv/kernel/usercfi.c | 140 ++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 arch/riscv/kernel/usercfi.c diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index a01f6439d62b..0a0a3fb02d63 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -126,3 +126,4 @@ obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += bugs.o +obj-$(CONFIG_RISCV_USER_CFI) += usercfi.o diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c new file mode 100644 index 000000000000..9717321d3bd6 --- /dev/null +++ b/arch/riscv/kernel/usercfi.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Rivos, Inc. + * Deepak Gupta + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SHSTK_ENTRY_SIZE sizeof(void *) + +/* + * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen + * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to + * shadow stack. To keep it simple, we plan to use `ssamoswap` to perform writes on shadow + * stack. + */ +static noinline unsigned long amo_user_shstk(unsigned long __user *addr, unsigned long val) +{ + /* + * Never expect -1 on shadow stack. Expect return addresses and zero + */ + unsigned long swap = -1; + + __enable_user_access(); + asm goto(".option push\n" + ".option arch, +zicfiss\n" + "1: ssamoswap.d %[swap], %[val], %[addr]\n" + _ASM_EXTABLE(1b, %l[fault]) + ".option pop\n" + : [swap] "=r" (swap), [addr] "+A" (*(__force unsigned long *)addr) + : [val] "r" (val) + : "memory" + : fault + ); + __disable_user_access(); + return swap; +fault: + __disable_user_access(); + return -1; +} + +/* + * Create a restore token on the shadow stack. A token is always XLEN wide + * and aligned to XLEN. 
+ */ +static int create_rstor_token(unsigned long ssp, unsigned long *token_addr) +{ + unsigned long addr; + + /* Token must be aligned */ + if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE)) + return -EINVAL; + + /* On RISC-V we're constructing token to be function of address itself */ + addr = ssp - SHSTK_ENTRY_SIZE; + + if (amo_user_shstk((unsigned long __user *)addr, (unsigned long)ssp) == -1) + return -EFAULT; + + if (token_addr) + *token_addr = addr; + + return 0; +} + +static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size, + unsigned long token_offset, bool set_tok) +{ + int flags = MAP_ANONYMOUS | MAP_PRIVATE; + struct mm_struct *mm = current->mm; + unsigned long populate; + + if (addr) + flags |= MAP_FIXED_NOREPLACE; + + mmap_write_lock(mm); + addr = do_mmap(NULL, addr, size, PROT_READ, flags, + VM_SHADOW_STACK | VM_WRITE, 0, &populate, NULL); + mmap_write_unlock(mm); + + if (!set_tok || IS_ERR_VALUE(addr)) + goto out; + + if (create_rstor_token(addr + token_offset, NULL)) { + vm_munmap(addr, size); + return -EINVAL; + } + +out: + return addr; +} + +SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags) +{ + bool set_tok = flags & SHADOW_STACK_SET_TOKEN; + unsigned long aligned_size = 0; + + if (!cpu_supports_shadow_stack()) + return -EOPNOTSUPP; + + /* Anything other than set token should result in invalid param */ + if (flags & ~SHADOW_STACK_SET_TOKEN) + return -EINVAL; + + /* + * Unlike other architectures, on RISC-V, SSP pointer is held in CSR_SSP and is an available + * CSR in all modes. CSR accesses are performed using 12bit index programmed in instruction + * itself. This provides static property on register programming and writes to CSR can't + * be unintentional from programmer's perspective. As long as programmer has guarded areas + * which perform writes to CSR_SSP properly, shadow stack pivoting is not possible. Since + * CSR_SSP is writable by user mode, it itself can setup a shadow stack token subsequent + * to allocation. Although in order to provide portablity with other architectures (because + * `map_shadow_stack` is arch agnostic syscall), RISC-V will follow expectation of a token + * flag in flags and if provided in flags, will setup a token at the base. + */ + + /* If there isn't space for a token */ + if (set_tok && size < SHSTK_ENTRY_SIZE) + return -ENOSPC; + + if (addr && (addr & (PAGE_SIZE - 1))) + return -EINVAL; + + aligned_size = PAGE_ALIGN(size); + if (aligned_size < size) + return -EOVERFLOW; + + return allocate_shadow_stack(addr, aligned_size, size, set_tok); +} From fd44a4a8551698757d0e7eeaa964735b471f7407 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:54 -0700 Subject: [PATCH 14/42] riscv/shstk: If needed allocate a new shadow stack on clone Userspace specifies CLONE_VM to share address space and spawn new thread. 'clone' allows userspace to specify a new stack for a new thread. However there is no way to specify a new shadow stack base address without changing the API. This patch allocates a new shadow stack whenever CLONE_VM is given. In case of CLONE_VFORK, the parent is suspended until the child finishes; thus the child can use the parent's shadow stack. In case of !CLONE_VM, COW kicks in because entire address space is copied from parent to child. 'clone3' is extensible and can provide mechanisms for specifying the shadow stack as an input parameter. This is not settled yet and is being extensively discussed on the mailing list. 
Once that's settled, this code should be adapted. Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-11-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/mmu_context.h | 7 ++ arch/riscv/include/asm/usercfi.h | 25 ++++++ arch/riscv/kernel/process.c | 10 +++ arch/riscv/kernel/usercfi.c | 120 +++++++++++++++++++++++++++ 4 files changed, 162 insertions(+) diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 8c4bc49a3a0f..dbf27a78df6c 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -48,6 +48,13 @@ static inline unsigned long mm_untag_mask(struct mm_struct *mm) } #endif +#define deactivate_mm deactivate_mm +static inline void deactivate_mm(struct task_struct *tsk, + struct mm_struct *mm) +{ + shstk_release(tsk); +} + #include #endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h index 4c5233e8f3f9..a16a5dff8b0e 100644 --- a/arch/riscv/include/asm/usercfi.h +++ b/arch/riscv/include/asm/usercfi.h @@ -8,6 +8,9 @@ #ifndef __ASSEMBLER__ #include +struct task_struct; +struct kernel_clone_args; + #ifdef CONFIG_RISCV_USER_CFI struct cfi_state { unsigned long ubcfi_en : 1; /* Enable for backward cfi. */ @@ -16,6 +19,28 @@ struct cfi_state { unsigned long shdw_stk_size; /* size of shadow stack */ }; +unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, + const struct kernel_clone_args *args); +void shstk_release(struct task_struct *tsk); +void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size); +unsigned long get_shstk_base(struct task_struct *task, unsigned long *size); +void set_active_shstk(struct task_struct *task, unsigned long shstk_addr); +bool is_shstk_enabled(struct task_struct *task); + +#else + +#define shstk_alloc_thread_stack(tsk, args) 0 + +#define shstk_release(tsk) + +#define get_shstk_base(task, size) 0UL + +#define set_shstk_base(task, shstk_addr, size) do {} while (0) + +#define set_active_shstk(task, shstk_addr) do {} while (0) + +#define is_shstk_enabled(task) false + #endif /* CONFIG_RISCV_USER_CFI */ #endif /* __ASSEMBLER__ */ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 31a392993cb4..72d35adc6e0e 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -31,6 +31,7 @@ #include #include #include +#include #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include @@ -226,6 +227,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) u64 clone_flags = args->flags; unsigned long usp = args->stack; unsigned long tls = args->tls; + unsigned long ssp = 0; struct pt_regs *childregs = task_pt_regs(p); /* Ensure all threads in this mm have the same pointer masking mode. */ @@ -245,11 +247,19 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) p->thread.s[1] = (unsigned long)args->fn_arg; p->thread.ra = (unsigned long)ret_from_fork_kernel_asm; } else { + /* allocate new shadow stack if needed. 
In case of CLONE_VM we have to */ + ssp = shstk_alloc_thread_stack(p, args); + if (IS_ERR_VALUE(ssp)) + return PTR_ERR((void *)ssp); + *childregs = *(current_pt_regs()); /* Turn off status.VS */ riscv_v_vstate_off(childregs); if (usp) /* User fork */ childregs->sp = usp; + /* if needed, set new ssp */ + if (ssp) + set_active_shstk(p, ssp); if (clone_flags & CLONE_SETTLS) childregs->tp = tls; childregs->a0 = 0; /* Return value of fork() */ diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c index 9717321d3bd6..59cdd6f69d3f 100644 --- a/arch/riscv/kernel/usercfi.c +++ b/arch/riscv/kernel/usercfi.c @@ -19,6 +19,41 @@ #define SHSTK_ENTRY_SIZE sizeof(void *) +bool is_shstk_enabled(struct task_struct *task) +{ + return task->thread_info.user_cfi_state.ubcfi_en; +} + +void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size) +{ + task->thread_info.user_cfi_state.shdw_stk_base = shstk_addr; + task->thread_info.user_cfi_state.shdw_stk_size = size; +} + +unsigned long get_shstk_base(struct task_struct *task, unsigned long *size) +{ + if (size) + *size = task->thread_info.user_cfi_state.shdw_stk_size; + return task->thread_info.user_cfi_state.shdw_stk_base; +} + +void set_active_shstk(struct task_struct *task, unsigned long shstk_addr) +{ + task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr; +} + +/* + * If size is 0, then to be compatible with regular stack we want it to be as big as + * regular stack. Else PAGE_ALIGN it and return back + */ +static unsigned long calc_shstk_size(unsigned long size) +{ + if (size) + return PAGE_ALIGN(size); + + return PAGE_ALIGN(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G)); +} + /* * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to @@ -138,3 +173,88 @@ SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsi return allocate_shadow_stack(addr, aligned_size, size, set_tok); } + +/* + * This gets called during clone/clone3/fork. And is needed to allocate a shadow stack for + * cases where CLONE_VM is specified and thus a different stack is specified by user. We + * thus need a separate shadow stack too. How a separate shadow stack is specified by + * user is still being debated. Once that's settled, remove this part of the comment. + * This function simply returns 0 if shadow stacks are not supported or if separate shadow + * stack allocation is not needed (like in case of !CLONE_VM) + */ +unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, + const struct kernel_clone_args *args) +{ + unsigned long addr, size; + + /* If shadow stack is not supported, return 0 */ + if (!cpu_supports_shadow_stack()) + return 0; + + /* + * If shadow stack is not enabled on the new thread, skip any + * switch to a new shadow stack. + */ + if (!is_shstk_enabled(tsk)) + return 0; + + /* + * For CLONE_VFORK the child will share the parents shadow stack. + * Set base = 0 and size = 0, this is special means to track this state + * so the freeing logic run for child knows to leave it alone. + */ + if (args->flags & CLONE_VFORK) { + set_shstk_base(tsk, 0, 0); + return 0; + } + + /* + * For !CLONE_VM the child will use a copy of the parents shadow + * stack. + */ + if (!(args->flags & CLONE_VM)) + return 0; + + /* + * reaching here means, CLONE_VM was specified and thus a separate shadow + * stack is needed for new cloned thread. 
Note: below allocation is happening + * using current mm. + */ + size = calc_shstk_size(args->stack_size); + addr = allocate_shadow_stack(0, size, 0, false); + if (IS_ERR_VALUE(addr)) + return addr; + + set_shstk_base(tsk, addr, size); + + return addr + size; +} + +void shstk_release(struct task_struct *tsk) +{ + unsigned long base = 0, size = 0; + /* If shadow stack is not supported or not enabled, nothing to release */ + if (!cpu_supports_shadow_stack() || !is_shstk_enabled(tsk)) + return; + + /* + * When fork() with CLONE_VM fails, the child (tsk) already has a + * shadow stack allocated, and exit_thread() calls this function to + * free it. In this case the parent (current) and the child share + * the same mm struct. Move forward only when they're same. + */ + if (!tsk->mm || tsk->mm != current->mm) + return; + + /* + * We know shadow stack is enabled but if base is NULL, then + * this task is not managing its own shadow stack (CLONE_VFORK). So + * skip freeing it. + */ + base = get_shstk_base(tsk, &size); + if (!base) + return; + + vm_munmap(base, size); + set_shstk_base(tsk, 0, 0); +} From 61a0200211d31e20380c35d619960a40113da872 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:54 -0700 Subject: [PATCH 15/42] riscv: Implement arch-agnostic shadow stack prctls Implement an architecture-agnostic prctl() interface for setting and getting shadow stack status. The prctls implemented are PR_GET_SHADOW_STACK_STATUS, PR_SET_SHADOW_STACK_STATUS and PR_LOCK_SHADOW_STACK_STATUS. As part of PR_SET_SHADOW_STACK_STATUS/PR_GET_SHADOW_STACK_STATUS, only PR_SHADOW_STACK_ENABLE is implemented because RISCV allows each mode to write to their own shadow stack using 'sspush' or 'ssamoswap'. PR_LOCK_SHADOW_STACK_STATUS locks the current shadow stack enablement configuration. Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-12-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/usercfi.h | 16 +++++ arch/riscv/kernel/process.c | 8 +++ arch/riscv/kernel/usercfi.c | 110 +++++++++++++++++++++++++++++++ 3 files changed, 134 insertions(+) diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h index a16a5dff8b0e..d71093f414df 100644 --- a/arch/riscv/include/asm/usercfi.h +++ b/arch/riscv/include/asm/usercfi.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLER__ #include +#include struct task_struct; struct kernel_clone_args; @@ -14,6 +15,7 @@ struct kernel_clone_args; #ifdef CONFIG_RISCV_USER_CFI struct cfi_state { unsigned long ubcfi_en : 1; /* Enable for backward cfi. 
*/ + unsigned long ubcfi_locked : 1; unsigned long user_shdw_stk; /* Current user shadow stack pointer */ unsigned long shdw_stk_base; /* Base address of shadow stack */ unsigned long shdw_stk_size; /* size of shadow stack */ @@ -26,6 +28,12 @@ void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned unsigned long get_shstk_base(struct task_struct *task, unsigned long *size); void set_active_shstk(struct task_struct *task, unsigned long shstk_addr); bool is_shstk_enabled(struct task_struct *task); +bool is_shstk_locked(struct task_struct *task); +bool is_shstk_allocated(struct task_struct *task); +void set_shstk_lock(struct task_struct *task); +void set_shstk_status(struct task_struct *task, bool enable); + +#define PR_SHADOW_STACK_SUPPORTED_STATUS_MASK (PR_SHADOW_STACK_ENABLE) #else @@ -41,6 +49,14 @@ bool is_shstk_enabled(struct task_struct *task); #define is_shstk_enabled(task) false +#define is_shstk_locked(task) false + +#define is_shstk_allocated(task) false + +#define set_shstk_lock(task) do {} while (0) + +#define set_shstk_status(task, enable) do {} while (0) + #endif /* CONFIG_RISCV_USER_CFI */ #endif /* __ASSEMBLER__ */ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 72d35adc6e0e..a137d3483646 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -156,6 +156,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, regs->epc = pc; regs->sp = sp; + /* + * clear shadow stack state on exec. + * libc will set it later via prctl. + */ + set_shstk_status(current, false); + set_shstk_base(current, 0, 0); + set_active_shstk(current, 0); + #ifdef CONFIG_64BIT regs->status &= ~SR_UXL; diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c index 59cdd6f69d3f..778930f79cfa 100644 --- a/arch/riscv/kernel/usercfi.c +++ b/arch/riscv/kernel/usercfi.c @@ -24,6 +24,16 @@ bool is_shstk_enabled(struct task_struct *task) return task->thread_info.user_cfi_state.ubcfi_en; } +bool is_shstk_allocated(struct task_struct *task) +{ + return task->thread_info.user_cfi_state.shdw_stk_base; +} + +bool is_shstk_locked(struct task_struct *task) +{ + return task->thread_info.user_cfi_state.ubcfi_locked; +} + void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size) { task->thread_info.user_cfi_state.shdw_stk_base = shstk_addr; @@ -42,6 +52,26 @@ void set_active_shstk(struct task_struct *task, unsigned long shstk_addr) task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr; } +void set_shstk_status(struct task_struct *task, bool enable) +{ + if (!cpu_supports_shadow_stack()) + return; + + task->thread_info.user_cfi_state.ubcfi_en = enable ? 1 : 0; + + if (enable) + task->thread.envcfg |= ENVCFG_SSE; + else + task->thread.envcfg &= ~ENVCFG_SSE; + + csr_write(CSR_ENVCFG, task->thread.envcfg); +} + +void set_shstk_lock(struct task_struct *task) +{ + task->thread_info.user_cfi_state.ubcfi_locked = 1; +} + /* * If size is 0, then to be compatible with regular stack we want it to be as big as * regular stack. Else PAGE_ALIGN it and return back @@ -258,3 +288,83 @@ void shstk_release(struct task_struct *tsk) vm_munmap(base, size); set_shstk_base(tsk, 0, 0); } + +int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status) +{ + unsigned long bcfi_status = 0; + + if (!cpu_supports_shadow_stack()) + return -EINVAL; + + /* this means shadow stack is enabled on the task */ + bcfi_status |= (is_shstk_enabled(t) ? 
PR_SHADOW_STACK_ENABLE : 0); + + return copy_to_user(status, &bcfi_status, sizeof(bcfi_status)) ? -EFAULT : 0; +} + +int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status) +{ + unsigned long size = 0, addr = 0; + bool enable_shstk = false; + + if (!cpu_supports_shadow_stack()) + return -EINVAL; + + /* Reject unknown flags */ + if (status & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK) + return -EINVAL; + + /* bcfi status is locked and further can't be modified by user */ + if (is_shstk_locked(t)) + return -EINVAL; + + enable_shstk = status & PR_SHADOW_STACK_ENABLE; + /* Request is to enable shadow stack and shadow stack is not enabled already */ + if (enable_shstk && !is_shstk_enabled(t)) { + /* shadow stack was allocated and enable request again + * no need to support such usecase and return EINVAL. + */ + if (is_shstk_allocated(t)) + return -EINVAL; + + size = calc_shstk_size(0); + addr = allocate_shadow_stack(0, size, 0, false); + if (IS_ERR_VALUE(addr)) + return -ENOMEM; + set_shstk_base(t, addr, size); + set_active_shstk(t, addr + size); + } + + /* + * If a request to disable shadow stack happens, let's go ahead and release it + * Although, if CLONE_VFORKed child did this, then in that case we will end up + * not releasing the shadow stack (because it might be needed in parent). Although + * we will disable it for VFORKed child. And if VFORKed child tries to enable again + * then in that case, it'll get entirely new shadow stack because following condition + * are true + * - shadow stack was not enabled for vforked child + * - shadow stack base was anyways pointing to 0 + * This shouldn't be a big issue because we want parent to have availability of shadow + * stack whenever VFORKed child releases resources via exit or exec but at the same + * time we want VFORKed child to break away and establish new shadow stack if it desires + * + */ + if (!enable_shstk) + shstk_release(t); + + set_shstk_status(t, enable_shstk); + return 0; +} + +int arch_lock_shadow_stack_status(struct task_struct *task, + unsigned long arg) +{ + /* If shtstk not supported or not enabled on task, nothing to lock here */ + if (!cpu_supports_shadow_stack() || + !is_shstk_enabled(task) || arg != 0) + return -EINVAL; + + set_shstk_lock(task); + + return 0; +} From 5ca243f6e3c30b979a54a96b96df355dda2b4d0f Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:54 -0700 Subject: [PATCH 16/42] prctl: add arch-agnostic prctl()s for indirect branch tracking Three architectures (x86, aarch64, riscv) have support for indirect branch tracking feature in a very similar fashion. On a very high level, indirect branch tracking is a CPU feature where CPU tracks branches which use a memory operand to transfer control. As part of this tracking, during an indirect branch, the CPU expects a landing pad instruction on the target PC, and if not found, the CPU raises some fault (architecture-dependent). x86 landing pad instr - 'ENDBRANCH' arch64 landing pad instr - 'BTI' riscv landing instr - 'lpad' Given that three major architectures have support for indirect branch tracking, this patch creates architecture-agnostic 'prctls' to allow userspace to control this feature. They are: - PR_GET_INDIR_BR_LP_STATUS: Get the current configured status for indirect branch tracking. - PR_SET_INDIR_BR_LP_STATUS: Set the configuration for indirect branch tracking. The following status options are allowed: - PR_INDIR_BR_LP_ENABLE: Enables indirect branch tracking on user thread. 
- PR_INDIR_BR_LP_DISABLE: Disables indirect branch tracking on user thread. - PR_LOCK_INDIR_BR_LP_STATUS: Locks configured status for indirect branch tracking for user thread. Reviewed-by: Mark Brown Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-13-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description, code comments] Signed-off-by: Paul Walmsley --- include/linux/cpu.h | 4 ++++ include/uapi/linux/prctl.h | 27 +++++++++++++++++++++++++++ kernel/sys.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 487b3bf2e1ea..8239cd95a005 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -229,4 +229,8 @@ static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v) #define smt_mitigations SMT_MITIGATIONS_OFF #endif +int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status); +int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status); +int arch_lock_indir_br_lp_status(struct task_struct *t, unsigned long status); + #endif /* _LINUX_CPU_H_ */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 51c4e8c82b1e..f57098fb0ba8 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -386,4 +386,31 @@ struct prctl_mm_map { # define PR_FUTEX_HASH_SET_SLOTS 1 # define PR_FUTEX_HASH_GET_SLOTS 2 +/* + * Get the current indirect branch tracking configuration for the current + * thread, this will be the value configured via PR_SET_INDIR_BR_LP_STATUS. + */ +#define PR_GET_INDIR_BR_LP_STATUS 79 + +/* + * Set the indirect branch tracking configuration. PR_INDIR_BR_LP_ENABLE will + * enable cpu feature for user thread, to track all indirect branches and ensure + * they land on arch defined landing pad instruction. + * x86 - If enabled, an indirect branch must land on an ENDBRANCH instruction. + * arch64 - If enabled, an indirect branch must land on a BTI instruction. + * riscv - If enabled, an indirect branch must land on an lpad instruction. + * PR_INDIR_BR_LP_DISABLE will disable feature for user thread and indirect + * branches will no more be tracked by cpu to land on arch defined landing pad + * instruction. + */ +#define PR_SET_INDIR_BR_LP_STATUS 80 +# define PR_INDIR_BR_LP_ENABLE (1UL << 0) + +/* + * Prevent further changes to the specified indirect branch tracking + * configuration. All bits may be locked via this call, including + * undefined bits. 
+ */ +#define PR_LOCK_INDIR_BR_LP_STATUS 81 + #endif /* _LINUX_PRCTL_H */ diff --git a/kernel/sys.c b/kernel/sys.c index 8b58eece4e58..9071422c1609 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2388,6 +2388,21 @@ int __weak arch_lock_shadow_stack_status(struct task_struct *t, unsigned long st return -EINVAL; } +int __weak arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status) +{ + return -EINVAL; +} + +int __weak arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status) +{ + return -EINVAL; +} + +int __weak arch_lock_indir_br_lp_status(struct task_struct *t, unsigned long status) +{ + return -EINVAL; +} + #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE) static int prctl_set_vma(unsigned long opt, unsigned long addr, @@ -2868,6 +2883,21 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, case PR_FUTEX_HASH: error = futex_hash_prctl(arg2, arg3, arg4); break; + case PR_GET_INDIR_BR_LP_STATUS: + if (arg3 || arg4 || arg5) + return -EINVAL; + error = arch_get_indir_br_lp_status(me, (unsigned long __user *)arg2); + break; + case PR_SET_INDIR_BR_LP_STATUS: + if (arg3 || arg4 || arg5) + return -EINVAL; + error = arch_set_indir_br_lp_status(me, arg2); + break; + case PR_LOCK_INDIR_BR_LP_STATUS: + if (arg3 || arg4 || arg5) + return -EINVAL; + error = arch_lock_indir_br_lp_status(me, arg2); + break; default: trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5); error = -EINVAL; From 8a9e22d2ca5855263d6e3f83509eabf16d7b8a0a Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:54 -0700 Subject: [PATCH 17/42] riscv: Implement indirect branch tracking prctls This patch adds a RISC-V implementation of the following prctls: PR_SET_INDIR_BR_LP_STATUS, PR_GET_INDIR_BR_LP_STATUS and PR_LOCK_INDIR_BR_LP_STATUS. Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-14-b55691eacf4f@rivosinc.com [pjw@kernel.org: clean up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/usercfi.h | 14 ++++++ arch/riscv/kernel/entry.S | 4 ++ arch/riscv/kernel/process.c | 5 ++ arch/riscv/kernel/usercfi.c | 79 ++++++++++++++++++++++++++++++++ 4 files changed, 102 insertions(+) diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h index d71093f414df..4501d741a609 100644 --- a/arch/riscv/include/asm/usercfi.h +++ b/arch/riscv/include/asm/usercfi.h @@ -16,6 +16,8 @@ struct kernel_clone_args; struct cfi_state { unsigned long ubcfi_en : 1; /* Enable for backward cfi. */ unsigned long ubcfi_locked : 1; + unsigned long ufcfi_en : 1; /* Enable for forward cfi. 
Note that ELP goes in sstatus */ + unsigned long ufcfi_locked : 1; unsigned long user_shdw_stk; /* Current user shadow stack pointer */ unsigned long shdw_stk_base; /* Base address of shadow stack */ unsigned long shdw_stk_size; /* size of shadow stack */ @@ -32,6 +34,10 @@ bool is_shstk_locked(struct task_struct *task); bool is_shstk_allocated(struct task_struct *task); void set_shstk_lock(struct task_struct *task); void set_shstk_status(struct task_struct *task, bool enable); +bool is_indir_lp_enabled(struct task_struct *task); +bool is_indir_lp_locked(struct task_struct *task); +void set_indir_lp_status(struct task_struct *task, bool enable); +void set_indir_lp_lock(struct task_struct *task); #define PR_SHADOW_STACK_SUPPORTED_STATUS_MASK (PR_SHADOW_STACK_ENABLE) @@ -57,6 +63,14 @@ void set_shstk_status(struct task_struct *task, bool enable); #define set_shstk_status(task, enable) do {} while (0) +#define is_indir_lp_enabled(task) false + +#define is_indir_lp_locked(task) false + +#define set_indir_lp_status(task, enable) do {} while (0) + +#define set_indir_lp_lock(task) do {} while (0) + #endif /* CONFIG_RISCV_USER_CFI */ #endif /* __ASSEMBLER__ */ diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 0f1394d71720..2d0a9a3ce231 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -174,6 +174,10 @@ SYM_CODE_START(handle_exception) * or vector in kernel space. */ li t0, SR_SUM | SR_FS_VS +#ifdef CONFIG_64BIT + li t1, SR_ELP + or t0, t0, t1 +#endif REG_L s0, TASK_TI_USER_SP(tp) csrrc s1, CSR_STATUS, t0 diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index a137d3483646..49f527e3acfd 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -163,6 +163,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, set_shstk_status(current, false); set_shstk_base(current, 0, 0); set_active_shstk(current, 0); + /* + * disable indirect branch tracking on exec. + * libc will enable it later via prctl. + */ + set_indir_lp_status(current, false); #ifdef CONFIG_64BIT regs->status &= ~SR_UXL; diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c index 778930f79cfa..db3ba51af5a5 100644 --- a/arch/riscv/kernel/usercfi.c +++ b/arch/riscv/kernel/usercfi.c @@ -72,6 +72,35 @@ void set_shstk_lock(struct task_struct *task) task->thread_info.user_cfi_state.ubcfi_locked = 1; } +bool is_indir_lp_enabled(struct task_struct *task) +{ + return task->thread_info.user_cfi_state.ufcfi_en; +} + +bool is_indir_lp_locked(struct task_struct *task) +{ + return task->thread_info.user_cfi_state.ufcfi_locked; +} + +void set_indir_lp_status(struct task_struct *task, bool enable) +{ + if (!cpu_supports_indirect_br_lp_instr()) + return; + + task->thread_info.user_cfi_state.ufcfi_en = enable ? 1 : 0; + + if (enable) + task->thread.envcfg |= ENVCFG_LPE; + else + task->thread.envcfg &= ~ENVCFG_LPE; + + csr_write(CSR_ENVCFG, task->thread.envcfg); +} + +void set_indir_lp_lock(struct task_struct *task) +{ + task->thread_info.user_cfi_state.ufcfi_locked = 1; +} /* * If size is 0, then to be compatible with regular stack we want it to be as big as * regular stack. 
Else PAGE_ALIGN it and return back @@ -368,3 +397,53 @@ int arch_lock_shadow_stack_status(struct task_struct *task, return 0; } + +int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status) +{ + unsigned long fcfi_status = 0; + + if (!cpu_supports_indirect_br_lp_instr()) + return -EINVAL; + + /* indirect branch tracking is enabled on the task or not */ + fcfi_status |= (is_indir_lp_enabled(t) ? PR_INDIR_BR_LP_ENABLE : 0); + + return copy_to_user(status, &fcfi_status, sizeof(fcfi_status)) ? -EFAULT : 0; +} + +int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status) +{ + bool enable_indir_lp = false; + + if (!cpu_supports_indirect_br_lp_instr()) + return -EINVAL; + + /* indirect branch tracking is locked and further can't be modified by user */ + if (is_indir_lp_locked(t)) + return -EINVAL; + + /* Reject unknown flags */ + if (status & ~PR_INDIR_BR_LP_ENABLE) + return -EINVAL; + + enable_indir_lp = (status & PR_INDIR_BR_LP_ENABLE); + set_indir_lp_status(t, enable_indir_lp); + + return 0; +} + +int arch_lock_indir_br_lp_status(struct task_struct *task, + unsigned long arg) +{ + /* + * If indirect branch tracking is not supported or not enabled on task, + * nothing to lock here + */ + if (!cpu_supports_indirect_br_lp_instr() || + !is_indir_lp_enabled(task) || arg != 0) + return -EINVAL; + + set_indir_lp_lock(task); + + return 0; +} From 9d42fc28fc178e5031eddc6f80df561fc586caf4 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:54 -0700 Subject: [PATCH 18/42] riscv/traps: Introduce software check exception and uprobe handling The Zicfiss and Zicfilp extensions introduce a new exception, the 'software check exception', in the privileged ISA, with cause code = 18. This patch implements support for software check exceptions. Additionally, the patch implements a CFI violation handler which checks the code in the xtval register. If xtval=2, the software check exception happened because of an indirect branch that didn't land on a 4 byte aligned PC or on a 'lpad' instruction, or the label value embedded in 'lpad' didn't match the label value set in the x7 register. If xtval=3, the software check exception happened due to a mismatch between the link register (x1 or x5) and the top of shadow stack (on execution of `sspopchk`). In case of a CFI violation, SIGSEGV is raised with code=SEGV_CPERR. SEGV_CPERR was introduced by the x86 shadow stack patches. To keep uprobes working, handle the uprobe event first before reporting the CFI violation in the software check exception handler. This is because, when the landing pad is activated, if the uprobe point is set at the lpad instruction at the beginning of a function, the system triggers a software check exception instead of an ebreak exception due to the exception priority. This would prevent uprobe from working. 
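For illustration only (not part of this change), a user-space process can
observe such a violation through the si_code reported with the SIGSEGV. The
sketch below is a minimal handler; it assumes SEGV_CPERR has the asm-generic
value 10 when the installed libc headers do not define it yet:

#include <signal.h>
#include <string.h>
#include <unistd.h>

#ifndef SEGV_CPERR
#define SEGV_CPERR 10			/* control protection fault (assumed asm-generic value) */
#endif

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)ucontext;

	if (info->si_code == SEGV_CPERR) {
		static const char msg[] = "control-flow violation (SEGV_CPERR)\n";

		/* write() is async-signal-safe, unlike printf() */
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
	}
	_exit(128 + sig);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	/* ... run code that might take a landing pad or shadow stack fault ... */
	return 0;
}
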
Reviewed-by: Zong Li Co-developed-by: Zong Li Signed-off-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-15-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up the patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/asm-prototypes.h | 1 + arch/riscv/include/asm/entry-common.h | 2 + arch/riscv/kernel/entry.S | 3 ++ arch/riscv/kernel/traps.c | 54 +++++++++++++++++++++++++ 4 files changed, 60 insertions(+) diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index a9988bf21ec8..41ec5cdec367 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -51,6 +51,7 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_u); DECLARE_DO_ERROR_INFO(do_trap_ecall_s); DECLARE_DO_ERROR_INFO(do_trap_ecall_m); DECLARE_DO_ERROR_INFO(do_trap_break); +DECLARE_DO_ERROR_INFO(do_trap_software_check); asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs); asmlinkage void ret_from_fork_user(struct pt_regs *regs); diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index b28ccc6cdeea..34ed149af5d1 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -40,4 +40,6 @@ static inline int handle_misaligned_store(struct pt_regs *regs) } #endif +bool handle_user_cfi_violation(struct pt_regs *regs); + #endif /* _ASM_RISCV_ENTRY_COMMON_H */ diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 2d0a9a3ce231..60eb221296a6 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -495,6 +495,9 @@ SYM_DATA_START_LOCAL(excp_vect_table) RISCV_PTR do_page_fault /* load page fault */ RISCV_PTR do_trap_unknown RISCV_PTR do_page_fault /* store page fault */ + RISCV_PTR do_trap_unknown /* cause=16 */ + RISCV_PTR do_trap_unknown /* cause=17 */ + RISCV_PTR do_trap_software_check /* cause=18 is sw check exception */ SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end) #ifndef CONFIG_MMU diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 47afea4ff1a8..5fb57fad188a 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -368,6 +368,60 @@ void do_trap_ecall_u(struct pt_regs *regs) } +#define CFI_TVAL_FCFI_CODE 2 +#define CFI_TVAL_BCFI_CODE 3 +/* handle cfi violations */ +bool handle_user_cfi_violation(struct pt_regs *regs) +{ + unsigned long tval = csr_read(CSR_TVAL); + bool is_fcfi = (tval == CFI_TVAL_FCFI_CODE && cpu_supports_indirect_br_lp_instr()); + bool is_bcfi = (tval == CFI_TVAL_BCFI_CODE && cpu_supports_shadow_stack()); + + /* + * Handle uprobe event first. The probe point can be a valid target + * of indirect jumps or calls, in this case, forward cfi violation + * will be triggered instead of breakpoint exception. Clear ELP flag + * on sstatus image as well to avoid recurring fault. + */ + if (is_fcfi && probe_breakpoint_handler(regs)) { + regs->status &= ~SR_ELP; + return true; + } + + if (is_fcfi || is_bcfi) { + do_trap_error(regs, SIGSEGV, SEGV_CPERR, regs->epc, + "Oops - control flow violation"); + return true; + } + + return false; +} + +/* + * software check exception is defined with risc-v cfi spec. 
Software check + * exception is raised when: + * a) An indirect branch doesn't land on 4 byte aligned PC or `lpad` + * instruction or `label` value programmed in `lpad` instr doesn't + * match with value setup in `x7`. reported code in `xtval` is 2. + * b) `sspopchk` instruction finds a mismatch between top of shadow stack (ssp) + * and x1/x5. reported code in `xtval` is 3. + */ +asmlinkage __visible __trap_section void do_trap_software_check(struct pt_regs *regs) +{ + if (user_mode(regs)) { + irqentry_enter_from_user_mode(regs); + + /* not a cfi violation, then merge into flow of unknown trap handler */ + if (!handle_user_cfi_violation(regs)) + do_trap_unknown(regs); + + irqentry_exit_to_user_mode(regs); + } else { + /* sw check exception coming from kernel is a bug in kernel */ + die(regs, "Kernel BUG"); + } +} + #ifdef CONFIG_MMU asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs) { From 66c9c713de597f9b40a319ebda4d3466ce2cdff0 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:55 -0700 Subject: [PATCH 19/42] riscv/signal: save and restore the shadow stack on a signal Save the shadow stack pointer in the sigcontext structure when delivering a signal. Restore the shadow stack pointer from sigcontext on sigreturn. As part of the save operation, the kernel uses the 'ssamoswap' instruction to save a snapshot of the current shadow stack on the shadow stack itself (this can be called a "save token"). During restore on sigreturn, the kernel retrieves the save token from the top of the shadow stack and validates it. This ensures that user mode can't arbitrarily pivot to any shadow stack address without having a token and thus provides a strong security assurance during the window between signal delivery and sigreturn. Use an ABI-compatible way of saving/restoring the shadow stack pointer into the signal stack. This follows the vector extension, where extra registers are placed in a form of extension header + extension body in the stack. The extension header indicates the size of the extra architectural states plus the size of header itself, and a magic identifier for the extension. Then, the extension body contains the new architectural states in the form defined by uapi. 
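As a rough sketch (not part of this change), the record appended for Zicfiss
then has the following shape. The outer struct name is purely illustrative;
the header and body mirror struct __riscv_ctx_hdr and
struct __sc_riscv_cfi_state:

#include <stdint.h>

struct zicfiss_sc_record {			/* illustrative name only */
	struct {
		uint32_t magic;			/* RISCV_ZICFISS_MAGIC (0x9487) */
		uint32_t size;			/* sizeof(header) + sizeof(body) */
	} hdr;					/* mirrors struct __riscv_ctx_hdr */
	struct {
		unsigned long ss_ptr;		/* shadow stack pointer, left pointing at the save token */
	} body;					/* mirrors struct __sc_riscv_cfi_state */
	/* the extension area is terminated by a header with magic == END_MAGIC (0) and size == 0 */
};
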
Signed-off-by: Andy Chiu Signed-off-by: Deepak Gupta Tested-by: Andreas Korb Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-17-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned patch description, code comments; resolved checkpatch warning] Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/usercfi.h | 10 +++ arch/riscv/include/uapi/asm/ptrace.h | 4 ++ arch/riscv/include/uapi/asm/sigcontext.h | 1 + arch/riscv/kernel/signal.c | 86 ++++++++++++++++++++++++ arch/riscv/kernel/usercfi.c | 57 ++++++++++++++++ 5 files changed, 158 insertions(+) diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h index 4501d741a609..ec4b8a53eb74 100644 --- a/arch/riscv/include/asm/usercfi.h +++ b/arch/riscv/include/asm/usercfi.h @@ -8,6 +8,7 @@ #ifndef __ASSEMBLER__ #include #include +#include struct task_struct; struct kernel_clone_args; @@ -34,6 +35,9 @@ bool is_shstk_locked(struct task_struct *task); bool is_shstk_allocated(struct task_struct *task); void set_shstk_lock(struct task_struct *task); void set_shstk_status(struct task_struct *task, bool enable); +unsigned long get_active_shstk(struct task_struct *task); +int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr); +int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr); bool is_indir_lp_enabled(struct task_struct *task); bool is_indir_lp_locked(struct task_struct *task); void set_indir_lp_status(struct task_struct *task, bool enable); @@ -71,6 +75,12 @@ void set_indir_lp_lock(struct task_struct *task); #define set_indir_lp_lock(task) do {} while (0) +#define restore_user_shstk(tsk, shstk_ptr) -EINVAL + +#define save_user_shstk(tsk, saved_shstk_ptr) -EINVAL + +#define get_active_shstk(task) 0UL + #endif /* CONFIG_RISCV_USER_CFI */ #endif /* __ASSEMBLER__ */ diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h index beff8df80ac9..261bfe70f60a 100644 --- a/arch/riscv/include/uapi/asm/ptrace.h +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -127,6 +127,10 @@ struct __riscv_v_regset_state { */ #define RISCV_MAX_VLENB (8192) +struct __sc_riscv_cfi_state { + unsigned long ss_ptr; /* shadow stack pointer */ +}; + #endif /* __ASSEMBLER__ */ #endif /* _UAPI_ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h index 748dffc9ae19..d22d0815d605 100644 --- a/arch/riscv/include/uapi/asm/sigcontext.h +++ b/arch/riscv/include/uapi/asm/sigcontext.h @@ -10,6 +10,7 @@ /* The Magic number for signal context frame header. */ #define RISCV_V_MAGIC 0x53465457 +#define RISCV_ZICFISS_MAGIC 0x9487 #define END_MAGIC 0x0 /* The size of END signal context header. 
*/ diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index dbb067e345f0..59784dc117e4 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -22,11 +22,13 @@ #include #include #include +#include unsigned long signal_minsigstksz __ro_after_init; extern u32 __user_rt_sigreturn[2]; static size_t riscv_v_sc_size __ro_after_init; +static size_t riscv_zicfiss_sc_size __ro_after_init; #define DEBUG_SIG 0 @@ -140,6 +142,62 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec) return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); } +static long save_cfiss_state(struct pt_regs *regs, void __user *sc_cfi) +{ + struct __sc_riscv_cfi_state __user *state = sc_cfi; + unsigned long ss_ptr = 0; + long err = 0; + + if (!is_shstk_enabled(current)) + return 0; + + /* + * Save a pointer to the shadow stack itself on shadow stack as a form of token. + * A token on the shadow stack gives the following properties: + * - Safe save and restore for shadow stack switching. Any save of a shadow stack + * must have saved a token on the shadow stack. Similarly any restore of shadow + * stack must check the token before restore. Since writing to the shadow stack with + * address of the shadow stack itself is not easily allowed, a restore without a save + * is quite difficult for an attacker to perform. + * - A natural break. A token in shadow stack provides a natural break in shadow stack + * So a single linear range can be bucketed into different shadow stack segments. Any + * sspopchk will detect the condition and fault to kernel as a sw check exception. + */ + err |= save_user_shstk(current, &ss_ptr); + err |= __put_user(ss_ptr, &state->ss_ptr); + if (unlikely(err)) + return -EFAULT; + + return riscv_zicfiss_sc_size; +} + +static long __restore_cfiss_state(struct pt_regs *regs, void __user *sc_cfi) +{ + struct __sc_riscv_cfi_state __user *state = sc_cfi; + unsigned long ss_ptr = 0; + long err; + + /* + * Restore shadow stack as a form of token stored on the shadow stack itself as a safe + * way to restore. + * A token on the shadow stack gives the following properties: + * - Safe save and restore for shadow stack switching. Any save of shadow stack + * must have saved a token on shadow stack. Similarly any restore of shadow + * stack must check the token before restore. Since writing to a shadow stack with + * the address of shadow stack itself is not easily allowed, a restore without a save + * is quite difficult for an attacker to perform. + * - A natural break. A token in the shadow stack provides a natural break in shadow stack + * So a single linear range can be bucketed into different shadow stack segments. + * sspopchk will detect the condition and fault to kernel as a sw check exception. 
+ */ + err = __copy_from_user(&ss_ptr, &state->ss_ptr, sizeof(unsigned long)); + + if (unlikely(err)) + return err; + + return restore_user_shstk(current, ss_ptr); +} + struct arch_ext_priv { __u32 magic; long (*save)(struct pt_regs *regs, void __user *sc_vec); @@ -150,6 +208,10 @@ static struct arch_ext_priv arch_ext_list[] = { .magic = RISCV_V_MAGIC, .save = &save_v_state, }, + { + .magic = RISCV_ZICFISS_MAGIC, + .save = &save_cfiss_state, + }, }; static const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list); @@ -202,6 +264,12 @@ static long restore_sigcontext(struct pt_regs *regs, err = __restore_v_state(regs, sc_ext_ptr); break; + case RISCV_ZICFISS_MAGIC: + if (!is_shstk_enabled(current) || size != riscv_zicfiss_sc_size) + return -EINVAL; + + err = __restore_cfiss_state(regs, sc_ext_ptr); + break; default: return -EINVAL; } @@ -223,6 +291,16 @@ static size_t get_rt_frame_size(bool cal_all) total_context_size += riscv_v_sc_size; } + if (is_shstk_enabled(current)) + total_context_size += riscv_zicfiss_sc_size; + + /* + * Preserved a __riscv_ctx_hdr for END signal context header if an + * extension uses __riscv_extra_ext_header + */ + if (total_context_size) + total_context_size += sizeof(struct __riscv_ctx_hdr); + frame_size += total_context_size; frame_size = round_up(frame_size, 16); @@ -359,6 +437,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, #ifdef CONFIG_MMU regs->ra = (unsigned long)VDSO_SYMBOL( current->mm->context.vdso, rt_sigreturn); + + /* if bcfi is enabled x1 (ra) and x5 (t0) must match. not sure if we need this? */ + if (is_shstk_enabled(current)) + regs->t0 = regs->ra; + #else /* * For the nommu case we don't have a VDSO. Instead we push two @@ -487,6 +570,9 @@ void __init init_rt_signal_env(void) { riscv_v_sc_size = sizeof(struct __riscv_ctx_hdr) + sizeof(struct __sc_riscv_v_state) + riscv_v_vsize; + + riscv_zicfiss_sc_size = sizeof(struct __riscv_ctx_hdr) + + sizeof(struct __sc_riscv_cfi_state); /* * Determine the stack space required for guaranteed signal delivery. * The signal_minsigstksz will be populated into the AT_MINSIGSTKSZ entry diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c index db3ba51af5a5..7cec00ca1df4 100644 --- a/arch/riscv/kernel/usercfi.c +++ b/arch/riscv/kernel/usercfi.c @@ -52,6 +52,11 @@ void set_active_shstk(struct task_struct *task, unsigned long shstk_addr) task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr; } +unsigned long get_active_shstk(struct task_struct *task) +{ + return task->thread_info.user_cfi_state.user_shdw_stk; +} + void set_shstk_status(struct task_struct *task, bool enable) { if (!cpu_supports_shadow_stack()) @@ -168,6 +173,58 @@ static int create_rstor_token(unsigned long ssp, unsigned long *token_addr) return 0; } +/* + * Save user shadow stack pointer on the shadow stack itself and return a pointer to saved location. + * Returns -EFAULT if unsuccessful. + */ +int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr) +{ + unsigned long ss_ptr = 0; + unsigned long token_loc = 0; + int ret = 0; + + if (!saved_shstk_ptr) + return -EINVAL; + + ss_ptr = get_active_shstk(tsk); + ret = create_rstor_token(ss_ptr, &token_loc); + + if (!ret) { + *saved_shstk_ptr = token_loc; + set_active_shstk(tsk, token_loc); + } + + return ret; +} + +/* + * Restores the user shadow stack pointer from the token on the shadow stack for task 'tsk'. + * Returns -EFAULT if unsuccessful. 
+ */ +int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr) +{ + unsigned long token = 0; + + token = amo_user_shstk((unsigned long __user *)shstk_ptr, 0); + + if (token == -1) + return -EFAULT; + + /* invalid token, return EINVAL */ + if ((token - shstk_ptr) != SHSTK_ENTRY_SIZE) { + pr_info_ratelimited("%s[%d]: bad restore token in %s: pc=%p sp=%p, token=%p, shstk_ptr=%p\n", + tsk->comm, task_pid_nr(tsk), __func__, + (void *)(task_pt_regs(tsk)->epc), + (void *)(task_pt_regs(tsk)->sp), + (void *)token, (void *)shstk_ptr); + return -EINVAL; + } + + /* all checks passed, set active shstk and return success */ + set_active_shstk(tsk, token); + return 0; +} + static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size, unsigned long token_offset, bool set_tok) { From 9d0e75e25e3be74828ffb7657992ce0f03352cc3 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:55 -0700 Subject: [PATCH 20/42] riscv/kernel: update __show_regs() to print shadow stack register Update __show_regs() to print the captured shadow stack pointer. On tasks where shadow stack is disabled, simply print 0. Signed-off-by: Deepak Gupta Reviewed-by: Alexandre Ghiti Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-18-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/kernel/process.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 49f527e3acfd..aacb23978f93 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -93,8 +93,8 @@ void __show_regs(struct pt_regs *regs) regs->s8, regs->s9, regs->s10); pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n", regs->s11, regs->t3, regs->t4); - pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n", - regs->t5, regs->t6); + pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT " ssp : " REG_FMT "\n", + regs->t5, regs->t6, get_active_shstk(current)); pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n", regs->status, regs->badaddr, regs->cause); From 2af7c9cf021c5dabe880b68e5cc22c618060d954 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:55 -0700 Subject: [PATCH 21/42] riscv/ptrace: expose riscv CFI status and state via ptrace and in core files Expose a new register type NT_RISCV_USER_CFI for risc-v CFI status and state. Intentionally, both landing pad and shadow stack status and state are rolled into the CFI state. Creating two different NT_RISCV_USER_XXX would not be useful and would waste a note type. Enabling, disabling and locking the CFI feature is not allowed via ptrace set interface. However, setting 'elp' state or setting shadow stack pointer are allowed via the ptrace set interface. It is expected that 'gdb' might need to fixup 'elp' state or 'shadow stack' pointer. 
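As an illustration (not part of this change), a tracer could read the new
regset roughly as shown below. The NT_RISCV_USER_CFI fallback value and the
mirrored two-word layout come from this patch; everything else (names, error
handling) is an assumption about the tracer:

#include <stdio.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_RISCV_USER_CFI
#define NT_RISCV_USER_CFI	0x903
#endif

#define PTRACE_CFI_SS_EN_STATE	(1ULL << 3)	/* matches PTRACE_CFI_SS_EN_BIT */

/* layout-compatible mirror of struct user_cfi_state from asm/ptrace.h */
struct user_cfi_regs {
	uint64_t cfi_state;			/* PTRACE_CFI_* status bits */
	uint64_t shstk_ptr;			/* active shadow stack pointer, if enabled */
};

/* call with a traced, stopped task (e.g. after PTRACE_ATTACH + waitpid) */
static int dump_cfi_state(pid_t pid)
{
	struct user_cfi_regs cfi;
	struct iovec iov = { .iov_base = &cfi, .iov_len = sizeof(cfi) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_RISCV_USER_CFI, &iov) < 0)
		return -1;

	printf("cfi_state=%#llx shstk_ptr=%#llx shadow stack %sabled\n",
	       (unsigned long long)cfi.cfi_state,
	       (unsigned long long)cfi.shstk_ptr,
	       (cfi.cfi_state & PTRACE_CFI_SS_EN_STATE) ? "en" : "dis");
	return 0;
}
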
Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-19-b55691eacf4f@rivosinc.com [pjw@kernel.org: updated to apply; cleaned patch description and comments; addressed checkpatch issues] Signed-off-by: Paul Walmsley --- arch/riscv/include/uapi/asm/ptrace.h | 30 +++++++++ arch/riscv/kernel/ptrace.c | 95 ++++++++++++++++++++++++++++ include/uapi/linux/elf.h | 2 + 3 files changed, 127 insertions(+) diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h index 261bfe70f60a..18988a5f1a63 100644 --- a/arch/riscv/include/uapi/asm/ptrace.h +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -131,6 +131,36 @@ struct __sc_riscv_cfi_state { unsigned long ss_ptr; /* shadow stack pointer */ }; +#define PTRACE_CFI_LP_EN_BIT 0 +#define PTRACE_CFI_LP_LOCK_BIT 1 +#define PTRACE_CFI_ELP_BIT 2 +#define PTRACE_CFI_SS_EN_BIT 3 +#define PTRACE_CFI_SS_LOCK_BIT 4 +#define PTRACE_CFI_SS_PTR_BIT 5 + +#define PTRACE_CFI_LP_EN_STATE BIT(PTRACE_CFI_LP_EN_BIT) +#define PTRACE_CFI_LP_LOCK_STATE BIT(PTRACE_CFI_LP_LOCK_BIT) +#define PTRACE_CFI_ELP_STATE BIT(PTRACE_CFI_ELP_BIT) +#define PTRACE_CFI_SS_EN_STATE BIT(PTRACE_CFI_SS_EN_BIT) +#define PTRACE_CFI_SS_LOCK_STATE BIT(PTRACE_CFI_SS_LOCK_BIT) +#define PTRACE_CFI_SS_PTR_STATE BIT(PTRACE_CFI_SS_PTR_BIT) + +#define PRACE_CFI_STATE_INVALID_MASK ~(PTRACE_CFI_LP_EN_STATE | \ + PTRACE_CFI_LP_LOCK_STATE | \ + PTRACE_CFI_ELP_STATE | \ + PTRACE_CFI_SS_EN_STATE | \ + PTRACE_CFI_SS_LOCK_STATE | \ + PTRACE_CFI_SS_PTR_STATE) + +struct __cfi_status { + __u64 cfi_state; +}; + +struct user_cfi_state { + struct __cfi_status cfi_status; + __u64 shstk_ptr; +}; + #endif /* __ASSEMBLER__ */ #endif /* _UAPI_ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index e6272d74572f..57e257d459e8 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -19,6 +19,7 @@ #include #include #include +#include enum riscv_regset { REGSET_X, @@ -31,6 +32,9 @@ enum riscv_regset { #ifdef CONFIG_RISCV_ISA_SUPM REGSET_TAGGED_ADDR_CTRL, #endif +#ifdef CONFIG_RISCV_USER_CFI + REGSET_CFI, +#endif }; static int riscv_gpr_get(struct task_struct *target, @@ -195,6 +199,87 @@ static int tagged_addr_ctrl_set(struct task_struct *target, } #endif +#ifdef CONFIG_RISCV_USER_CFI +static int riscv_cfi_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct user_cfi_state user_cfi; + struct pt_regs *regs; + + memset(&user_cfi, 0, sizeof(user_cfi)); + regs = task_pt_regs(target); + + if (is_indir_lp_enabled(target)) { + user_cfi.cfi_status.cfi_state |= PTRACE_CFI_LP_EN_STATE; + user_cfi.cfi_status.cfi_state |= is_indir_lp_locked(target) ? + PTRACE_CFI_LP_LOCK_STATE : 0; + user_cfi.cfi_status.cfi_state |= (regs->status & SR_ELP) ? + PTRACE_CFI_ELP_STATE : 0; + } + + if (is_shstk_enabled(target)) { + user_cfi.cfi_status.cfi_state |= (PTRACE_CFI_SS_EN_STATE | + PTRACE_CFI_SS_PTR_STATE); + user_cfi.cfi_status.cfi_state |= is_shstk_locked(target) ? + PTRACE_CFI_SS_LOCK_STATE : 0; + user_cfi.shstk_ptr = get_active_shstk(target); + } + + return membuf_write(&to, &user_cfi, sizeof(user_cfi)); +} + +/* + * Does it make sense to allow enable / disable of cfi via ptrace? + * We don't allow enable / disable / locking control via ptrace for now. + * Setting the shadow stack pointer is allowed. GDB might use it to unwind or + * some other fixup. 
Similarly gdb might want to suppress elp and may want + * to reset elp state. + */ +static int riscv_cfi_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_cfi_state user_cfi; + struct pt_regs *regs; + + regs = task_pt_regs(target); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_cfi, 0, -1); + if (ret) + return ret; + + /* + * Not allowing enabling or locking shadow stack or landing pad + * There is no disabling of shadow stack or landing pad via ptrace + * rsvd field should be set to zero so that if those fields are needed in future + */ + if ((user_cfi.cfi_status.cfi_state & + (PTRACE_CFI_LP_EN_STATE | PTRACE_CFI_LP_LOCK_STATE | + PTRACE_CFI_SS_EN_STATE | PTRACE_CFI_SS_LOCK_STATE)) || + (user_cfi.cfi_status.cfi_state & PRACE_CFI_STATE_INVALID_MASK)) + return -EINVAL; + + /* If lpad is enabled on target and ptrace requests to set / clear elp, do that */ + if (is_indir_lp_enabled(target)) { + if (user_cfi.cfi_status.cfi_state & + PTRACE_CFI_ELP_STATE) /* set elp state */ + regs->status |= SR_ELP; + else + regs->status &= ~SR_ELP; /* clear elp state */ + } + + /* If shadow stack enabled on target, set new shadow stack pointer */ + if (is_shstk_enabled(target) && + (user_cfi.cfi_status.cfi_state & PTRACE_CFI_SS_PTR_STATE)) + set_active_shstk(target, user_cfi.shstk_ptr); + + return 0; +} +#endif + static struct user_regset riscv_user_regset[] __ro_after_init = { [REGSET_X] = { USER_REGSET_NOTE_TYPE(PRSTATUS), @@ -234,6 +319,16 @@ static struct user_regset riscv_user_regset[] __ro_after_init = { .set = tagged_addr_ctrl_set, }, #endif +#ifdef CONFIG_RISCV_USER_CFI + [REGSET_CFI] = { + .core_note_type = NT_RISCV_USER_CFI, + .align = sizeof(__u64), + .n = sizeof(struct user_cfi_state) / sizeof(__u64), + .size = sizeof(__u64), + .regset_get = riscv_cfi_get, + .set = riscv_cfi_set, + }, +#endif }; static const struct user_regset_view riscv_user_native_view = { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 819ded2d39de..ee30dcd80901 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -545,6 +545,8 @@ typedef struct elf64_shdr { #define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */ #define NN_RISCV_TAGGED_ADDR_CTRL "LINUX" #define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */ +#define NN_RISCV_USER_CFI "LINUX" +#define NT_RISCV_USER_CFI 0x903 /* RISC-V shadow stack state */ #define NN_LOONGARCH_CPUCFG "LINUX" #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NN_LOONGARCH_CSR "LINUX" From 462a94fb8ae8ba0d4d3901c7283b4af052ab8804 Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Sun, 25 Jan 2026 21:09:55 -0700 Subject: [PATCH 22/42] riscv: hwprobe: add support for RISCV_HWPROBE_KEY_IMA_EXT_1 We've run out of bits to describe RISC-V ISA extensions in our initial hwprobe key, RISCV_HWPROBE_KEY_IMA_EXT_0. So, let's add RISCV_HWPROBE_KEY_IMA_EXT_1, along with the framework to set the appropriate hwprobe tuple, and add testing for it. Based on a suggestion from Andrew Jones , also fix the documentation for RISCV_HWPROBE_KEY_IMA_EXT_0. 
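For illustration (not part of this change), user space can query the new key
roughly as follows, assuming installed kernel headers that provide
__NR_riscv_hwprobe and RISCV_HWPROBE_KEY_IMA_EXT_1:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>	/* struct riscv_hwprobe, RISCV_HWPROBE_* keys */

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_1 };

	/* cpusetsize == 0 and cpus == NULL: query behaviour common to all online CPUs */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0)
		return 1;

	/* older kernels report an unknown key by setting .key to -1 */
	if (pair.key < 0) {
		printf("RISCV_HWPROBE_KEY_IMA_EXT_1 not supported by this kernel\n");
		return 0;
	}

	printf("IMA_EXT_1: 0x%llx\n", (unsigned long long)pair.value);
	return 0;
}
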
Reviewed-by: Andrew Jones Signed-off-by: Paul Walmsley --- Documentation/arch/riscv/hwprobe.rst | 6 +- arch/riscv/include/asm/hwprobe.h | 3 +- arch/riscv/include/uapi/asm/hwprobe.h | 1 + arch/riscv/kernel/sys_hwprobe.c | 169 +++++++++++------- .../selftests/riscv/hwprobe/which-cpus.c | 18 +- 5 files changed, 121 insertions(+), 76 deletions(-) diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst index 641ec4abb906..c420a8349bc6 100644 --- a/Documentation/arch/riscv/hwprobe.rst +++ b/Documentation/arch/riscv/hwprobe.rst @@ -67,7 +67,7 @@ The following keys are defined: programs (it may still be executed in userspace via a kernel-controlled mechanism such as the vDSO). -* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing the extensions +* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing extensions that are compatible with the :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: base system behavior. @@ -387,3 +387,7 @@ The following keys are defined: * :c:macro:`RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE`: An unsigned int which represents the size of the Zicbop block in bytes. + +* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_1`: A bitmask containing additional + extensions that are compatible with the + :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: base system behavior. diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h index 8c572a464719..8b9f5e1cf4cb 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -8,7 +8,7 @@ #include -#define RISCV_HWPROBE_MAX_KEY 15 +#define RISCV_HWPROBE_MAX_KEY 16 static inline bool riscv_hwprobe_key_is_valid(__s64 key) { @@ -20,6 +20,7 @@ static inline bool hwprobe_key_is_bitmask(__s64 key) switch (key) { case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: case RISCV_HWPROBE_KEY_IMA_EXT_0: + case RISCV_HWPROBE_KEY_IMA_EXT_1: case RISCV_HWPROBE_KEY_CPUPERF_0: case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0: case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0: diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index cd3c126730c3..ed2621a5a47d 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -113,6 +113,7 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0 13 #define RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0 14 #define RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE 15 +#define RISCV_HWPROBE_KEY_IMA_EXT_1 16 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. 
*/ /* Flags */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index e6787ba7f2fc..53731ace7984 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -24,6 +24,14 @@ #include +#define EXT_KEY(isa_arg, ext, pv, missing) \ + do { \ + if (__riscv_isa_extension_available(isa_arg, RISCV_ISA_EXT_##ext)) \ + pv |= RISCV_HWPROBE_EXT_##ext; \ + else \ + missing |= RISCV_HWPROBE_EXT_##ext; \ + } while (false) + static void hwprobe_arch_id(struct riscv_hwprobe *pair, const struct cpumask *cpus) { @@ -93,90 +101,109 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, for_each_cpu(cpu, cpus) { struct riscv_isainfo *isainfo = &hart_isa[cpu]; -#define EXT_KEY(ext) \ - do { \ - if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ - pair->value |= RISCV_HWPROBE_EXT_##ext; \ - else \ - missing |= RISCV_HWPROBE_EXT_##ext; \ - } while (false) - /* * Only use EXT_KEY() for extensions which can be exposed to userspace, * regardless of the kernel's configuration, as no other checks, besides * presence in the hart_isa bitmap, are made. */ - EXT_KEY(ZAAMO); - EXT_KEY(ZABHA); - EXT_KEY(ZACAS); - EXT_KEY(ZALASR); - EXT_KEY(ZALRSC); - EXT_KEY(ZAWRS); - EXT_KEY(ZBA); - EXT_KEY(ZBB); - EXT_KEY(ZBC); - EXT_KEY(ZBKB); - EXT_KEY(ZBKC); - EXT_KEY(ZBKX); - EXT_KEY(ZBS); - EXT_KEY(ZCA); - EXT_KEY(ZCB); - EXT_KEY(ZCLSD); - EXT_KEY(ZCMOP); - EXT_KEY(ZICBOM); - EXT_KEY(ZICBOP); - EXT_KEY(ZICBOZ); - EXT_KEY(ZICNTR); - EXT_KEY(ZICOND); - EXT_KEY(ZIHINTNTL); - EXT_KEY(ZIHINTPAUSE); - EXT_KEY(ZIHPM); - EXT_KEY(ZILSD); - EXT_KEY(ZIMOP); - EXT_KEY(ZKND); - EXT_KEY(ZKNE); - EXT_KEY(ZKNH); - EXT_KEY(ZKSED); - EXT_KEY(ZKSH); - EXT_KEY(ZKT); - EXT_KEY(ZTSO); + EXT_KEY(isainfo->isa, ZAAMO, pair->value, missing); + EXT_KEY(isainfo->isa, ZABHA, pair->value, missing); + EXT_KEY(isainfo->isa, ZACAS, pair->value, missing); + EXT_KEY(isainfo->isa, ZALASR, pair->value, missing); + EXT_KEY(isainfo->isa, ZALRSC, pair->value, missing); + EXT_KEY(isainfo->isa, ZAWRS, pair->value, missing); + EXT_KEY(isainfo->isa, ZBA, pair->value, missing); + EXT_KEY(isainfo->isa, ZBB, pair->value, missing); + EXT_KEY(isainfo->isa, ZBC, pair->value, missing); + EXT_KEY(isainfo->isa, ZBKB, pair->value, missing); + EXT_KEY(isainfo->isa, ZBKC, pair->value, missing); + EXT_KEY(isainfo->isa, ZBKX, pair->value, missing); + EXT_KEY(isainfo->isa, ZBS, pair->value, missing); + EXT_KEY(isainfo->isa, ZCA, pair->value, missing); + EXT_KEY(isainfo->isa, ZCB, pair->value, missing); + EXT_KEY(isainfo->isa, ZCLSD, pair->value, missing); + EXT_KEY(isainfo->isa, ZCMOP, pair->value, missing); + EXT_KEY(isainfo->isa, ZICBOM, pair->value, missing); + EXT_KEY(isainfo->isa, ZICBOP, pair->value, missing); + EXT_KEY(isainfo->isa, ZICBOZ, pair->value, missing); + EXT_KEY(isainfo->isa, ZICNTR, pair->value, missing); + EXT_KEY(isainfo->isa, ZICOND, pair->value, missing); + EXT_KEY(isainfo->isa, ZIHINTNTL, pair->value, missing); + EXT_KEY(isainfo->isa, ZIHINTPAUSE, pair->value, missing); + EXT_KEY(isainfo->isa, ZIHPM, pair->value, missing); + EXT_KEY(isainfo->isa, ZILSD, pair->value, missing); + EXT_KEY(isainfo->isa, ZIMOP, pair->value, missing); + EXT_KEY(isainfo->isa, ZKND, pair->value, missing); + EXT_KEY(isainfo->isa, ZKNE, pair->value, missing); + EXT_KEY(isainfo->isa, ZKNH, pair->value, missing); + EXT_KEY(isainfo->isa, ZKSED, pair->value, missing); + EXT_KEY(isainfo->isa, ZKSH, pair->value, missing); + EXT_KEY(isainfo->isa, ZKT, pair->value, missing); + EXT_KEY(isainfo->isa, ZTSO, pair->value, 
missing); /* * All the following extensions must depend on the kernel * support of V. */ if (has_vector()) { - EXT_KEY(ZVBB); - EXT_KEY(ZVBC); - EXT_KEY(ZVE32F); - EXT_KEY(ZVE32X); - EXT_KEY(ZVE64D); - EXT_KEY(ZVE64F); - EXT_KEY(ZVE64X); - EXT_KEY(ZVFBFMIN); - EXT_KEY(ZVFBFWMA); - EXT_KEY(ZVFH); - EXT_KEY(ZVFHMIN); - EXT_KEY(ZVKB); - EXT_KEY(ZVKG); - EXT_KEY(ZVKNED); - EXT_KEY(ZVKNHA); - EXT_KEY(ZVKNHB); - EXT_KEY(ZVKSED); - EXT_KEY(ZVKSH); - EXT_KEY(ZVKT); + EXT_KEY(isainfo->isa, ZVBB, pair->value, missing); + EXT_KEY(isainfo->isa, ZVBC, pair->value, missing); + EXT_KEY(isainfo->isa, ZVE32F, pair->value, missing); + EXT_KEY(isainfo->isa, ZVE32X, pair->value, missing); + EXT_KEY(isainfo->isa, ZVE64D, pair->value, missing); + EXT_KEY(isainfo->isa, ZVE64F, pair->value, missing); + EXT_KEY(isainfo->isa, ZVE64X, pair->value, missing); + EXT_KEY(isainfo->isa, ZVFBFMIN, pair->value, missing); + EXT_KEY(isainfo->isa, ZVFBFWMA, pair->value, missing); + EXT_KEY(isainfo->isa, ZVFH, pair->value, missing); + EXT_KEY(isainfo->isa, ZVFHMIN, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKB, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKG, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKNED, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKNHA, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKNHB, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKSED, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKSH, pair->value, missing); + EXT_KEY(isainfo->isa, ZVKT, pair->value, missing); } - EXT_KEY(ZCD); - EXT_KEY(ZCF); - EXT_KEY(ZFA); - EXT_KEY(ZFBFMIN); - EXT_KEY(ZFH); - EXT_KEY(ZFHMIN); + EXT_KEY(isainfo->isa, ZCD, pair->value, missing); + EXT_KEY(isainfo->isa, ZCF, pair->value, missing); + EXT_KEY(isainfo->isa, ZFA, pair->value, missing); + EXT_KEY(isainfo->isa, ZFBFMIN, pair->value, missing); + EXT_KEY(isainfo->isa, ZFH, pair->value, missing); + EXT_KEY(isainfo->isa, ZFHMIN, pair->value, missing); if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) - EXT_KEY(SUPM); -#undef EXT_KEY + EXT_KEY(isainfo->isa, SUPM, pair->value, missing); + } + + /* Now turn off reporting features if any CPU is missing it. */ + pair->value &= ~missing; +} + +static void hwprobe_isa_ext1(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + int cpu; + u64 missing = 0; + + pair->value = 0; + + /* + * Loop through and record extensions that 1) anyone has, and 2) anyone + * doesn't have. + */ + for_each_cpu(cpu, cpus) { + /* struct riscv_isainfo *isainfo = &hart_isa[cpu]; */ + + /* + * Only use EXT_KEY() for extensions which can be + * exposed to userspace, regardless of the kernel's + * configuration, as no other checks, besides presence + * in the hart_isa bitmap, are made. + */ + /* Nothing here yet */ } /* Now turn off reporting features if any CPU is missing it. 
*/ @@ -287,6 +314,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair, hwprobe_isa_ext0(pair, cpus); break; + case RISCV_HWPROBE_KEY_IMA_EXT_1: + hwprobe_isa_ext1(pair, cpus); + break; + case RISCV_HWPROBE_KEY_CPUPERF_0: case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF: pair->value = hwprobe_misaligned(cpus); diff --git a/tools/testing/selftests/riscv/hwprobe/which-cpus.c b/tools/testing/selftests/riscv/hwprobe/which-cpus.c index 3ab53067e8dd..587feb198c04 100644 --- a/tools/testing/selftests/riscv/hwprobe/which-cpus.c +++ b/tools/testing/selftests/riscv/hwprobe/which-cpus.c @@ -83,9 +83,9 @@ static void do_which_cpus(int argc, char **argv, cpu_set_t *cpus) int main(int argc, char **argv) { - struct riscv_hwprobe pairs[2]; + struct riscv_hwprobe pairs[3]; cpu_set_t cpus_aff, cpus; - __u64 ext0_all; + __u64 ext0_all, ext1_all; long rc; rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus_aff); @@ -112,6 +112,11 @@ int main(int argc, char **argv) assert(rc == 0 && pairs[0].key == RISCV_HWPROBE_KEY_IMA_EXT_0); ext0_all = pairs[0].value; + pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_1, }; + rc = riscv_hwprobe(pairs, 1, 0, NULL, 0); + assert(rc == 0 && pairs[0].key == RISCV_HWPROBE_KEY_IMA_EXT_1); + ext1_all = pairs[0].value; + pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, }; CPU_ZERO(&cpus); rc = riscv_hwprobe(pairs, 1, 0, (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS); @@ -134,20 +139,23 @@ int main(int argc, char **argv) pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, }; pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ext0_all, }; + pairs[2] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_1, .value = ext1_all, }; CPU_ZERO(&cpus); - rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS); + rc = riscv_hwprobe(pairs, 3, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS); ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == sysconf(_SC_NPROCESSORS_ONLN), "set all cpus\n"); pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, }; pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ext0_all, }; + pairs[2] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_1, .value = ext1_all, }; memcpy(&cpus, &cpus_aff, sizeof(cpu_set_t)); - rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS); + rc = riscv_hwprobe(pairs, 3, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS); ksft_test_result(rc == 0 && CPU_EQUAL(&cpus, &cpus_aff), "set all affinity cpus\n"); pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, }; pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ~ext0_all, }; + pairs[2] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_1, .value = ~ext1_all, }; memcpy(&cpus, &cpus_aff, sizeof(cpu_set_t)); - rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS); + rc = riscv_hwprobe(pairs, 3, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS); ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == 0, "clear all cpus\n"); ksft_finished(); From 30c3099036a9544ec24e899abc8a81a7cc030f99 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: 
Sun, 25 Jan 2026 21:09:55 -0700 Subject: [PATCH 23/42] riscv/hwprobe: add zicfilp / zicfiss enumeration in hwprobe Add enumeration of the zicfilp and zicfiss extensions in the hwprobe syscall. Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-20-b55691eacf4f@rivosinc.com [pjw@kernel.org: updated to apply; extend into RISCV_HWPROBE_KEY_IMA_EXT_1; clean patch description] Signed-off-by: Paul Walmsley --- arch/riscv/include/uapi/asm/hwprobe.h | 3 +++ arch/riscv/kernel/sys_hwprobe.c | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index ed2621a5a47d..9139edba0aec 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -86,6 +86,7 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_EXT_ZICBOP (1ULL << 60) #define RISCV_HWPROBE_EXT_ZILSD (1ULL << 61) #define RISCV_HWPROBE_EXT_ZCLSD (1ULL << 62) +#define RISCV_HWPROBE_EXT_ZICFILP (1ULL << 63) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) @@ -114,6 +115,8 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0 14 #define RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE 15 #define RISCV_HWPROBE_KEY_IMA_EXT_1 16 +#define RISCV_HWPROBE_EXT_ZICFISS (1ULL << 0) + /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ /* Flags */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index 53731ace7984..1659d31fd288 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -126,6 +126,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, EXT_KEY(isainfo->isa, ZICBOM, pair->value, missing); EXT_KEY(isainfo->isa, ZICBOP, pair->value, missing); EXT_KEY(isainfo->isa, ZICBOZ, pair->value, missing); + EXT_KEY(isainfo->isa, ZICFILP, pair->value, missing); EXT_KEY(isainfo->isa, ZICNTR, pair->value, missing); EXT_KEY(isainfo->isa, ZICOND, pair->value, missing); EXT_KEY(isainfo->isa, ZIHINTNTL, pair->value, missing); @@ -195,7 +196,7 @@ static void hwprobe_isa_ext1(struct riscv_hwprobe *pair, * doesn't have. */ for_each_cpu(cpu, cpus) { - /* struct riscv_isainfo *isainfo = &hart_isa[cpu]; */ + struct riscv_isainfo *isainfo = &hart_isa[cpu]; /* * Only use EXT_KEY() for extensions which can be @@ -203,7 +204,7 @@ static void hwprobe_isa_ext1(struct riscv_hwprobe *pair, * configuration, as no other checks, besides presence * in the hart_isa bitmap, are made. */ - /* Nothing here yet */ + EXT_KEY(isainfo->isa, ZICFISS, pair->value, missing); } /* Now turn off reporting features if any CPU is missing it. */ From c9b859c4d8f56c014b3d5fbd1bcfb916c34955a1 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:55 -0700 Subject: [PATCH 24/42] riscv: add kernel command line option to opt out of user CFI Add a kernel command line option to disable part or all of user CFI. User backward CFI and forward CFI can be controlled independently. 
The kernel command line parameter "riscv_nousercfi" can take the following values: - "all" : Disable forward and backward cfi both - "bcfi" : Disable backward cfi - "fcfi" : Disable forward cfi Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-21-b55691eacf4f@rivosinc.com [pjw@kernel.org: fixed warnings from checkpatch; cleaned up patch description, doc, printk text] Signed-off-by: Paul Walmsley --- .../admin-guide/kernel-parameters.txt | 8 +++ arch/riscv/include/asm/usercfi.h | 9 +++ arch/riscv/kernel/cpufeature.c | 7 ++- arch/riscv/kernel/usercfi.c | 58 +++++++++++++++---- 4 files changed, 69 insertions(+), 13 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 1058f2a6d6a8..1a355e701a80 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6606,6 +6606,14 @@ Kernel parameters replacement properties are not found. See the Kconfig entry for RISCV_ISA_FALLBACK. + riscv_nousercfi= + all Disable user CFI ABI to userspace even if cpu extension + are available. + bcfi Disable user backward CFI ABI to userspace even if + the shadow stack extension is available. + fcfi Disable user forward CFI ABI to userspace even if the + landing pad extension is available. + ro [KNL] Mount root device read-only on boot rodata= [KNL,EARLY] diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h index ec4b8a53eb74..7495baae1e3c 100644 --- a/arch/riscv/include/asm/usercfi.h +++ b/arch/riscv/include/asm/usercfi.h @@ -5,6 +5,10 @@ #ifndef _ASM_RISCV_USERCFI_H #define _ASM_RISCV_USERCFI_H +#define CMDLINE_DISABLE_RISCV_USERCFI_FCFI 1 +#define CMDLINE_DISABLE_RISCV_USERCFI_BCFI 2 +#define CMDLINE_DISABLE_RISCV_USERCFI 3 + #ifndef __ASSEMBLER__ #include #include @@ -13,6 +17,8 @@ struct task_struct; struct kernel_clone_args; +extern unsigned long riscv_nousercfi; + #ifdef CONFIG_RISCV_USER_CFI struct cfi_state { unsigned long ubcfi_en : 1; /* Enable for backward cfi. 
*/ @@ -83,6 +89,9 @@ void set_indir_lp_lock(struct task_struct *task); #endif /* CONFIG_RISCV_USER_CFI */ +bool is_user_shstk_enabled(void); +bool is_user_lpad_enabled(void); + #endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_USERCFI_H */ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 6827a577f870..1734f9a4c2fd 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -28,6 +28,7 @@ #include #include #include +#include #define NUM_ALPHA_EXTS ('z' - 'a' + 1) @@ -299,7 +300,8 @@ static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data, static int riscv_cfilp_validate(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { - if (!IS_ENABLED(CONFIG_RISCV_USER_CFI)) + if (!IS_ENABLED(CONFIG_RISCV_USER_CFI) || + (riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_FCFI)) return -EINVAL; return 0; @@ -308,7 +310,8 @@ static int riscv_cfilp_validate(const struct riscv_isa_ext_data *data, static int riscv_cfiss_validate(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { - if (!IS_ENABLED(CONFIG_RISCV_USER_CFI)) + if (!IS_ENABLED(CONFIG_RISCV_USER_CFI) || + (riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_BCFI)) return -EINVAL; return 0; diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c index 7cec00ca1df4..1adba746f164 100644 --- a/arch/riscv/kernel/usercfi.c +++ b/arch/riscv/kernel/usercfi.c @@ -17,6 +17,8 @@ #include #include +unsigned long riscv_nousercfi __read_mostly; + #define SHSTK_ENTRY_SIZE sizeof(void *) bool is_shstk_enabled(struct task_struct *task) @@ -59,7 +61,7 @@ unsigned long get_active_shstk(struct task_struct *task) void set_shstk_status(struct task_struct *task, bool enable) { - if (!cpu_supports_shadow_stack()) + if (!is_user_shstk_enabled()) return; task->thread_info.user_cfi_state.ubcfi_en = enable ? 1 : 0; @@ -89,7 +91,7 @@ bool is_indir_lp_locked(struct task_struct *task) void set_indir_lp_status(struct task_struct *task, bool enable) { - if (!cpu_supports_indirect_br_lp_instr()) + if (!is_user_lpad_enabled()) return; task->thread_info.user_cfi_state.ufcfi_en = enable ? 
1 : 0; @@ -257,7 +259,7 @@ SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsi bool set_tok = flags & SHADOW_STACK_SET_TOKEN; unsigned long aligned_size = 0; - if (!cpu_supports_shadow_stack()) + if (!is_user_shstk_enabled()) return -EOPNOTSUPP; /* Anything other than set token should result in invalid param */ @@ -304,7 +306,7 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long addr, size; /* If shadow stack is not supported, return 0 */ - if (!cpu_supports_shadow_stack()) + if (!is_user_shstk_enabled()) return 0; /* @@ -350,7 +352,7 @@ void shstk_release(struct task_struct *tsk) { unsigned long base = 0, size = 0; /* If shadow stack is not supported or not enabled, nothing to release */ - if (!cpu_supports_shadow_stack() || !is_shstk_enabled(tsk)) + if (!is_user_shstk_enabled() || !is_shstk_enabled(tsk)) return; /* @@ -379,7 +381,7 @@ int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *st { unsigned long bcfi_status = 0; - if (!cpu_supports_shadow_stack()) + if (!is_user_shstk_enabled()) return -EINVAL; /* this means shadow stack is enabled on the task */ @@ -393,7 +395,7 @@ int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status) unsigned long size = 0, addr = 0; bool enable_shstk = false; - if (!cpu_supports_shadow_stack()) + if (!is_user_shstk_enabled()) return -EINVAL; /* Reject unknown flags */ @@ -446,7 +448,7 @@ int arch_lock_shadow_stack_status(struct task_struct *task, unsigned long arg) { /* If shtstk not supported or not enabled on task, nothing to lock here */ - if (!cpu_supports_shadow_stack() || + if (!is_user_shstk_enabled() || !is_shstk_enabled(task) || arg != 0) return -EINVAL; @@ -459,7 +461,7 @@ int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *sta { unsigned long fcfi_status = 0; - if (!cpu_supports_indirect_br_lp_instr()) + if (!is_user_lpad_enabled()) return -EINVAL; /* indirect branch tracking is enabled on the task or not */ @@ -472,7 +474,7 @@ int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status) { bool enable_indir_lp = false; - if (!cpu_supports_indirect_br_lp_instr()) + if (!is_user_lpad_enabled()) return -EINVAL; /* indirect branch tracking is locked and further can't be modified by user */ @@ -496,7 +498,7 @@ int arch_lock_indir_br_lp_status(struct task_struct *task, * If indirect branch tracking is not supported or not enabled on task, * nothing to lock here */ - if (!cpu_supports_indirect_br_lp_instr() || + if (!is_user_lpad_enabled() || !is_indir_lp_enabled(task) || arg != 0) return -EINVAL; @@ -504,3 +506,37 @@ int arch_lock_indir_br_lp_status(struct task_struct *task, return 0; } + +bool is_user_shstk_enabled(void) +{ + return (cpu_supports_shadow_stack() && + !(riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_BCFI)); +} + +bool is_user_lpad_enabled(void) +{ + return (cpu_supports_indirect_br_lp_instr() && + !(riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_FCFI)); +} + +static int __init setup_global_riscv_enable(char *str) +{ + if (strcmp(str, "all") == 0) + riscv_nousercfi = CMDLINE_DISABLE_RISCV_USERCFI; + + if (strcmp(str, "fcfi") == 0) + riscv_nousercfi |= CMDLINE_DISABLE_RISCV_USERCFI_FCFI; + + if (strcmp(str, "bcfi") == 0) + riscv_nousercfi |= CMDLINE_DISABLE_RISCV_USERCFI_BCFI; + + if (riscv_nousercfi) + pr_info("RISC-V user CFI disabled via cmdline - shadow stack status : %s, landing pad status : %s\n", + (riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_BCFI) ? 
"disabled" : + "enabled", (riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_FCFI) ? + "disabled" : "enabled"); + + return 1; +} + +__setup("riscv_nousercfi=", setup_global_riscv_enable); From 41213bf2ae6c936f51a79986b37f95da9ecbb970 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:55 -0700 Subject: [PATCH 25/42] riscv: enable kernel access to shadow stack memory via the FWFT SBI call The kernel has to perform shadow stack operations on the user shadow stack. During signal delivery and sigreturn, the shadow stack token must be created and validated respectively. Thus shadow stack access for the kernel must be enabled. In the future, when kernel shadow stacks are enabled, they must be enabled as early as possible for better coverage and to prevent any imbalance between the regular stack and the shadow stack. After 'relocate_enable_mmu' has completed, this is the earliest that it can be enabled. Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-22-b55691eacf4f@rivosinc.com [pjw@kernel.org: updated to apply; cleaned up commit message] Signed-off-by: Paul Walmsley --- arch/riscv/kernel/asm-offsets.c | 6 ++++++ arch/riscv/kernel/head.S | 27 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 8a2b2656cb2f..af827448a609 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -533,4 +533,10 @@ void asm_offsets(void) DEFINE(FREGS_A6, offsetof(struct __arch_ftrace_regs, a6)); DEFINE(FREGS_A7, offsetof(struct __arch_ftrace_regs, a7)); #endif +#ifdef CONFIG_RISCV_SBI + DEFINE(SBI_EXT_FWFT, SBI_EXT_FWFT); + DEFINE(SBI_EXT_FWFT_SET, SBI_EXT_FWFT_SET); + DEFINE(SBI_FWFT_SHADOW_STACK, SBI_FWFT_SHADOW_STACK); + DEFINE(SBI_FWFT_SET_FLAG_LOCK, SBI_FWFT_SET_FLAG_LOCK); +#endif } diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index bdf3352acf4c..9c99c5ad6fe8 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -15,6 +15,7 @@ #include #include #include +#include #include "efi-header.S" __HEAD @@ -170,6 +171,19 @@ secondary_start_sbi: call relocate_enable_mmu #endif call .Lsetup_trap_vector +#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_USER_CFI) + li a7, SBI_EXT_FWFT + li a6, SBI_EXT_FWFT_SET + li a0, SBI_FWFT_SHADOW_STACK + li a1, 1 /* enable supervisor to access shadow stack access */ + li a2, SBI_FWFT_SET_FLAG_LOCK + ecall + beqz a0, 1f + la a1, riscv_nousercfi + li a0, CMDLINE_DISABLE_RISCV_USERCFI_BCFI + REG_S a0, (a1) +1: +#endif scs_load_current call smp_callin #endif /* CONFIG_SMP */ @@ -330,6 +344,19 @@ SYM_CODE_START(_start_kernel) la tp, init_task la sp, init_thread_union + THREAD_SIZE addi sp, sp, -PT_SIZE_ON_STACK +#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_USER_CFI) + li a7, SBI_EXT_FWFT + li a6, SBI_EXT_FWFT_SET + li a0, SBI_FWFT_SHADOW_STACK + li a1, 1 /* enable supervisor to access shadow stack access */ + li a2, SBI_FWFT_SET_FLAG_LOCK + ecall + beqz a0, 1f + la a1, riscv_nousercfi + li a0, CMDLINE_DISABLE_RISCV_USERCFI_BCFI + REG_S a0, (a1) +1: +#endif scs_load_current #ifdef CONFIG_KASAN From 37f57bd3faeac92e898c3381355f4fd2b6a80901 Mon Sep 17 00:00:00 2001 From: Jim Shu Date: Sun, 25 Jan 2026 21:09:56 -0700 Subject: [PATCH 26/42] arch/riscv: compile vdso with landing pad and shadow stack note User mode tasks compiled with Zicfilp may call indirectly into the 
vdso (like hwprobe indirect calls). Add support for compiling landing pads into the vdso. Landing pad instructions in the vdso will be no-ops for tasks which have not enabled landing pads. Furthermore, add support for the C sources of the vdso to be compiled with shadow stack and landing pads enabled as well. Landing pad and shadow stack instructions are emitted only when the VDSO_CFI cflags option is defined during compile. Signed-off-by: Jim Shu Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-23-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description, issues reported by checkpatch] Signed-off-by: Paul Walmsley --- arch/riscv/Makefile | 5 ++- arch/riscv/include/asm/assembler.h | 44 ++++++++++++++++++++++ arch/riscv/kernel/vdso/Makefile | 11 +++++- arch/riscv/kernel/vdso/flush_icache.S | 4 ++ arch/riscv/kernel/vdso/getcpu.S | 4 ++ arch/riscv/kernel/vdso/note.S | 3 ++ arch/riscv/kernel/vdso/rt_sigreturn.S | 4 ++ arch/riscv/kernel/vdso/sys_hwprobe.S | 4 ++ arch/riscv/kernel/vdso/vgetrandom-chacha.S | 5 ++- 9 files changed, 81 insertions(+), 3 deletions(-) diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 4c6de57f65ef..41385bce8cc7 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -81,9 +81,12 @@ riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas # Check if the toolchain supports Zabha riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZABHA) := $(riscv-march-y)_zabha +KBUILD_BASE_ISA = -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/') +export KBUILD_BASE_ISA + # Remove F,D,V from isa string for all. Keep extensions between "fd" and "v" by # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) -KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/') +KBUILD_CFLAGS += $(KBUILD_BASE_ISA) KBUILD_AFLAGS += -march=$(riscv-march-y) diff --git a/arch/riscv/include/asm/assembler.h b/arch/riscv/include/asm/assembler.h index 16931712beab..a8df1999118b 100644 --- a/arch/riscv/include/asm/assembler.h +++ b/arch/riscv/include/asm/assembler.h @@ -80,3 +80,47 @@ .endm #endif /* __ASM_ASSEMBLER_H */ + +#if defined(VDSO_CFI) && (__riscv_xlen == 64) +.macro vdso_lpad, label = 0 +lpad \label +.endm +#else +.macro vdso_lpad, label = 0 +.endm +#endif + +/* + * This macro emits a program property note section identifying + * architecture features which require special handling, mainly for + * use in assembly files included in the VDSO. 
+ */ +#define NT_GNU_PROPERTY_TYPE_0 5 +#define GNU_PROPERTY_RISCV_FEATURE_1_AND 0xc0000000 + +#define GNU_PROPERTY_RISCV_FEATURE_1_ZICFILP BIT(0) +#define GNU_PROPERTY_RISCV_FEATURE_1_ZICFISS BIT(1) + +#if defined(VDSO_CFI) && (__riscv_xlen == 64) +#define GNU_PROPERTY_RISCV_FEATURE_1_DEFAULT \ + (GNU_PROPERTY_RISCV_FEATURE_1_ZICFILP | GNU_PROPERTY_RISCV_FEATURE_1_ZICFISS) +#endif + +#ifdef GNU_PROPERTY_RISCV_FEATURE_1_DEFAULT +.macro emit_riscv_feature_1_and, feat = GNU_PROPERTY_RISCV_FEATURE_1_DEFAULT + .pushsection .note.gnu.property, "a" + .p2align 3 + .word 4 + .word 16 + .word NT_GNU_PROPERTY_TYPE_0 + .asciz "GNU" + .word GNU_PROPERTY_RISCV_FEATURE_1_AND + .word 4 + .word \feat + .word 0 + .popsection +.endm +#else +.macro emit_riscv_feature_1_and, feat = 0 +.endm +#endif diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 9ebb5e590f93..272f1d837a80 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -17,6 +17,11 @@ ifdef CONFIG_VDSO_GETRANDOM vdso-syms += getrandom endif +ifdef VDSO_CFI_BUILD +CFI_MARCH = _zicfilp_zicfiss +CFI_FULL = -fcf-protection=full +endif + # Files to link into the vdso obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o @@ -27,6 +32,10 @@ endif ccflags-y := -fno-stack-protector ccflags-y += -DDISABLE_BRANCH_PROFILING ccflags-y += -fno-builtin +ccflags-y += $(KBUILD_BASE_ISA)$(CFI_MARCH) +ccflags-y += $(CFI_FULL) +asflags-y += $(KBUILD_BASE_ISA)$(CFI_MARCH) +asflags-y += $(CFI_FULL) ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y) @@ -79,7 +88,7 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE # The DSO images are built using a special linker script # Make sure only to export the intended __vdso_xxx symbol offsets. quiet_cmd_vdsold_and_check = VDSOLD $@ - cmd_vdsold_and_check = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ + cmd_vdsold_and_check = $(LD) $(CFI_FULL) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ rm $@.tmp && \ $(cmd_vdso_check) diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S index 8f884227e8bc..e4c56970905e 100644 --- a/arch/riscv/kernel/vdso/flush_icache.S +++ b/arch/riscv/kernel/vdso/flush_icache.S @@ -5,11 +5,13 @@ #include #include +#include .text /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */ SYM_FUNC_START(__vdso_flush_icache) .cfi_startproc + vdso_lpad #ifdef CONFIG_SMP li a7, __NR_riscv_flush_icache ecall @@ -20,3 +22,5 @@ SYM_FUNC_START(__vdso_flush_icache) ret .cfi_endproc SYM_FUNC_END(__vdso_flush_icache) + +emit_riscv_feature_1_and diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S index 9c1bd531907f..5c1ecc4e1465 100644 --- a/arch/riscv/kernel/vdso/getcpu.S +++ b/arch/riscv/kernel/vdso/getcpu.S @@ -5,14 +5,18 @@ #include #include +#include .text /* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */ SYM_FUNC_START(__vdso_getcpu) .cfi_startproc + vdso_lpad /* For now, just do the syscall. 
*/ li a7, __NR_getcpu ecall ret .cfi_endproc SYM_FUNC_END(__vdso_getcpu) + +emit_riscv_feature_1_and diff --git a/arch/riscv/kernel/vdso/note.S b/arch/riscv/kernel/vdso/note.S index 2a956c942211..3d92cc956b95 100644 --- a/arch/riscv/kernel/vdso/note.S +++ b/arch/riscv/kernel/vdso/note.S @@ -6,7 +6,10 @@ #include #include +#include ELFNOTE_START(Linux, 0, "a") .long LINUX_VERSION_CODE ELFNOTE_END + +emit_riscv_feature_1_and diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S index 3dc022aa8931..e82987dc3739 100644 --- a/arch/riscv/kernel/vdso/rt_sigreturn.S +++ b/arch/riscv/kernel/vdso/rt_sigreturn.S @@ -5,12 +5,16 @@ #include #include +#include .text SYM_FUNC_START(__vdso_rt_sigreturn) .cfi_startproc .cfi_signal_frame + vdso_lpad li a7, __NR_rt_sigreturn ecall .cfi_endproc SYM_FUNC_END(__vdso_rt_sigreturn) + +emit_riscv_feature_1_and diff --git a/arch/riscv/kernel/vdso/sys_hwprobe.S b/arch/riscv/kernel/vdso/sys_hwprobe.S index 77e57f830521..f1694451a60c 100644 --- a/arch/riscv/kernel/vdso/sys_hwprobe.S +++ b/arch/riscv/kernel/vdso/sys_hwprobe.S @@ -3,13 +3,17 @@ #include #include +#include .text SYM_FUNC_START(riscv_hwprobe) .cfi_startproc + vdso_lpad li a7, __NR_riscv_hwprobe ecall ret .cfi_endproc SYM_FUNC_END(riscv_hwprobe) + +emit_riscv_feature_1_and diff --git a/arch/riscv/kernel/vdso/vgetrandom-chacha.S b/arch/riscv/kernel/vdso/vgetrandom-chacha.S index 5f0dad8f2373..916ab30a88f7 100644 --- a/arch/riscv/kernel/vdso/vgetrandom-chacha.S +++ b/arch/riscv/kernel/vdso/vgetrandom-chacha.S @@ -7,6 +7,7 @@ #include #include +#include .text @@ -74,7 +75,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack) #define _20 20, 20, 20, 20 #define _24 24, 24, 24, 24 #define _25 25, 25, 25, 25 - + vdso_lpad /* * The ABI requires s0-s9 saved. * This does not violate the stack-less requirement: no sensitive data @@ -247,3 +248,5 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack) ret SYM_FUNC_END(__arch_chacha20_blocks_nostack) + +emit_riscv_feature_1_and From ccad8c1336b6511e3c7ca5c02f797b1fd2cf67e1 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:56 -0700 Subject: [PATCH 27/42] arch/riscv: add dual vdso creation logic and select vdso based on hw Shadow stack instructions are taken from the Zimop ISA extension, which is mandated on RVA23. Any userspace with shadow stack instructions in it will fault on hardware that doesn't have support for Zimop. Thus, a shadow stack-enabled userspace can't be run on hardware that doesn't support Zimop. It's not known how Linux userspace providers will respond to this kind of binary fragmentation. In order to keep kernel portable across different hardware, 'arch/riscv/kernel/vdso_cfi' is created which has Makefile logic to compile 'arch/riscv/kernel/vdso' sources with CFI flags, and 'arch/riscv/kernel/vdso.c' is modified to select the appropriate vdso depending on whether the underlying CPU implements the Zimop extension. Since the offset of vdso symbols will change due to having two different vdso binaries, there is added logic to include a new generated vdso offset header and dynamically select the offset (like for rt_sigreturn). 
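As an illustrative sketch (not taken from this patch), userspace that wants to
know whether the CFI-instrumented vdso variant can be relied on could probe the
Zimop extension through the riscv_hwprobe interface; the constant names below
are assumed to come from the kernel's uapi <asm/hwprobe.h>, including
RISCV_HWPROBE_EXT_ZIMOP:

  #include <asm/hwprobe.h>      /* struct riscv_hwprobe, RISCV_HWPROBE_* */
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdbool.h>

  /* Probe the base ISA extension bitmap and test for Zimop. */
  static bool cpu_has_zimop(void)
  {
          struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

          /* riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags) */
          if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
                  return false;

          return pair.value & RISCV_HWPROBE_EXT_ZIMOP;
  }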
Signed-off-by: Deepak Gupta Acked-by: Charles Mirabile Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-24-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley --- arch/riscv/Makefile | 3 +++ arch/riscv/include/asm/vdso.h | 13 +++++++++- arch/riscv/kernel/Makefile | 1 + arch/riscv/kernel/vdso.c | 7 ++++++ arch/riscv/kernel/vdso/Makefile | 29 +++++++++++++++------- arch/riscv/kernel/vdso/gen_vdso_offsets.sh | 4 ++- arch/riscv/kernel/vdso_cfi/Makefile | 25 +++++++++++++++++++ arch/riscv/kernel/vdso_cfi/vdso-cfi.S | 11 ++++++++ 8 files changed, 82 insertions(+), 11 deletions(-) create mode 100644 arch/riscv/kernel/vdso_cfi/Makefile create mode 100644 arch/riscv/kernel/vdso_cfi/vdso-cfi.S diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 41385bce8cc7..371da75a47f9 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -161,6 +161,8 @@ ifeq ($(CONFIG_MMU),y) prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h + $(if $(CONFIG_RISCV_USER_CFI),$(Q)$(MAKE) \ + $(build)=arch/riscv/kernel/vdso_cfi include/generated/vdso-cfi-offsets.h) $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \ $(build)=arch/riscv/kernel/compat_vdso include/generated/compat_vdso-offsets.h) @@ -168,6 +170,7 @@ endif endif vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg +vdso-install-$(CONFIG_RISCV_USER_CFI) += arch/riscv/kernel/vdso_cfi/vdso-cfi.so.dbg vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst Image.xz loader loader.bin xipImage vmlinuz.efi diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h index f80357fe24d1..35bf830a5576 100644 --- a/arch/riscv/include/asm/vdso.h +++ b/arch/riscv/include/asm/vdso.h @@ -18,9 +18,19 @@ #ifndef __ASSEMBLER__ #include +#ifdef CONFIG_RISCV_USER_CFI +#include +#endif +#ifdef CONFIG_RISCV_USER_CFI #define VDSO_SYMBOL(base, name) \ - (void __user *)((unsigned long)(base) + __vdso_##name##_offset) + (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZIMOP) ? 
\ + (void __user *)((unsigned long)(base) + __vdso_##name##_cfi_offset) : \ + (void __user *)((unsigned long)(base) + __vdso_##name##_offset)) +#else +#define VDSO_SYMBOL(base, name) \ + ((void __user *)((unsigned long)(base) + __vdso_##name##_offset)) +#endif #ifdef CONFIG_COMPAT #include @@ -33,6 +43,7 @@ extern char compat_vdso_start[], compat_vdso_end[]; #endif /* CONFIG_COMPAT */ extern char vdso_start[], vdso_end[]; +extern char vdso_cfi_start[], vdso_cfi_end[]; #endif /* !__ASSEMBLER__ */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 0a0a3fb02d63..cabb99cadfb6 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -73,6 +73,7 @@ obj-y += vendor_extensions/ obj-y += probes/ obj-y += tests/ obj-$(CONFIG_MMU) += vdso.o vdso/ +obj-$(CONFIG_RISCV_USER_CFI) += vdso_cfi/ obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index 3a8e038b10a2..43f70198ac3c 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -98,6 +98,13 @@ static struct __vdso_info compat_vdso_info __ro_after_init = { static int __init vdso_init(void) { + /* Hart implements zimop, expose cfi compiled vdso */ + if (IS_ENABLED(CONFIG_RISCV_USER_CFI) && + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZIMOP)) { + vdso_info.vdso_code_start = vdso_cfi_start; + vdso_info.vdso_code_end = vdso_cfi_end; + } + __vdso_init(&vdso_info); #ifdef CONFIG_COMPAT __vdso_init(&compat_vdso_info); diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 272f1d837a80..a842dc034571 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -20,6 +20,10 @@ endif ifdef VDSO_CFI_BUILD CFI_MARCH = _zicfilp_zicfiss CFI_FULL = -fcf-protection=full +CFI_SUFFIX = -cfi +OFFSET_SUFFIX = _cfi +ccflags-y += -DVDSO_CFI=1 +asflags-y += -DVDSO_CFI=1 endif # Files to link into the vdso @@ -48,13 +52,20 @@ endif CFLAGS_hwprobe.o += -fPIC # Build rules -targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds +vdso_offsets := vdso$(if $(VDSO_CFI_BUILD),$(CFI_SUFFIX),)-offsets.h +vdso_o := vdso$(if $(VDSO_CFI_BUILD),$(CFI_SUFFIX),).o +vdso_so := vdso$(if $(VDSO_CFI_BUILD),$(CFI_SUFFIX),).so +vdso_so_dbg := vdso$(if $(VDSO_CFI_BUILD),$(CFI_SUFFIX),).so.dbg +vdso_lds := vdso.lds + +targets := $(obj-vdso) $(vdso_so) $(vdso_so_dbg) $(vdso_lds) + obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -obj-y += vdso.o -CPPFLAGS_vdso.lds += -P -C -U$(ARCH) +obj-y += vdso$(if $(VDSO_CFI_BUILD),$(CFI_SUFFIX),).o +CPPFLAGS_$(vdso_lds) += -P -C -U$(ARCH) ifneq ($(filter vgettimeofday, $(vdso-syms)),) -CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY +CPPFLAGS_$(vdso_lds) += -DHAS_VGETTIMEOFDAY endif # Disable -pg to prevent insert call site @@ -63,12 +74,12 @@ CFLAGS_REMOVE_getrandom.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) CFLAGS_REMOVE_hwprobe.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) # Force dependency -$(obj)/vdso.o: $(obj)/vdso.so +$(obj)/$(vdso_o): $(obj)/$(vdso_so) # link rule for the .so file, .lds has to be first -$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE +$(obj)/$(vdso_so_dbg): $(obj)/$(vdso_lds) $(obj-vdso) FORCE $(call if_changed,vdsold_and_check) -LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \ +LDFLAGS_$(vdso_so_dbg) = -shared -soname=linux-vdso.so.1 \ --build-id=sha1 --eh-frame-hdr # strip rule for the .so file @@ -79,9 +90,9 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # Generate VDSO offsets using helper script gen-vdsosym := 
$(src)/gen_vdso_offsets.sh quiet_cmd_vdsosym = VDSOSYM $@ - cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) $(OFFSET_SUFFIX) | LC_ALL=C sort > $@ -include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE +include/generated/$(vdso_offsets): $(obj)/$(vdso_so_dbg) FORCE $(call if_changed,vdsosym) # actual build commands diff --git a/arch/riscv/kernel/vdso/gen_vdso_offsets.sh b/arch/riscv/kernel/vdso/gen_vdso_offsets.sh index c2e5613f3495..bd5d5afaaa14 100755 --- a/arch/riscv/kernel/vdso/gen_vdso_offsets.sh +++ b/arch/riscv/kernel/vdso/gen_vdso_offsets.sh @@ -2,4 +2,6 @@ # SPDX-License-Identifier: GPL-2.0 LC_ALL=C -sed -n -e 's/^[0]\+\(0[0-9a-fA-F]*\) . \(__vdso_[a-zA-Z0-9_]*\)$/\#define \2_offset\t0x\1/p' +SUFFIX=${1:-""} +sed -n -e \ +'s/^[0]\+\(0[0-9a-fA-F]*\) . \(__vdso_[a-zA-Z0-9_]*\)$/\#define \2'$SUFFIX'_offset\t0x\1/p' diff --git a/arch/riscv/kernel/vdso_cfi/Makefile b/arch/riscv/kernel/vdso_cfi/Makefile new file mode 100644 index 000000000000..8ebd190782b0 --- /dev/null +++ b/arch/riscv/kernel/vdso_cfi/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +# RISC-V VDSO CFI Makefile +# This Makefile builds the VDSO with CFI support when CONFIG_RISCV_USER_CFI is enabled + +# setting VDSO_CFI_BUILD triggers build for vdso differently +VDSO_CFI_BUILD := 1 + +# Set the source directory to the main vdso directory +src := $(srctree)/arch/riscv/kernel/vdso + +# Copy all .S and .c files from vdso directory to vdso_cfi object build directory +vdso_c_sources := $(wildcard $(src)/*.c) +vdso_S_sources := $(wildcard $(src)/*.S) +vdso_c_objects := $(addprefix $(obj)/, $(notdir $(vdso_c_sources))) +vdso_S_objects := $(addprefix $(obj)/, $(notdir $(vdso_S_sources))) + +$(vdso_S_objects): $(obj)/%.S: $(src)/%.S + $(Q)cp $< $@ + +$(vdso_c_objects): $(obj)/%.c: $(src)/%.c + $(Q)cp $< $@ + +# Include the main VDSO Makefile which contains all the build rules and sources +# The VDSO_CFI_BUILD variable will be passed to it to enable CFI compilation +include $(src)/Makefile diff --git a/arch/riscv/kernel/vdso_cfi/vdso-cfi.S b/arch/riscv/kernel/vdso_cfi/vdso-cfi.S new file mode 100644 index 000000000000..d426f6accb35 --- /dev/null +++ b/arch/riscv/kernel/vdso_cfi/vdso-cfi.S @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2025 Rivos, Inc + */ + +#define vdso_start vdso_cfi_start +#define vdso_end vdso_cfi_end + +#define __VDSO_PATH "arch/riscv/kernel/vdso_cfi/vdso-cfi.so" + +#include "../vdso/vdso.S" From 22c1e263af2ac7dad3d2af258336318ee4c4a0ae Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:56 -0700 Subject: [PATCH 28/42] riscv: create a Kconfig fragment for shadow stack and landing pad support This patch creates a Kconfig fragment for shadow stack support and landing pad instruction support. Shadow stack support and landing pad instruction support can be enabled by selecting 'CONFIG_RISCV_USER_CFI'. Selecting 'CONFIG_RISCV_USER_CFI' wires up the path to enumerate CPU support. If support exists, the kernel will support CPU-assisted user mode CFI. If CONFIG_RISCV_USER_CFI is selected, select 'ARCH_USES_HIGH_VMA_FLAGS', 'ARCH_HAS_USER_SHADOW_STACK' and 'DYNAMIC_SIGFRAME' for riscv. 
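For context, a minimal sketch (not part of this patch) of how a dynamic loader
or early process code might opt a task into the features this option makes
available, using the prctl interface documented later in this series; the PR_*
constants are the ones added by the series' uapi headers, and error handling is
elided:

  #include <sys/prctl.h>        /* prctl(), PR_* constants from kernel uapi */

  static void enable_user_cfi(void)
  {
          /* Enable shadow stacks (zicfiss) for the current task. */
          if (prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0))
                  return;         /* kernel or hardware lacks support */

          /* Enable indirect branch tracking (zicfilp) for the current task. */
          prctl(PR_SET_INDIR_BR_LP_STATUS, PR_INDIR_BR_LP_ENABLE, 0, 0, 0);
  }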
Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-25-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description, Kconfig text; added CONFIG_MMU exclusion] Signed-off-by: Paul Walmsley --- arch/riscv/Kconfig | 22 ++++++++++++++++++++++ arch/riscv/configs/hardening.config | 4 ++++ 2 files changed, 26 insertions(+) create mode 100644 arch/riscv/configs/hardening.config diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 6b39f37f769a..7e76b6316425 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -1162,6 +1162,28 @@ config RANDOMIZE_BASE If unsure, say N. +config RISCV_USER_CFI + def_bool y + bool "riscv userspace control flow integrity" + depends on 64BIT && MMU && \ + $(cc-option,-mabi=lp64 -march=rv64ima_zicfiss_zicfilp -fcf-protection=full) + depends on RISCV_ALTERNATIVE + select RISCV_SBI + select ARCH_HAS_USER_SHADOW_STACK + select ARCH_USES_HIGH_VMA_FLAGS + select DYNAMIC_SIGFRAME + help + Provides CPU-assisted control flow integrity to userspace tasks. + Control flow integrity is provided by implementing shadow stack for + backward edge and indirect branch tracking for forward edge. + Shadow stack protection is a hardware feature that detects function + return address corruption. This helps mitigate ROP attacks. + Indirect branch tracking enforces that all indirect branches must land + on a landing pad instruction else CPU will fault. This mitigates against + JOP / COP attacks. Applications must be enabled to use it, and old userspace + does not get protection "for free". + default y. + endmenu # "Kernel features" menu "Boot options" diff --git a/arch/riscv/configs/hardening.config b/arch/riscv/configs/hardening.config new file mode 100644 index 000000000000..089f4cee82f4 --- /dev/null +++ b/arch/riscv/configs/hardening.config @@ -0,0 +1,4 @@ +# RISCV specific kernel hardening options + +# Enable control flow integrity support for usermode. +CONFIG_RISCV_USER_CFI=y From f6eeb67b917238fe2295d27ef0c8fe2cab8de5b5 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:56 -0700 Subject: [PATCH 29/42] riscv: add documentation for landing pad / indirect branch tracking Add documentation on landing pad aka indirect branch tracking on riscv and the kernel interfaces exposed for user tasks to enable it. Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-26-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up the documentation] Signed-off-by: Paul Walmsley --- Documentation/arch/riscv/index.rst | 1 + Documentation/arch/riscv/zicfilp.rst | 122 +++++++++++++++++++++++++++ 2 files changed, 123 insertions(+) create mode 100644 Documentation/arch/riscv/zicfilp.rst diff --git a/Documentation/arch/riscv/index.rst b/Documentation/arch/riscv/index.rst index eecf347ce849..be7237b69682 100644 --- a/Documentation/arch/riscv/index.rst +++ b/Documentation/arch/riscv/index.rst @@ -14,6 +14,7 @@ RISC-V architecture uabi vector cmodx + zicfilp features diff --git a/Documentation/arch/riscv/zicfilp.rst b/Documentation/arch/riscv/zicfilp.rst new file mode 100644 index 000000000000..78a3e01ff68c --- /dev/null +++ b/Documentation/arch/riscv/zicfilp.rst @@ -0,0 +1,122 @@ +.. 
SPDX-License-Identifier: GPL-2.0
+
+:Author: Deepak Gupta
+:Date: 12 January 2024
+
+====================================================
+Tracking indirect control transfers on RISC-V Linux
+====================================================
+
+This document briefly describes the interface provided to userspace by Linux
+to enable indirect branch tracking for user mode applications on RISC-V.
+
+1. Feature Overview
+--------------------
+
+Memory corruption issues usually result in crashes. However, in the
+hands of a creative adversary, these can result in a variety of
+security issues.
+
+Some of those security issues can be code re-use attacks, where an
+adversary can use corrupt function pointers, chaining them together to
+perform jump oriented programming (JOP) or call oriented programming
+(COP) and thus compromise control flow integrity (CFI) of the program.
+
+Function pointers live in read-write memory and thus are susceptible
+to corruption. This can allow an adversary to control the program
+counter (PC) value. On RISC-V, the zicfilp extension enforces a
+restriction on such indirect control transfers:
+
+- Indirect control transfers must land on a landing pad instruction ``lpad``.
+  There are two exceptions to this rule:
+
+  - rs1 = x1 or rs1 = x5, i.e. a return from a function; returns are
+    protected using the shadow stack (see zicfiss.rst)
+
+  - rs1 = x7. On RISC-V, the compiler usually does the following to reach a
+    function which is beyond the offset range of a J-type instruction::
+
+      auipc x7, <imm>
+      jalr (x7)
+
+    This form of indirect control transfer is immutable and doesn't
+    rely on memory. Thus rs1=x7 is exempted from tracking and
+    these are considered software guarded jumps.
+
+The ``lpad`` instruction is a pseudo-op of ``auipc rd, <imm_20bit>``
+with ``rd=x0``. This is a HINT op. The ``lpad`` instruction must be
+aligned on a 4 byte boundary. It compares the 20 bit immediate with
+x7. If ``imm_20bit`` == 0, the CPU doesn't perform any comparison with
+``x7``. If ``imm_20bit`` != 0, then ``imm_20bit`` must match ``x7``,
+else the CPU will raise a ``software check exception`` (``cause=18``) with
+``*tval = 2``.
+
+The compiler can generate a hash over function signatures and set them
+up (truncated to 20 bits) in x7 at callsites. Function prologues can
+have ``lpad`` instructions encoded with the same function hash. This
+further reduces the number of valid program counter addresses a call
+site can reach.
+
+2. ELF and psABI
+-----------------
+
+The toolchain sets up :c:macro:`GNU_PROPERTY_RISCV_FEATURE_1_FCFI` for
+property :c:macro:`GNU_PROPERTY_RISCV_FEATURE_1_AND` in the notes
+section of the object file.
+
+3. Linux enabling
+------------------
+
+User space programs can have multiple shared objects loaded in their
+address spaces. It's a difficult task to make sure all the
+dependencies have been compiled with indirect branch support. Thus
+it's left to the dynamic loader to enable indirect branch tracking for
+the program.
+
+4. prctl() enabling
+--------------------
+
+:c:macro:`PR_SET_INDIR_BR_LP_STATUS` / :c:macro:`PR_GET_INDIR_BR_LP_STATUS` /
+:c:macro:`PR_LOCK_INDIR_BR_LP_STATUS` are three prctls added to manage indirect
+branch tracking. These prctls are architecture-agnostic and return -EINVAL if
+the underlying functionality is not supported.
+
+* prctl(PR_SET_INDIR_BR_LP_STATUS, unsigned long arg)
+
+If arg1 is :c:macro:`PR_INDIR_BR_LP_ENABLE` and if the CPU supports
+``zicfilp`` then the kernel will enable indirect branch tracking for the
+task.
The dynamic loader can issue this :c:macro:`prctl` once it has +determined that all the objects loaded in the address space support +indirect branch tracking. Additionally, if there is a `dlopen` to an +object which wasn't compiled with ``zicfilp``, the dynamic loader can +issue this prctl with arg1 set to 0 (i.e. :c:macro:`PR_INDIR_BR_LP_ENABLE` +cleared). + +* prctl(PR_GET_INDIR_BR_LP_STATUS, unsigned long * arg) + +Returns the current status of indirect branch tracking. If enabled +it'll return :c:macro:`PR_INDIR_BR_LP_ENABLE` + +* prctl(PR_LOCK_INDIR_BR_LP_STATUS, unsigned long arg) + +Locks the current status of indirect branch tracking on the task. User +space may want to run with a strict security posture and wouldn't want +loading of objects without ``zicfilp`` support in them, to disallow +disabling of indirect branch tracking. In this case, user space can +use this prctl to lock the current settings. + +5. violations related to indirect branch tracking +-------------------------------------------------- + +Pertaining to indirect branch tracking, the CPU raises a software +check exception in the following conditions: + +- missing ``lpad`` after indirect call / jmp +- ``lpad`` not on 4 byte boundary +- ``imm_20bit`` embedded in ``lpad`` instruction doesn't match with ``x7`` + +In all 3 cases, ``*tval = 2`` is captured and software check exception is +raised (``cause=18``). + +The kernel will treat this as :c:macro:`SIGSEGV` with code = +:c:macro:`SEGV_CPERR` and follow the normal course of signal delivery. From c8350aa2ed7828175468696ae95f34a431342175 Mon Sep 17 00:00:00 2001 From: Deepak Gupta Date: Sun, 25 Jan 2026 21:09:56 -0700 Subject: [PATCH 30/42] riscv: add documentation for shadow stack Add documentation on shadow stack for user mode on riscv and the kernel interfaces exposed for user tasks to enable it. Reviewed-by: Zong Li Signed-off-by: Deepak Gupta Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-27-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up the documentation, patch description] Signed-off-by: Paul Walmsley --- Documentation/arch/riscv/index.rst | 1 + Documentation/arch/riscv/zicfiss.rst | 194 +++++++++++++++++++++++++++ 2 files changed, 195 insertions(+) create mode 100644 Documentation/arch/riscv/zicfiss.rst diff --git a/Documentation/arch/riscv/index.rst b/Documentation/arch/riscv/index.rst index be7237b69682..e240eb0ceb70 100644 --- a/Documentation/arch/riscv/index.rst +++ b/Documentation/arch/riscv/index.rst @@ -15,6 +15,7 @@ RISC-V architecture vector cmodx zicfilp + zicfiss features diff --git a/Documentation/arch/riscv/zicfiss.rst b/Documentation/arch/riscv/zicfiss.rst new file mode 100644 index 000000000000..4d5f7addc26d --- /dev/null +++ b/Documentation/arch/riscv/zicfiss.rst @@ -0,0 +1,194 @@ +.. SPDX-License-Identifier: GPL-2.0 + +:Author: Deepak Gupta +:Date: 12 January 2024 + +========================================================= +Shadow stack to protect function returns on RISC-V Linux +========================================================= + +This document briefly describes the interface provided to userspace by Linux +to enable shadow stacks for user mode applications on RISC-V. + +1. Feature Overview +-------------------- + +Memory corruption issues usually result in crashes. However, in the +hands of a creative adversary, these issues can result in a variety of +security problems. 
+
+Some of those security issues can be code re-use attacks on programs
+where an adversary can use corrupt return addresses present on the
+stack, chaining them together to perform return oriented programming
+(ROP) and thus compromising the control flow integrity (CFI) of the
+program.
+
+Return addresses live on the stack in read-write memory. Therefore
+they are susceptible to corruption, which allows an adversary to
+control the program counter. On RISC-V, the ``zicfiss`` extension
+provides an alternate stack (the "shadow stack") on which return
+addresses can be safely placed in the prologue of the function and
+retrieved in the epilogue. The ``zicfiss`` extension makes the
+following changes:
+
+- PTE encodings for shadow stack virtual memory
+  An earlier reserved encoding in first stage translation i.e.
+  PTE.R=0, PTE.W=1, PTE.X=0 becomes the PTE encoding for shadow stack pages.
+
+- The ``sspush x1/x5`` instruction pushes (stores) ``x1/x5`` to the shadow stack.
+
+- The ``sspopchk x1/x5`` instruction pops (loads) from the shadow stack and compares
+  with ``x1/x5``; if they are not equal, the CPU raises a ``software check exception``
+  with ``*tval = 3``.
+
+The compiler toolchain ensures that function prologues have ``sspush
+x1/x5`` to save the return address on the shadow stack in addition to the
+regular stack. Similarly, function epilogues have ``ld x5,
+offset(x2)`` followed by ``sspopchk x5`` to ensure that a popped value
+from the regular stack matches the popped value from the shadow
+stack.
+
+2. Shadow stack protections and Linux memory manager
+-----------------------------------------------------
+
+As mentioned earlier, shadow stacks get new page table encodings that
+have some special properties assigned to them, along with instructions
+that operate on the shadow stacks:
+
+- Regular stores to shadow stack memory raise store access faults. This
+  protects shadow stack memory from stray writes.
+
+- Regular loads from shadow stack memory are allowed. This allows
+  stack trace utilities or backtrace functions to read the true call
+  stack and ensure that it has not been tampered with.
+
+- Only shadow stack instructions can generate shadow stack loads or
+  shadow stack stores.
+
+- Shadow stack loads and stores on read-only memory raise AMO/store
+  page faults. Thus both ``sspush x1/x5`` and ``sspopchk x1/x5`` will
+  raise an AMO/store page fault. This simplifies COW handling in the kernel
+  during fork(). The kernel can convert shadow stack pages into
+  read-only memory (as it does for regular read-write memory). As
+  soon as subsequent ``sspush`` or ``sspopchk`` instructions in
+  userspace are encountered, the kernel can perform COW.
+
+- Shadow stack loads and stores on read-write or read-write-execute
+  memory raise an access fault. This is a fatal condition because
+  shadow stack loads and stores should never be operating on
+  read-write or read-write-execute memory.
+
+3. ELF and psABI
+-----------------
+
+The toolchain sets up :c:macro:`GNU_PROPERTY_RISCV_FEATURE_1_BCFI` for
+property :c:macro:`GNU_PROPERTY_RISCV_FEATURE_1_AND` in the notes
+section of the object file.
+
+4. Linux enabling
+------------------
+
+User space programs can have multiple shared objects loaded in their
+address space. It's a difficult task to make sure all the
+dependencies have been compiled with shadow stack support. Thus
+it's left to the dynamic loader to enable shadow stacks for the
+program.
+
+5.
prctl() enabling
+--------------------
+
+:c:macro:`PR_SET_SHADOW_STACK_STATUS` / :c:macro:`PR_GET_SHADOW_STACK_STATUS` /
+:c:macro:`PR_LOCK_SHADOW_STACK_STATUS` are three prctls added to manage shadow
+stack enabling for tasks. These prctls are architecture-agnostic and return
+-EINVAL if not implemented.
+
+* prctl(PR_SET_SHADOW_STACK_STATUS, unsigned long arg)
+
+If arg = :c:macro:`PR_SHADOW_STACK_ENABLE` and if the CPU supports
+``zicfiss`` then the kernel will enable shadow stacks for the task.
+The dynamic loader can issue this :c:macro:`prctl` once it has
+determined that all the objects loaded in the address space have support
+for shadow stacks. Additionally, if there is a :c:macro:`dlopen` to
+an object which wasn't compiled with ``zicfiss``, the dynamic loader
+can issue this prctl with arg set to 0 (i.e.
+:c:macro:`PR_SHADOW_STACK_ENABLE` being clear).
+
+* prctl(PR_GET_SHADOW_STACK_STATUS, unsigned long * arg)
+
+Returns the current status of shadow stack enabling. If enabled,
+it'll return :c:macro:`PR_SHADOW_STACK_ENABLE`.
+
+* prctl(PR_LOCK_SHADOW_STACK_STATUS, unsigned long arg)
+
+Locks the current status of shadow stack enabling on the
+task. Userspace may want to run with a strict security posture and
+wouldn't want loading of objects without ``zicfiss`` support. In this
+case userspace can use this prctl to disallow disabling of shadow
+stacks on the current task.
+
+6. violations related to returns with shadow stack enabled
+-----------------------------------------------------------
+
+Pertaining to shadow stacks, the CPU raises a ``software check
+exception`` upon executing ``sspopchk x1/x5`` if ``x1/x5`` doesn't
+match the top of the shadow stack. If a mismatch happens, then the CPU
+sets ``*tval = 3`` and raises the exception.
+
+The Linux kernel will treat this as a :c:macro:`SIGSEGV` with code =
+:c:macro:`SEGV_CPERR` and follow the normal course of signal delivery.
+
+7. Shadow stack tokens
+-----------------------
+
+Regular stores to shadow stack memory are not allowed, so shadow stacks can't be
+tampered with via arbitrary stray writes. However, one method of
+pivoting / switching to a shadow stack is simply writing to the CSR
+``CSR_SSP``. This will change the active shadow stack for the
+program. Writes to ``CSR_SSP`` in the program should be mostly
+limited to context switches, stack unwinds, or longjmp or similar
+mechanisms (like context switching of Green Threads) in languages like
+Go and Rust. ``CSR_SSP`` writes can be problematic because an attacker can
+use memory corruption bugs and leverage context switching routines to
+pivot to any shadow stack. Shadow stack tokens can help mitigate this
+problem by making sure that:
+
+- When software is switching away from a shadow stack, the shadow
+  stack pointer should be saved on the shadow stack itself (this is
+  called the ``shadow stack token``).
+
+- When software is switching to a shadow stack, it should read the
+  ``shadow stack token`` from the shadow stack pointer and verify that
+  the ``shadow stack token`` itself is a pointer to the shadow stack
+  itself.
+
+- Once the token verification is done, software can perform the write
+  to ``CSR_SSP`` to switch shadow stacks.
+
+Here "software" could refer to the user mode task runtime itself,
+managing various contexts as part of a single thread. Or "software"
+could refer to the kernel, when the kernel has to deliver a signal to
+a user task and must save the shadow stack pointer.
The kernel can
+perform a similar procedure itself by saving a token on the user mode
+task's shadow stack. This way, whenever :c:macro:`sigreturn` happens,
+the kernel can read and verify the token and then switch to the shadow
+stack. Using this mechanism, the kernel helps the user task so that
+any corruption issue in the user task is not exploited by adversaries
+arbitrarily using :c:macro:`sigreturn`. Adversaries will have to make
+sure that there is a valid ``shadow stack token`` in addition to
+invoking :c:macro:`sigreturn`.
+
+8. Signal shadow stack
+-----------------------
+The following structure has been added to sigcontext for RISC-V::
+
+    struct __sc_riscv_cfi_state {
+        unsigned long ss_ptr;
+    };
+
+As part of signal delivery, the shadow stack token is saved on the
+current shadow stack itself. The updated pointer is saved away in the
+:c:macro:`ss_ptr` field in :c:macro:`__sc_riscv_cfi_state` under
+:c:macro:`sigcontext`. The existing shadow stack allocation is used
+for signal delivery. During :c:macro:`sigreturn`, the kernel will obtain
+:c:macro:`ss_ptr` from :c:macro:`sigcontext`, verify the saved
+token on the shadow stack, and switch the shadow stack.
From d30c1683aaecb93d2ab95685dc4300a33d3cea7a Mon Sep 17 00:00:00 2001
From: Deepak Gupta
Date: Sun, 25 Jan 2026 21:09:56 -0700
Subject: [PATCH 31/42] kselftest/riscv: add kselftest for user mode CFI

Add a kselftest for the RISC-V control flow integrity implementation for
user mode. There is not a lot going on in the kernel to enable landing
pads for user mode.

CFI selftests are intended to be compiled with a zicfilp and zicfiss
enabled compiler. This kselftest simply checks if landing pads and
shadow stacks for the process are enabled or not and executes ptrace
selftests on CFI. The selftest then registers a SIGSEGV signal handler.
Any control flow violations are reported as SIGSEGV with
si_code = SEGV_CPERR. The test will fail on receiving any SEGV_CPERR.

The shadow stack part has more changes in the kernel, and thus there
are separate tests for that:

- Exercise the 'map_shadow_stack' syscall
- 'fork' test to make sure COW works for shadow stack pages
- gup tests: the kernel uses FOLL_FORCE when access happens to memory via
  /proc/<pid>/mem. Make sure that keeps working for shadow stack memory.
- signal test: make sure signal delivery results in token creation on
  the shadow stack and consumes (and verifies) the token on sigreturn
- shadow stack protection test: attempts to write using a regular store
  instruction on shadow stack memory must result in access faults
- ptrace test: adds a landing pad violation, clears ELP and continues

In case the toolchain doesn't support the CFI extension, the CFI
kselftest won't be built.
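Condensed for readers, the core pattern the selftest relies on is a SIGSEGV
handler that distinguishes control-flow violations by their si_code; this
sketch is illustrative only and assumes uapi headers recent enough to define
SEGV_CPERR:

  #include <signal.h>
  #include <unistd.h>

  static void cfi_sigsegv_handler(int sig, siginfo_t *si, void *uc)
  {
          if (si->si_code == SEGV_CPERR)  /* control-protection fault */
                  _exit(1);               /* CFI violation observed */
          _exit(2);                       /* some other SIGSEGV cause */
  }

  static void install_cfi_handler(void)
  {
          struct sigaction sa = { 0 };

          sa.sa_sigaction = cfi_sigsegv_handler;
          sa.sa_flags = SA_SIGINFO;
          sigaction(SIGSEGV, &sa, NULL);
  }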
Test output =========== """ TAP version 13 1..5 This is to ensure shadow stack is indeed enabled and working This is to ensure shadow stack is indeed enabled and working ok 1 shstk fork test ok 2 map shadow stack syscall ok 3 shadow stack gup tests ok 4 shadow stack signal tests ok 5 memory protections of shadow stack memory """ Suggested-by: Charlie Jenkins Signed-off-by: Charlie Jenkins Signed-off-by: Deepak Gupta Tested-by: Andreas Korb # QEMU, custom CVA6 Tested-by: Valentin Haudiquet Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-28-b55691eacf4f@rivosinc.com [pjw@kernel.org: updated to apply; cleaned up patch description, code comments] Signed-off-by: Paul Walmsley --- tools/testing/selftests/riscv/Makefile | 2 +- tools/testing/selftests/riscv/cfi/.gitignore | 2 + tools/testing/selftests/riscv/cfi/Makefile | 23 ++ .../testing/selftests/riscv/cfi/cfi_rv_test.h | 82 ++++ tools/testing/selftests/riscv/cfi/cfitests.c | 173 ++++++++ .../testing/selftests/riscv/cfi/shadowstack.c | 385 ++++++++++++++++++ .../testing/selftests/riscv/cfi/shadowstack.h | 27 ++ 7 files changed, 693 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/riscv/cfi/.gitignore create mode 100644 tools/testing/selftests/riscv/cfi/Makefile create mode 100644 tools/testing/selftests/riscv/cfi/cfi_rv_test.h create mode 100644 tools/testing/selftests/riscv/cfi/cfitests.c create mode 100644 tools/testing/selftests/riscv/cfi/shadowstack.c create mode 100644 tools/testing/selftests/riscv/cfi/shadowstack.h diff --git a/tools/testing/selftests/riscv/Makefile b/tools/testing/selftests/riscv/Makefile index 099b8c1f46f8..5671b4405a12 100644 --- a/tools/testing/selftests/riscv/Makefile +++ b/tools/testing/selftests/riscv/Makefile @@ -5,7 +5,7 @@ ARCH ?= $(shell uname -m 2>/dev/null || echo not) ifneq (,$(filter $(ARCH),riscv)) -RISCV_SUBTARGETS ?= abi hwprobe mm sigreturn vector +RISCV_SUBTARGETS ?= abi hwprobe mm sigreturn vector cfi else RISCV_SUBTARGETS := endif diff --git a/tools/testing/selftests/riscv/cfi/.gitignore b/tools/testing/selftests/riscv/cfi/.gitignore new file mode 100644 index 000000000000..c1faf7ca4346 --- /dev/null +++ b/tools/testing/selftests/riscv/cfi/.gitignore @@ -0,0 +1,2 @@ +cfitests +shadowstack diff --git a/tools/testing/selftests/riscv/cfi/Makefile b/tools/testing/selftests/riscv/cfi/Makefile new file mode 100644 index 000000000000..96a4dc4b69c3 --- /dev/null +++ b/tools/testing/selftests/riscv/cfi/Makefile @@ -0,0 +1,23 @@ +CFLAGS += $(KHDR_INCLUDES) +CFLAGS += -I$(top_srcdir)/tools/include + +CFLAGS += -march=rv64gc_zicfilp_zicfiss -fcf-protection=full + +# Check for zicfi* extensions needs cross compiler +# which is not set until lib.mk is included +ifeq ($(LLVM)$(CC),cc) +CC := $(CROSS_COMPILE)gcc +endif + + +ifeq ($(shell $(CC) $(CFLAGS) -nostdlib -xc /dev/null -o /dev/null > /dev/null 2>&1; echo $$?),0) +TEST_GEN_PROGS := cfitests + +$(OUTPUT)/cfitests: cfitests.c shadowstack.c + $(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^ +else + +$(shell echo "Toolchain doesn't support CFI, skipping CFI kselftest." 
>&2) +endif + +include ../../lib.mk diff --git a/tools/testing/selftests/riscv/cfi/cfi_rv_test.h b/tools/testing/selftests/riscv/cfi/cfi_rv_test.h new file mode 100644 index 000000000000..1c8043f2b778 --- /dev/null +++ b/tools/testing/selftests/riscv/cfi/cfi_rv_test.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef SELFTEST_RISCV_CFI_H +#define SELFTEST_RISCV_CFI_H +#include +#include +#include "shadowstack.h" + +#define CHILD_EXIT_CODE_SSWRITE 10 +#define CHILD_EXIT_CODE_SIG_TEST 11 + +#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ +({ \ + register long _num __asm__ ("a7") = (num); \ + register long _arg1 __asm__ ("a0") = (long)(arg1); \ + register long _arg2 __asm__ ("a1") = (long)(arg2); \ + register long _arg3 __asm__ ("a2") = (long)(arg3); \ + register long _arg4 __asm__ ("a3") = (long)(arg4); \ + register long _arg5 __asm__ ("a4") = (long)(arg5); \ + \ + __asm__ volatile( \ + "ecall\n" \ + : "+r" \ + (_arg1) \ + : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ + "r"(_num) \ + : "memory", "cc" \ + ); \ + _arg1; \ +}) + +#define my_syscall3(num, arg1, arg2, arg3) \ +({ \ + register long _num __asm__ ("a7") = (num); \ + register long _arg1 __asm__ ("a0") = (long)(arg1); \ + register long _arg2 __asm__ ("a1") = (long)(arg2); \ + register long _arg3 __asm__ ("a2") = (long)(arg3); \ + \ + __asm__ volatile( \ + "ecall\n" \ + : "+r" (_arg1) \ + : "r"(_arg2), "r"(_arg3), \ + "r"(_num) \ + : "memory", "cc" \ + ); \ + _arg1; \ +}) + +#ifndef __NR_prctl +#define __NR_prctl 167 +#endif + +#ifndef __NR_map_shadow_stack +#define __NR_map_shadow_stack 453 +#endif + +#define CSR_SSP 0x011 + +#ifdef __ASSEMBLY__ +#define __ASM_STR(x) x +#else +#define __ASM_STR(x) #x +#endif + +#define csr_read(csr) \ +({ \ + register unsigned long __v; \ + __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \ + : "=r" (__v) : \ + : "memory"); \ + __v; \ +}) + +#define csr_write(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \ + : : "rK" (__v) \ + : "memory"); \ +}) + +#endif diff --git a/tools/testing/selftests/riscv/cfi/cfitests.c b/tools/testing/selftests/riscv/cfi/cfitests.c new file mode 100644 index 000000000000..298544854415 --- /dev/null +++ b/tools/testing/selftests/riscv/cfi/cfitests.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "../../kselftest.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cfi_rv_test.h" + +/* do not optimize cfi related test functions */ +#pragma GCC push_options +#pragma GCC optimize("O0") + +void sigsegv_handler(int signum, siginfo_t *si, void *uc) +{ + struct ucontext *ctx = (struct ucontext *)uc; + + if (si->si_code == SEGV_CPERR) { + ksft_print_msg("Control flow violation happened somewhere\n"); + ksft_print_msg("PC where violation happened %lx\n", ctx->uc_mcontext.gregs[0]); + exit(-1); + } + + /* all other cases are expected to be of shadow stack write case */ + exit(CHILD_EXIT_CODE_SSWRITE); +} + +bool register_signal_handler(void) +{ + struct sigaction sa = {}; + + sa.sa_sigaction = sigsegv_handler; + sa.sa_flags = SA_SIGINFO; + if (sigaction(SIGSEGV, &sa, NULL)) { + ksft_print_msg("Registering signal handler for landing pad violation failed\n"); + return false; + } + + return true; +} + +long ptrace(int request, pid_t pid, void *addr, void *data); + +bool cfi_ptrace_test(void) +{ + pid_t pid; + int status, ret = 0; + unsigned long ptrace_test_num = 0, total_ptrace_tests = 2; + + struct 
user_cfi_state cfi_reg; + struct iovec iov; + + pid = fork(); + + if (pid == -1) { + ksft_exit_fail_msg("%s: fork failed\n", __func__); + exit(1); + } + + if (pid == 0) { + /* allow to be traced */ + ptrace(PTRACE_TRACEME, 0, NULL, NULL); + raise(SIGSTOP); + asm volatile ("la a5, 1f\n" + "jalr a5\n" + "nop\n" + "nop\n" + "1: nop\n" + : : : "a5"); + exit(11); + /* child shouldn't go beyond here */ + } + + /* parent's code goes here */ + iov.iov_base = &cfi_reg; + iov.iov_len = sizeof(cfi_reg); + + while (ptrace_test_num < total_ptrace_tests) { + memset(&cfi_reg, 0, sizeof(cfi_reg)); + waitpid(pid, &status, 0); + if (WIFSTOPPED(status)) { + errno = 0; + ret = ptrace(PTRACE_GETREGSET, pid, (void *)NT_RISCV_USER_CFI, &iov); + if (ret == -1 && errno) + ksft_exit_fail_msg("%s: PTRACE_GETREGSET failed\n", __func__); + } else { + ksft_exit_fail_msg("%s: child didn't stop, failed\n", __func__); + } + + switch (ptrace_test_num) { +#define CFI_ENABLE_MASK (PTRACE_CFI_LP_EN_STATE | \ + PTRACE_CFI_SS_EN_STATE | \ + PTRACE_CFI_SS_PTR_STATE) + case 0: + if ((cfi_reg.cfi_status.cfi_state & CFI_ENABLE_MASK) != CFI_ENABLE_MASK) + ksft_exit_fail_msg("%s: ptrace_getregset failed, %llu\n", __func__, + cfi_reg.cfi_status.cfi_state); + if (!cfi_reg.shstk_ptr) + ksft_exit_fail_msg("%s: NULL shadow stack pointer, test failed\n", + __func__); + break; + case 1: + if (!(cfi_reg.cfi_status.cfi_state & PTRACE_CFI_ELP_STATE)) + ksft_exit_fail_msg("%s: elp must have been set\n", __func__); + /* clear elp state. not interested in anything else */ + cfi_reg.cfi_status.cfi_state = 0; + + ret = ptrace(PTRACE_SETREGSET, pid, (void *)NT_RISCV_USER_CFI, &iov); + if (ret == -1 && errno) + ksft_exit_fail_msg("%s: PTRACE_GETREGSET failed\n", __func__); + break; + default: + ksft_exit_fail_msg("%s: unreachable switch case\n", __func__); + break; + } + ptrace(PTRACE_CONT, pid, NULL, NULL); + ptrace_test_num++; + } + + waitpid(pid, &status, 0); + if (WEXITSTATUS(status) != 11) + ksft_print_msg("%s, bad return code from child\n", __func__); + + ksft_print_msg("%s, ptrace test succeeded\n", __func__); + return true; +} + +int main(int argc, char *argv[]) +{ + int ret = 0; + unsigned long lpad_status = 0, ss_status = 0; + + ksft_print_header(); + + ksft_print_msg("Starting risc-v tests\n"); + + /* + * Landing pad test. Not a lot of kernel changes to support landing + * pads for user mode except lighting up a bit in senvcfg via a prctl. + * Enable landing pad support throughout the execution of the test binary. 
+ */ + ret = my_syscall5(__NR_prctl, PR_GET_INDIR_BR_LP_STATUS, &lpad_status, 0, 0, 0); + if (ret) + ksft_exit_fail_msg("Get landing pad status failed with %d\n", ret); + + if (!(lpad_status & PR_INDIR_BR_LP_ENABLE)) + ksft_exit_fail_msg("Landing pad is not enabled, should be enabled via glibc\n"); + + ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS, &ss_status, 0, 0, 0); + if (ret) + ksft_exit_fail_msg("Get shadow stack failed with %d\n", ret); + + if (!(ss_status & PR_SHADOW_STACK_ENABLE)) + ksft_exit_fail_msg("Shadow stack is not enabled, should be enabled via glibc\n"); + + if (!register_signal_handler()) + ksft_exit_fail_msg("Registering signal handler for SIGSEGV failed\n"); + + ksft_print_msg("Landing pad and shadow stack are enabled for binary\n"); + cfi_ptrace_test(); + + execute_shadow_stack_tests(); + + return 0; +} + +#pragma GCC pop_options diff --git a/tools/testing/selftests/riscv/cfi/shadowstack.c b/tools/testing/selftests/riscv/cfi/shadowstack.c new file mode 100644 index 000000000000..f8eed8260a12 --- /dev/null +++ b/tools/testing/selftests/riscv/cfi/shadowstack.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "../../kselftest.h" +#include +#include +#include +#include +#include +#include "shadowstack.h" +#include "cfi_rv_test.h" + +static struct shadow_stack_tests shstk_tests[] = { + { "shstk fork test\n", shadow_stack_fork_test }, + { "map shadow stack syscall\n", shadow_stack_map_test }, + { "shadow stack gup tests\n", shadow_stack_gup_tests }, + { "shadow stack signal tests\n", shadow_stack_signal_test}, + { "memory protections of shadow stack memory\n", shadow_stack_protection_test } +}; + +#define RISCV_SHADOW_STACK_TESTS ARRAY_SIZE(shstk_tests) + +/* do not optimize shadow stack related test functions */ +#pragma GCC push_options +#pragma GCC optimize("O0") + +void zar(void) +{ + unsigned long ssp = 0; + + ssp = csr_read(CSR_SSP); + ksft_print_msg("Spewing out shadow stack ptr: %lx\n" + " This is to ensure shadow stack is indeed enabled and working\n", + ssp); +} + +void bar(void) +{ + zar(); +} + +void foo(void) +{ + bar(); +} + +void zar_child(void) +{ + unsigned long ssp = 0; + + ssp = csr_read(CSR_SSP); + ksft_print_msg("Spewing out shadow stack ptr: %lx\n" + " This is to ensure shadow stack is indeed enabled and working\n", + ssp); +} + +void bar_child(void) +{ + zar_child(); +} + +void foo_child(void) +{ + bar_child(); +} + +typedef void (call_func_ptr)(void); +/* + * call couple of functions to test push/pop. + */ +int shadow_stack_call_tests(call_func_ptr fn_ptr, bool parent) +{ + ksft_print_msg("dummy calls for sspush and sspopchk in context of %s\n", + parent ? 
"parent" : "child"); + + (fn_ptr)(); + + return 0; +} + +/* forks a thread, and ensure shadow stacks fork out */ +bool shadow_stack_fork_test(unsigned long test_num, void *ctx) +{ + int pid = 0, child_status = 0, parent_pid = 0, ret = 0; + unsigned long ss_status = 0; + + ksft_print_msg("Exercising shadow stack fork test\n"); + + ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS, &ss_status, 0, 0, 0); + if (ret) { + ksft_exit_skip("Shadow stack get status prctl failed with errorcode %d\n", ret); + return false; + } + + if (!(ss_status & PR_SHADOW_STACK_ENABLE)) + ksft_exit_skip("Shadow stack is not enabled, should be enabled via glibc\n"); + + parent_pid = getpid(); + pid = fork(); + + if (pid) { + ksft_print_msg("Parent pid %d and child pid %d\n", parent_pid, pid); + shadow_stack_call_tests(&foo, true); + } else { + shadow_stack_call_tests(&foo_child, false); + } + + if (pid) { + ksft_print_msg("Waiting on child to finish\n"); + wait(&child_status); + } else { + /* exit child gracefully */ + exit(0); + } + + if (pid && WIFSIGNALED(child_status)) { + ksft_print_msg("Child faulted, fork test failed\n"); + return false; + } + + return true; +} + +/* exercise 'map_shadow_stack', pivot to it and call some functions to ensure it works */ +#define SHADOW_STACK_ALLOC_SIZE 4096 +bool shadow_stack_map_test(unsigned long test_num, void *ctx) +{ + unsigned long shdw_addr; + int ret = 0; + + ksft_print_msg("Exercising shadow stack map test\n"); + + shdw_addr = my_syscall3(__NR_map_shadow_stack, NULL, SHADOW_STACK_ALLOC_SIZE, 0); + + if (((long)shdw_addr) <= 0) { + ksft_print_msg("map_shadow_stack failed with error code %d\n", + (int)shdw_addr); + return false; + } + + ret = munmap((void *)shdw_addr, SHADOW_STACK_ALLOC_SIZE); + + if (ret) { + ksft_print_msg("munmap failed with error code %d\n", ret); + return false; + } + + return true; +} + +/* + * shadow stack protection tests. 
map a shadow stack and + * validate all memory protections work on it + */ +bool shadow_stack_protection_test(unsigned long test_num, void *ctx) +{ + unsigned long shdw_addr; + unsigned long *write_addr = NULL; + int ret = 0, pid = 0, child_status = 0; + + ksft_print_msg("Exercising shadow stack protection test (WPT)\n"); + + shdw_addr = my_syscall3(__NR_map_shadow_stack, NULL, SHADOW_STACK_ALLOC_SIZE, 0); + + if (((long)shdw_addr) <= 0) { + ksft_print_msg("map_shadow_stack failed with error code %d\n", + (int)shdw_addr); + return false; + } + + write_addr = (unsigned long *)shdw_addr; + pid = fork(); + + /* no child was created, return false */ + if (pid == -1) + return false; + + /* + * try to perform a store from child on shadow stack memory + * it should result in SIGSEGV + */ + if (!pid) { + /* below write must lead to SIGSEGV */ + *write_addr = 0xdeadbeef; + } else { + wait(&child_status); + } + + /* test fail, if 0xdeadbeef present on shadow stack address */ + if (*write_addr == 0xdeadbeef) { + ksft_print_msg("Shadow stack WPT failed\n"); + return false; + } + + /* if child reached here, then fail */ + if (!pid) { + ksft_print_msg("Shadow stack WPT failed: child reached unreachable state\n"); + return false; + } + + /* if child exited via signal handler but not for write on ss */ + if (WIFEXITED(child_status) && + WEXITSTATUS(child_status) != CHILD_EXIT_CODE_SSWRITE) { + ksft_print_msg("Shadow stack WPT failed: child wasn't signaled for write\n"); + return false; + } + + ret = munmap(write_addr, SHADOW_STACK_ALLOC_SIZE); + if (ret) { + ksft_print_msg("Shadow stack WPT failed: munmap failed, error code %d\n", + ret); + return false; + } + + return true; +} + +#define SS_MAGIC_WRITE_VAL 0xbeefdead + +int gup_tests(int mem_fd, unsigned long *shdw_addr) +{ + unsigned long val = 0; + + lseek(mem_fd, (unsigned long)shdw_addr, SEEK_SET); + if (read(mem_fd, &val, sizeof(val)) < 0) { + ksft_print_msg("Reading shadow stack mem via gup failed\n"); + return 1; + } + + val = SS_MAGIC_WRITE_VAL; + lseek(mem_fd, (unsigned long)shdw_addr, SEEK_SET); + if (write(mem_fd, &val, sizeof(val)) < 0) { + ksft_print_msg("Writing shadow stack mem via gup failed\n"); + return 1; + } + + if (*shdw_addr != SS_MAGIC_WRITE_VAL) { + ksft_print_msg("GUP write to shadow stack memory failed\n"); + return 1; + } + + return 0; +} + +bool shadow_stack_gup_tests(unsigned long test_num, void *ctx) +{ + unsigned long shdw_addr = 0; + unsigned long *write_addr = NULL; + int fd = 0; + bool ret = false; + + ksft_print_msg("Exercising shadow stack gup tests\n"); + shdw_addr = my_syscall3(__NR_map_shadow_stack, NULL, SHADOW_STACK_ALLOC_SIZE, 0); + + if (((long)shdw_addr) <= 0) { + ksft_print_msg("map_shadow_stack failed with error code %d\n", (int)shdw_addr); + return false; + } + + write_addr = (unsigned long *)shdw_addr; + + fd = open("/proc/self/mem", O_RDWR); + if (fd == -1) + return false; + + if (gup_tests(fd, write_addr)) { + ksft_print_msg("gup tests failed\n"); + goto out; + } + + ret = true; +out: + if (shdw_addr && munmap(write_addr, SHADOW_STACK_ALLOC_SIZE)) { + ksft_print_msg("munmap failed with error code %d\n", ret); + ret = false; + } + + return ret; +} + +volatile bool break_loop; + +void sigusr1_handler(int signo) +{ + break_loop = true; +} + +bool sigusr1_signal_test(void) +{ + struct sigaction sa = {}; + + sa.sa_handler = sigusr1_handler; + sa.sa_flags = 0; + sigemptyset(&sa.sa_mask); + if (sigaction(SIGUSR1, &sa, NULL)) { + ksft_print_msg("Registering signal handler for SIGUSR1 failed\n"); + return false; + 
} + + return true; +} + +/* + * shadow stack signal test. shadow stack must be enabled. + * register a signal, fork another thread which is waiting + * on signal. Send a signal from parent to child, verify + * that signal was received by child. If not test fails + */ +bool shadow_stack_signal_test(unsigned long test_num, void *ctx) +{ + int pid = 0, child_status = 0, ret = 0; + unsigned long ss_status = 0; + + ksft_print_msg("Exercising shadow stack signal test\n"); + + ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS, &ss_status, 0, 0, 0); + if (ret) { + ksft_print_msg("Shadow stack get status prctl failed with errorcode %d\n", ret); + return false; + } + + if (!(ss_status & PR_SHADOW_STACK_ENABLE)) + ksft_print_msg("Shadow stack is not enabled, should be enabled via glibc\n"); + + /* this should be caught by signal handler and do an exit */ + if (!sigusr1_signal_test()) { + ksft_print_msg("Registering sigusr1 handler failed\n"); + exit(-1); + } + + pid = fork(); + + if (pid == -1) { + ksft_print_msg("Signal test: fork failed\n"); + goto out; + } + + if (pid == 0) { + while (!break_loop) + sleep(1); + + exit(11); + /* child shouldn't go beyond here */ + } + + /* send SIGUSR1 to child */ + kill(pid, SIGUSR1); + wait(&child_status); + +out: + + return (WIFEXITED(child_status) && + WEXITSTATUS(child_status) == 11); +} + +int execute_shadow_stack_tests(void) +{ + int ret = 0; + unsigned long test_count = 0; + unsigned long shstk_status = 0; + bool test_pass = false; + + ksft_print_msg("Executing RISC-V shadow stack self tests\n"); + ksft_set_plan(RISCV_SHADOW_STACK_TESTS); + + ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS, &shstk_status, 0, 0, 0); + + if (ret != 0) + ksft_exit_fail_msg("Get shadow stack status failed with %d\n", ret); + + /* + * If we are here that means get shadow stack status succeeded and + * thus shadow stack support is baked in the kernel. + */ + while (test_count < RISCV_SHADOW_STACK_TESTS) { + test_pass = (*shstk_tests[test_count].t_func)(test_count, NULL); + ksft_test_result(test_pass, shstk_tests[test_count].name); + test_count++; + } + + ksft_finished(); + + return 0; +} + +#pragma GCC pop_options diff --git a/tools/testing/selftests/riscv/cfi/shadowstack.h b/tools/testing/selftests/riscv/cfi/shadowstack.h new file mode 100644 index 000000000000..943a3685905f --- /dev/null +++ b/tools/testing/selftests/riscv/cfi/shadowstack.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef SELFTEST_SHADOWSTACK_TEST_H +#define SELFTEST_SHADOWSTACK_TEST_H +#include +#include + +/* + * A CFI test returns true for success or false for fail. + * Takes a test number to index into array, and a void pointer. 
+ */ +typedef bool (*shstk_test_func)(unsigned long test_num, void *); + +struct shadow_stack_tests { + char *name; + shstk_test_func t_func; +}; + +bool shadow_stack_fork_test(unsigned long test_num, void *ctx); +bool shadow_stack_map_test(unsigned long test_num, void *ctx); +bool shadow_stack_protection_test(unsigned long test_num, void *ctx); +bool shadow_stack_gup_tests(unsigned long test_num, void *ctx); +bool shadow_stack_signal_test(unsigned long test_num, void *ctx); + +int execute_shadow_stack_tests(void); + +#endif From 8cdb04bd06c167461b357150b3ca46983eb70dc3 Mon Sep 17 00:00:00 2001 From: Ilya Mamay Date: Sun, 25 Jan 2026 21:09:56 -0700 Subject: [PATCH 32/42] riscv: ptrace: return ENODATA for inactive vector extension Currently, ptrace returns EINVAL when the vector extension is supported but not yet activated for the traced process. This error code is not always appropriate since the ptrace arguments may be valid. Debug tools like gdbserver expect ENODATA when the requested register set is not active, e.g. see [1]. This expectation seems to be more appropriate, so modify the vector ptrace implementation to return: - EINVAL when V extension is not supported - ENODATA when V extension is supported but not active [1] https://github.com/bminor/binutils-gdb/blob/637f25e88675fa47e47f9cc5e2cf37384836b8a2/gdbserver/linux-low.cc#L5020 Signed-off-by: Ilya Mamay Signed-off-by: Sergey Matyukevich Reviewed-by: Andy Chiu Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-2-geomatsi@gmail.com Signed-off-by: Paul Walmsley --- arch/riscv/kernel/ptrace.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 57e257d459e8..97636fdfeb77 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -99,9 +99,12 @@ static int riscv_vr_get(struct task_struct *target, struct __riscv_v_ext_state *vstate = &target->thread.vstate; struct __riscv_v_regset_state ptrace_vstate; - if (!riscv_v_vstate_query(task_pt_regs(target))) + if (!(has_vector() || has_xtheadvector())) return -EINVAL; + if (!riscv_v_vstate_query(task_pt_regs(target))) + return -ENODATA; + /* * Ensure the vector registers have been saved to the memory before * copying them to membuf. @@ -134,9 +137,12 @@ static int riscv_vr_set(struct task_struct *target, struct __riscv_v_ext_state *vstate = &target->thread.vstate; struct __riscv_v_regset_state ptrace_vstate; - if (!riscv_v_vstate_query(task_pt_regs(target))) + if (!(has_vector() || has_xtheadvector())) return -EINVAL; + if (!riscv_v_vstate_query(task_pt_regs(target))) + return -ENODATA; + /* Copy rest of the vstate except datap */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ptrace_vstate, 0, sizeof(struct __riscv_v_regset_state)); From ef3ff40346db8476a9ef7269fc9d1837e7243c40 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:56 -0700 Subject: [PATCH 33/42] riscv: vector: init vector context with proper vlenb The vstate in thread_struct is zeroed when the vector context is initialized. That includes read-only register vlenb, which holds the vector register length in bytes. Zeroed state persists until mstatus.VS becomes 'dirty' and a context switch saves the actual hardware values. This can expose the zero vlenb value to the user-space in early debug scenarios, e.g. when ptrace attaches to a traced process early, before any vector instruction except the first one was executed. 
Fix this by specifying proper vlenb on vector context init. Signed-off-by: Sergey Matyukevich Reviewed-by: Andy Chiu Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-3-geomatsi@gmail.com Signed-off-by: Paul Walmsley --- arch/riscv/kernel/vector.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 3ed071dab9d8..b112166d51e9 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -111,8 +111,8 @@ bool insn_is_vector(u32 insn_buf) return false; } -static int riscv_v_thread_zalloc(struct kmem_cache *cache, - struct __riscv_v_ext_state *ctx) +static int riscv_v_thread_ctx_alloc(struct kmem_cache *cache, + struct __riscv_v_ext_state *ctx) { void *datap; @@ -122,13 +122,15 @@ static int riscv_v_thread_zalloc(struct kmem_cache *cache, ctx->datap = datap; memset(ctx, 0, offsetof(struct __riscv_v_ext_state, datap)); + ctx->vlenb = riscv_v_vsize / 32; + return 0; } void riscv_v_thread_alloc(struct task_struct *tsk) { #ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE - riscv_v_thread_zalloc(riscv_v_kernel_cachep, &tsk->thread.kernel_vstate); + riscv_v_thread_ctx_alloc(riscv_v_kernel_cachep, &tsk->thread.kernel_vstate); #endif } @@ -214,12 +216,14 @@ bool riscv_v_first_use_handler(struct pt_regs *regs) * context where VS has been off. So, try to allocate the user's V * context and resume execution. */ - if (riscv_v_thread_zalloc(riscv_v_user_cachep, ¤t->thread.vstate)) { + if (riscv_v_thread_ctx_alloc(riscv_v_user_cachep, ¤t->thread.vstate)) { force_sig(SIGBUS); return true; } + riscv_v_vstate_on(regs); riscv_v_vstate_set_restore(current, regs); + return true; } From fd515e037efb3b6300eace247e14ab2bc7e38db5 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 34/42] riscv: csr: define vtype register elements Define masks and shifts for vtype CSR according to the vector specs: - v0.7.1 used in early T-Head cores, known as xtheadvector in the kernel - v1.0 Signed-off-by: Sergey Matyukevich Reviewed-by: Andy Chiu Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-4-geomatsi@gmail.com Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/csr.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 55a07e8722ff..31b8988f4488 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -458,6 +458,23 @@ #define CSR_VTYPE 0xc21 #define CSR_VLENB 0xc22 +#define VTYPE_VLMUL _AC(7, UL) +#define VTYPE_VLMUL_FRAC _AC(4, UL) +#define VTYPE_VSEW_SHIFT 3 +#define VTYPE_VSEW (_AC(7, UL) << VTYPE_VSEW_SHIFT) +#define VTYPE_VTA_SHIFT 6 +#define VTYPE_VTA (_AC(1, UL) << VTYPE_VTA_SHIFT) +#define VTYPE_VMA_SHIFT 7 +#define VTYPE_VMA (_AC(1, UL) << VTYPE_VMA_SHIFT) +#define VTYPE_VILL_SHIFT (__riscv_xlen - 1) +#define VTYPE_VILL (_AC(1, UL) << VTYPE_VILL_SHIFT) + +#define VTYPE_VLMUL_THEAD _AC(3, UL) +#define VTYPE_VSEW_THEAD_SHIFT 2 +#define VTYPE_VSEW_THEAD (_AC(7, UL) << VTYPE_VSEW_THEAD_SHIFT) +#define VTYPE_VEDIV_THEAD_SHIFT 5 +#define VTYPE_VEDIV_THEAD (_AC(3, UL) << VTYPE_VEDIV_THEAD_SHIFT) + /* Scalar Crypto Extension - Entropy */ #define CSR_SEED 0x015 #define SEED_OPST_MASK _AC(0xC0000000, UL) From f4be988f5b547dc4b305c15a078a52cdde76a8f5 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 35/42] riscv: ptrace: validate input vector csr registers Add strict validation 
for vector csr registers when setting them via ptrace: - reject attempts to set reserved bits or invalid field combinations - enforce strict VL checks against calculated VLMAX values Vector specs 0.7.1 and 1.0 allow normal applications to set candidate VL values and read back the hardware-adjusted results, see section 6 for details. Disallow such flexibility in vector ptrace operations and strictly enforce valid VL input. The traced process may not update its saved vector context if no vector instructions execute between breakpoints. So the purpose of the strict ptrace approach is to make sure that debuggers maintain an accurate view of the tracee's vector context across multiple halt/resume debug cycles. Signed-off-by: Sergey Matyukevich Reviewed-by: Andy Chiu Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-5-geomatsi@gmail.com Signed-off-by: Paul Walmsley --- arch/riscv/kernel/ptrace.c | 88 +++++++++++++++++++++++++++++++++++++- 1 file changed, 87 insertions(+), 1 deletion(-) diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 97636fdfeb77..e592bd6b7665 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -128,6 +128,92 @@ static int riscv_vr_get(struct task_struct *target, return membuf_write(&to, vstate->datap, riscv_v_vsize); } +static int invalid_ptrace_v_csr(struct __riscv_v_ext_state *vstate, + struct __riscv_v_regset_state *ptrace) +{ + unsigned long vsew, vlmul, vfrac, vl; + unsigned long elen, vlen; + unsigned long sew, lmul; + unsigned long reserved; + + vlen = vstate->vlenb * 8; + if (vstate->vlenb != ptrace->vlenb) + return 1; + + /* do not allow to set vcsr/vxrm/vxsat reserved bits */ + reserved = ~(CSR_VXSAT_MASK | (CSR_VXRM_MASK << CSR_VXRM_SHIFT)); + if (ptrace->vcsr & reserved) + return 1; + + if (has_vector()) { + /* do not allow to set vtype reserved bits and vill bit */ + reserved = ~(VTYPE_VSEW | VTYPE_VLMUL | VTYPE_VMA | VTYPE_VTA); + if (ptrace->vtype & reserved) + return 1; + + elen = riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE64X) ? 64 : 32; + vsew = (ptrace->vtype & VTYPE_VSEW) >> VTYPE_VSEW_SHIFT; + sew = 8 << vsew; + + if (sew > elen) + return 1; + + vfrac = (ptrace->vtype & VTYPE_VLMUL_FRAC); + vlmul = (ptrace->vtype & VTYPE_VLMUL); + + /* RVV 1.0 spec 3.4.2: VLMUL(0x4) reserved */ + if (vlmul == 4) + return 1; + + /* RVV 1.0 spec 3.4.2: (LMUL < SEW_min / ELEN) reserved */ + if (vlmul == 5 && elen == 32) + return 1; + + /* for zero vl verify that at least one element is possible */ + vl = ptrace->vl ? ptrace->vl : 1; + + if (vfrac) { + /* integer 1/LMUL: VL =< VLMAX = VLEN / SEW / LMUL */ + lmul = 2 << (3 - (vlmul - vfrac)); + if (vlen < vl * sew * lmul) + return 1; + } else { + /* integer LMUL: VL =< VLMAX = LMUL * VLEN / SEW */ + lmul = 1 << vlmul; + if (vl * sew > lmul * vlen) + return 1; + } + } + + if (has_xtheadvector()) { + /* do not allow to set vtype reserved bits and vill bit */ + reserved = ~(VTYPE_VSEW_THEAD | VTYPE_VLMUL_THEAD | VTYPE_VEDIV_THEAD); + if (ptrace->vtype & reserved) + return 1; + + /* + * THead ISA Extension spec chapter 16: + * divided element extension ('Zvediv') is not part of XTheadVector + */ + if (ptrace->vtype & VTYPE_VEDIV_THEAD) + return 1; + + vsew = (ptrace->vtype & VTYPE_VSEW_THEAD) >> VTYPE_VSEW_THEAD_SHIFT; + sew = 8 << vsew; + + vlmul = (ptrace->vtype & VTYPE_VLMUL_THEAD); + lmul = 1 << vlmul; + + /* for zero vl verify that at least one element is possible */ + vl = ptrace->vl ? 
ptrace->vl : 1; + + if (vl * sew > lmul * vlen) + return 1; + } + + return 0; +} + static int riscv_vr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -149,7 +235,7 @@ static int riscv_vr_set(struct task_struct *target, if (unlikely(ret)) return ret; - if (vstate->vlenb != ptrace_vstate.vlenb) + if (invalid_ptrace_v_csr(vstate, &ptrace_vstate)) return -EINVAL; vstate->vstart = ptrace_vstate.vstart; From 600f72ded8c877be95322ce806d23345ea5e89bc Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 36/42] selftests: riscv: test ptrace vector interface Add a test case to check ptrace behavior in the case when vector extension is supported by the system, but vector context is not yet enabled for the traced process. Signed-off-by: Sergey Matyukevich Reviewed-by: Andy Chiu Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-6-geomatsi@gmail.com [pjw@kernel.org: dropped duplicate sys/wait.h include] Signed-off-by: Paul Walmsley --- .../testing/selftests/riscv/vector/.gitignore | 2 + tools/testing/selftests/riscv/vector/Makefile | 10 ++- .../selftests/riscv/vector/v_helpers.c | 23 ++++++ .../selftests/riscv/vector/v_helpers.h | 2 + .../riscv/vector/validate_v_ptrace.c | 79 +++++++++++++++++++ 5 files changed, 115 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/riscv/vector/validate_v_ptrace.c diff --git a/tools/testing/selftests/riscv/vector/.gitignore b/tools/testing/selftests/riscv/vector/.gitignore index 7d9c87cd0649..40a82baf364f 100644 --- a/tools/testing/selftests/riscv/vector/.gitignore +++ b/tools/testing/selftests/riscv/vector/.gitignore @@ -2,3 +2,5 @@ vstate_exec_nolibc vstate_prctl v_initval v_exec_initval_nolibc +vstate_ptrace +validate_v_ptrace diff --git a/tools/testing/selftests/riscv/vector/Makefile b/tools/testing/selftests/riscv/vector/Makefile index 2c2a33fc083e..326dafd739bf 100644 --- a/tools/testing/selftests/riscv/vector/Makefile +++ b/tools/testing/selftests/riscv/vector/Makefile @@ -2,11 +2,14 @@ # Copyright (C) 2021 ARM Limited # Originally tools/testing/arm64/abi/Makefile -TEST_GEN_PROGS := v_initval vstate_prctl vstate_ptrace +TEST_GEN_PROGS := v_initval vstate_prctl vstate_ptrace validate_v_ptrace TEST_GEN_PROGS_EXTENDED := vstate_exec_nolibc v_exec_initval_nolibc +TEST_GEN_LIBS := v_helpers.c sys_hwprobe.c include ../../lib.mk +TEST_GEN_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(TEST_GEN_LIBS)) + $(OUTPUT)/sys_hwprobe.o: ../hwprobe/sys_hwprobe.S $(CC) -static -c -o$@ $(CFLAGS) $^ @@ -29,3 +32,8 @@ $(OUTPUT)/v_exec_initval_nolibc: v_exec_initval_nolibc.c $(OUTPUT)/vstate_ptrace: vstate_ptrace.c $(OUTPUT)/sys_hwprobe.o $(OUTPUT)/v_helpers.o $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ + +$(OUTPUT)/validate_v_ptrace: validate_v_ptrace.c $(OUTPUT)/sys_hwprobe.o $(OUTPUT)/v_helpers.o + $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ + +EXTRA_CLEAN += $(TEST_GEN_OBJ) diff --git a/tools/testing/selftests/riscv/vector/v_helpers.c b/tools/testing/selftests/riscv/vector/v_helpers.c index 01a8799dcb78..de6da7c8d2f1 100644 --- a/tools/testing/selftests/riscv/vector/v_helpers.c +++ b/tools/testing/selftests/riscv/vector/v_helpers.c @@ -26,6 +26,29 @@ bool is_vector_supported(void) return pair.value & RISCV_HWPROBE_EXT_ZVE32X; } +unsigned long get_vr_len(void) +{ + unsigned long vlenb; + + if (is_vector_supported()) { + asm volatile("csrr %[vlenb], vlenb" : [vlenb] "=r"(vlenb)); + return vlenb; + } + + if (is_xtheadvector_supported()) { 
+ asm volatile ( + // 0 | zimm[10:0] | rs1 | 1 1 1 | rd | 1010111 | vsetvli + // vsetvli t4, x0, e8, m1, d1 + ".4byte 0b00000000000000000111111011010111\n\t" + "mv %[vlenb], t4\n\t" + : [vlenb] "=r"(vlenb) : : "memory", "t4"); + return vlenb; + } + + printf("WARNING: vector not supported\n"); + return 0; +} + int launch_test(char *next_program, int test_inherit, int xtheadvector) { char *exec_argv[4], *exec_envp[1]; diff --git a/tools/testing/selftests/riscv/vector/v_helpers.h b/tools/testing/selftests/riscv/vector/v_helpers.h index 763cddfe26da..c538077f1195 100644 --- a/tools/testing/selftests/riscv/vector/v_helpers.h +++ b/tools/testing/selftests/riscv/vector/v_helpers.h @@ -5,4 +5,6 @@ bool is_xtheadvector_supported(void); bool is_vector_supported(void); +unsigned long get_vr_len(void); + int launch_test(char *next_program, int test_inherit, int xtheadvector); diff --git a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c new file mode 100644 index 000000000000..3ffef2704b0b --- /dev/null +++ b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "kselftest_harness.h" +#include "v_helpers.h" + +volatile unsigned long chld_lock; + +TEST(ptrace_v_not_enabled) +{ + pid_t pid; + + if (!(is_vector_supported() || is_xtheadvector_supported())) + SKIP(return, "Vector not supported"); + + chld_lock = 1; + pid = fork(); + ASSERT_LE(0, pid) + TH_LOG("fork: %m"); + + if (pid == 0) { + while (chld_lock == 1) + asm volatile("" : : "g"(chld_lock) : "memory"); + + asm volatile ("ebreak" : : : ); + } else { + struct __riscv_v_regset_state *regset_data; + unsigned long vlenb = get_vr_len(); + size_t regset_size; + struct iovec iov; + int status; + int ret; + + /* attach */ + + ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* unlock */ + + ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0)); + + /* resume and wait for ebreak */ + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* try to read vector registers from the tracee */ + + regset_size = sizeof(*regset_data) + vlenb * 32; + regset_data = calloc(1, regset_size); + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + /* V extension is available, but not yet enabled for the tracee */ + + errno = 0; + ret = ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov); + ASSERT_EQ(ENODATA, errno); + ASSERT_EQ(-1, ret); + + /* cleanup */ + + ASSERT_EQ(0, kill(pid, SIGKILL)); + } +} + +TEST_HARNESS_MAIN From 66d03044891df63c82b18ae1da07bc4bc077ae48 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 37/42] selftests: riscv: verify initial vector state with ptrace Add a test case that attaches to a traced process immediately after its first executed vector instructions to verify the initial vector context. 
Signed-off-by: Sergey Matyukevich Reviewed-by: Andy Chiu Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-7-geomatsi@gmail.com Signed-off-by: Paul Walmsley --- .../riscv/vector/validate_v_ptrace.c | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c index 3ffef2704b0b..768ef93b33da 100644 --- a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c +++ b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c @@ -12,6 +12,9 @@ #include "kselftest_harness.h" #include "v_helpers.h" +#define SR_FS_DIRTY 0x00006000UL +#define CSR_VXRM_SHIFT 1 + volatile unsigned long chld_lock; TEST(ptrace_v_not_enabled) @@ -76,4 +79,136 @@ TEST(ptrace_v_not_enabled) } } +TEST(ptrace_v_early_debug) +{ + static volatile unsigned long vstart; + static volatile unsigned long vtype; + static volatile unsigned long vlenb; + static volatile unsigned long vcsr; + static volatile unsigned long vl; + bool xtheadvector; + pid_t pid; + + if (!(is_vector_supported() || is_xtheadvector_supported())) + SKIP(return, "Vector not supported"); + + xtheadvector = is_xtheadvector_supported(); + + chld_lock = 1; + pid = fork(); + ASSERT_LE(0, pid) + TH_LOG("fork: %m"); + + if (pid == 0) { + unsigned long vxsat, vxrm; + + vlenb = get_vr_len(); + + while (chld_lock == 1) + asm volatile ("" : : "g"(chld_lock) : "memory"); + + asm volatile ( + "csrr %[vstart], vstart\n" + "csrr %[vtype], vtype\n" + "csrr %[vl], vl\n" + : [vtype] "=r"(vtype), [vstart] "=r"(vstart), [vl] "=r"(vl) + : + : "memory"); + + /* no 'is_xtheadvector_supported()' here to avoid clobbering v-state by syscall */ + if (xtheadvector) { + asm volatile ( + "csrs sstatus, %[bit]\n" + "csrr %[vxsat], vxsat\n" + "csrr %[vxrm], vxrm\n" + : [vxsat] "=r"(vxsat), [vxrm] "=r"(vxrm) + : [bit] "r" (SR_FS_DIRTY) + : "memory"); + vcsr = vxsat | vxrm << CSR_VXRM_SHIFT; + } else { + asm volatile ( + "csrr %[vcsr], vcsr\n" + : [vcsr] "=r"(vcsr) + : + : "memory"); + } + + asm volatile ( + ".option push\n" + ".option norvc\n" + "ebreak\n" + ".option pop\n"); + } else { + struct __riscv_v_regset_state *regset_data; + unsigned long vstart_csr; + unsigned long vlenb_csr; + unsigned long vtype_csr; + unsigned long vcsr_csr; + unsigned long vl_csr; + size_t regset_size; + struct iovec iov; + int status; + + /* attach */ + + ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* unlock */ + + ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0)); + + /* resume and wait for ebreak */ + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* read tracee vector csr regs using ptrace PEEKDATA */ + + errno = 0; + vstart_csr = ptrace(PTRACE_PEEKDATA, pid, &vstart, NULL); + ASSERT_FALSE((errno != 0) && (vstart_csr == -1)); + + errno = 0; + vl_csr = ptrace(PTRACE_PEEKDATA, pid, &vl, NULL); + ASSERT_FALSE((errno != 0) && (vl_csr == -1)); + + errno = 0; + vtype_csr = ptrace(PTRACE_PEEKDATA, pid, &vtype, NULL); + ASSERT_FALSE((errno != 0) && (vtype_csr == -1)); + + errno = 0; + vcsr_csr = ptrace(PTRACE_PEEKDATA, pid, &vcsr, NULL); + ASSERT_FALSE((errno != 0) && (vcsr_csr == -1)); + + errno = 0; + vlenb_csr = ptrace(PTRACE_PEEKDATA, pid, &vlenb, NULL); + ASSERT_FALSE((errno != 0) && (vlenb_csr == -1)); + + /* read tracee csr regs using ptrace GETREGSET */ + + regset_size = 
sizeof(*regset_data) + vlenb_csr * 32; + regset_data = calloc(1, regset_size); + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* compare */ + + EXPECT_EQ(vstart_csr, regset_data->vstart); + EXPECT_EQ(vtype_csr, regset_data->vtype); + EXPECT_EQ(vlenb_csr, regset_data->vlenb); + EXPECT_EQ(vcsr_csr, regset_data->vcsr); + EXPECT_EQ(vl_csr, regset_data->vl); + + /* cleanup */ + + ASSERT_EQ(0, kill(pid, SIGKILL)); + } +} + TEST_HARNESS_MAIN From 3789d5eecd5ae01149d0ef5ba70e8120da2f55db Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 38/42] selftests: riscv: verify syscalls discard vector context Add a test to v_ptrace test suite to verify that vector csr registers are clobbered on syscalls. Signed-off-by: Sergey Matyukevich Reviewed-by: Andy Chiu Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-8-geomatsi@gmail.com [pjw@kernel.org: cleaned up a checkpatch issue] Signed-off-by: Paul Walmsley --- .../riscv/vector/validate_v_ptrace.c | 123 ++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c index 768ef93b33da..7ff9a0cf229c 100644 --- a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c +++ b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c @@ -211,4 +211,127 @@ TEST(ptrace_v_early_debug) } } +TEST(ptrace_v_syscall_clobbering) +{ + pid_t pid; + + if (!is_vector_supported() && !is_xtheadvector_supported()) + SKIP(return, "Vector not supported"); + + chld_lock = 1; + pid = fork(); + ASSERT_LE(0, pid) + TH_LOG("fork: %m"); + + if (pid == 0) { + unsigned long vl; + + while (chld_lock == 1) + asm volatile("" : : "g"(chld_lock) : "memory"); + + if (is_xtheadvector_supported()) { + asm volatile ( + // 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli + // vsetvli t4, x0, e16, m2, d1 + ".4byte 0b00000000010100000111111011010111\n" + "mv %[new_vl], t4\n" + : [new_vl] "=r" (vl) : : "t4"); + } else { + asm volatile ( + ".option push\n" + ".option arch, +zve32x\n" + "vsetvli %[new_vl], x0, e16, m2, tu, mu\n" + ".option pop\n" + : [new_vl] "=r"(vl) : : ); + } + + while (1) { + asm volatile ( + ".option push\n" + ".option norvc\n" + "ebreak\n" + ".option pop\n"); + + sleep(0); + } + } else { + struct __riscv_v_regset_state *regset_data; + unsigned long vlenb = get_vr_len(); + struct user_regs_struct regs; + size_t regset_size; + struct iovec iov; + int status; + + /* attach */ + + ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* unlock */ + + ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0)); + + /* resume and wait for the 1st ebreak */ + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* read tracee vector csr regs using ptrace GETREGSET */ + + regset_size = sizeof(*regset_data) + vlenb * 32; + regset_data = calloc(1, regset_size); + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* verify initial vsetvli settings */ + + if (is_xtheadvector_supported()) + EXPECT_EQ(5UL, regset_data->vtype); + else + EXPECT_EQ(9UL, regset_data->vtype); + + EXPECT_EQ(regset_data->vlenb, regset_data->vl); + EXPECT_EQ(vlenb, regset_data->vlenb); + 
EXPECT_EQ(0UL, regset_data->vstart); + EXPECT_EQ(0UL, regset_data->vcsr); + + /* skip 1st ebreak, then resume and wait for the 2nd ebreak */ + + iov.iov_base = ®s; + iov.iov_len = sizeof(regs); + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov)); + regs.pc += 4; + ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov)); + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* read tracee vtype using ptrace GETREGSET */ + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* verify that V state is illegal after syscall */ + + EXPECT_EQ((1UL << (__riscv_xlen - 1)), regset_data->vtype); + EXPECT_EQ(vlenb, regset_data->vlenb); + EXPECT_EQ(0UL, regset_data->vstart); + EXPECT_EQ(0UL, regset_data->vcsr); + EXPECT_EQ(0UL, regset_data->vl); + + /* cleanup */ + + ASSERT_EQ(0, kill(pid, SIGKILL)); + } +} + TEST_HARNESS_MAIN From 30eb191c895b086c21fc04c5c1482cb1bb0f3caf Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 39/42] selftests: riscv: verify ptrace rejects invalid vector csr inputs Add a test to v_ptrace test suite to verify that ptrace rejects the invalid input combinations of vector csr registers. Use kselftest fixture variants to create multiple invalid inputs for the test. Signed-off-by: Sergey Matyukevich Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-9-geomatsi@gmail.com [pjw@kernel.org: cleaned up some checkpatch issues] Signed-off-by: Paul Walmsley --- .../riscv/vector/validate_v_ptrace.c | 317 ++++++++++++++++++ 1 file changed, 317 insertions(+) diff --git a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c index 7ff9a0cf229c..3919df68edc8 100644 --- a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c +++ b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c @@ -334,4 +334,321 @@ TEST(ptrace_v_syscall_clobbering) } } +FIXTURE(v_csr_invalid) +{ +}; + +FIXTURE_SETUP(v_csr_invalid) +{ +} + +FIXTURE_TEARDOWN(v_csr_invalid) +{ +} + +#define VECTOR_1_0 BIT(0) +#define XTHEAD_VECTOR_0_7 BIT(1) + +#define vector_test(x) ((x) & VECTOR_1_0) +#define xthead_test(x) ((x) & XTHEAD_VECTOR_0_7) + +/* modifications of the initial vsetvli settings */ +FIXTURE_VARIANT(v_csr_invalid) +{ + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb_mul; + unsigned long vlenb_min; + unsigned long vlenb_max; + unsigned long spec; +}; + +/* unexpected vlenb value */ +FIXTURE_VARIANT_ADD(v_csr_invalid, new_vlenb) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x3, + .vcsr = 0x0, + .vlenb_mul = 0x2, + .vlenb_min = 0x0, + .vlenb_max = 0x0, + .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7, +}; + +/* invalid reserved bits in vcsr */ +FIXTURE_VARIANT_ADD(v_csr_invalid, vcsr_invalid_reserved_bits) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x3, + .vcsr = 0x1UL << 8, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x0, + .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7, +}; + +/* invalid reserved bits in vtype */ +FIXTURE_VARIANT_ADD(v_csr_invalid, vtype_invalid_reserved_bits) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = (0x1UL << 8) | 0x3, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x0, + .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7, +}; + +/* set vill bit */ +FIXTURE_VARIANT_ADD(v_csr_invalid, invalid_vill_bit) 
+{ + .vstart = 0x0, + .vl = 0x0, + .vtype = (0x1UL << (__riscv_xlen - 1)) | 0x3, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x0, + .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7, +}; + +/* reserved vsew value: vsew > 3 */ +FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vsew) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x4UL << 3, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x0, + .spec = VECTOR_1_0, +}; + +/* XTheadVector: unsupported non-zero VEDIV value */ +FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vediv) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x3UL << 5, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x0, + .spec = XTHEAD_VECTOR_0_7, +}; + +/* reserved vlmul value: vlmul == 4 */ +FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vlmul) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x4, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x0, + .spec = VECTOR_1_0, +}; + +/* invalid fractional LMUL for VLEN <= 256: LMUL= 1/8, SEW = 64 */ +FIXTURE_VARIANT_ADD(v_csr_invalid, frac_lmul1) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x1d, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x20, + .spec = VECTOR_1_0, +}; + +/* invalid integral LMUL for VLEN <= 16: LMUL= 2, SEW = 64 */ +FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul1) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x19, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x2, + .spec = VECTOR_1_0, +}; + +/* XTheadVector: invalid integral LMUL for VLEN <= 16: LMUL= 2, SEW = 64 */ +FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul2) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0xd, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x2, + .spec = XTHEAD_VECTOR_0_7, +}; + +/* invalid VL for VLEN <= 128: LMUL= 2, SEW = 64, VL = 8 */ +FIXTURE_VARIANT_ADD(v_csr_invalid, vl1) +{ + .vstart = 0x0, + .vl = 0x8, + .vtype = 0x19, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x10, + .spec = VECTOR_1_0, +}; + +/* XTheadVector: invalid VL for VLEN <= 128: LMUL= 2, SEW = 64, VL = 8 */ +FIXTURE_VARIANT_ADD(v_csr_invalid, vl2) +{ + .vstart = 0x0, + .vl = 0x8, + .vtype = 0xd, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x0, + .vlenb_max = 0x10, + .spec = XTHEAD_VECTOR_0_7, +}; + +TEST_F(v_csr_invalid, ptrace_v_invalid_values) +{ + unsigned long vlenb; + pid_t pid; + + if (!is_vector_supported() && !is_xtheadvector_supported()) + SKIP(return, "Vectors not supported"); + + if (is_vector_supported() && !vector_test(variant->spec)) + SKIP(return, "Test not supported for Vector"); + + if (is_xtheadvector_supported() && !xthead_test(variant->spec)) + SKIP(return, "Test not supported for XTheadVector"); + + vlenb = get_vr_len(); + + if (variant->vlenb_min) { + if (vlenb < variant->vlenb_min) + SKIP(return, "This test does not support VLEN < %lu\n", + variant->vlenb_min * 8); + } + + if (variant->vlenb_max) { + if (vlenb > variant->vlenb_max) + SKIP(return, "This test does not support VLEN > %lu\n", + variant->vlenb_max * 8); + } + + chld_lock = 1; + pid = fork(); + ASSERT_LE(0, pid) + TH_LOG("fork: %m"); + + if (pid == 0) { + unsigned long vl; + + while (chld_lock == 1) + asm volatile("" : : "g"(chld_lock) : "memory"); + + if (is_xtheadvector_supported()) { + asm volatile ( + // 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli + // vsetvli t4, x0, e16, m2, d1 + ".4byte 0b00000000010100000111111011010111\n" + "mv %[new_vl], t4\n" + : [new_vl] "=r" (vl) : : "t4"); + } else { + asm volatile ( + 
".option push\n" + ".option arch, +zve32x\n" + "vsetvli %[new_vl], x0, e16, m2, tu, mu\n" + ".option pop\n" + : [new_vl] "=r"(vl) : : ); + } + + while (1) { + asm volatile ( + ".option push\n" + ".option norvc\n" + "ebreak\n" + "nop\n" + ".option pop\n"); + } + } else { + struct __riscv_v_regset_state *regset_data; + size_t regset_size; + struct iovec iov; + int status; + int ret; + + /* attach */ + + ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* unlock */ + + ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0)); + + /* resume and wait for the 1st ebreak */ + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* read tracee vector csr regs using ptrace GETREGSET */ + + regset_size = sizeof(*regset_data) + vlenb * 32; + regset_data = calloc(1, regset_size); + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* verify initial vsetvli settings */ + + if (is_xtheadvector_supported()) + EXPECT_EQ(5UL, regset_data->vtype); + else + EXPECT_EQ(9UL, regset_data->vtype); + + EXPECT_EQ(regset_data->vlenb, regset_data->vl); + EXPECT_EQ(vlenb, regset_data->vlenb); + EXPECT_EQ(0UL, regset_data->vstart); + EXPECT_EQ(0UL, regset_data->vcsr); + + /* apply invalid settings from fixture variants */ + + regset_data->vlenb *= variant->vlenb_mul; + regset_data->vstart = variant->vstart; + regset_data->vtype = variant->vtype; + regset_data->vcsr = variant->vcsr; + regset_data->vl = variant->vl; + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + errno = 0; + ret = ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov); + ASSERT_EQ(errno, EINVAL); + ASSERT_EQ(ret, -1); + + /* cleanup */ + + ASSERT_EQ(0, kill(pid, SIGKILL)); + } +} + TEST_HARNESS_MAIN From 849f05ae1ea6e1ff621243dce27fe455fdc9d0ff Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 40/42] selftests: riscv: verify ptrace accepts valid vector csr values Add a test to v_ptrace test suite to verify that ptrace accepts the valid input combinations of vector csr registers. Use kselftest fixture variants to create multiple inputs for the test. The test simulates a debug scenario with three breakpoints: 0. init: let the tracee set up its initial vector configuration 1. 1st bp: modify the tracee's vector csr registers from the debugger - resume the tracee to execute a block without vector instructions 2. 2nd bp: read back the tracees's vector csr registers from the debugger - compare with values set by the debugger - resume the tracee to execute a block with vector instructions 3. 3rd bp: read back the tracess's vector csr registers again - compare with values set by the debugger The last check helps to confirm that ptrace validation check for vector csr registers input values works properly and maintains an accurate view of the tracee's vector context in debugger. 
Signed-off-by: Sergey Matyukevich Tested-by: Andy Chiu Link: https://patch.msgid.link/20251214163537.1054292-10-geomatsi@gmail.com [pjw@kernel.org: cleaned up a checkpatch issue] Signed-off-by: Paul Walmsley --- .../riscv/vector/validate_v_ptrace.c | 261 ++++++++++++++++++ 1 file changed, 261 insertions(+) diff --git a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c index 3919df68edc8..3589549f7228 100644 --- a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c +++ b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c @@ -651,4 +651,265 @@ TEST_F(v_csr_invalid, ptrace_v_invalid_values) } } +FIXTURE(v_csr_valid) +{ +}; + +FIXTURE_SETUP(v_csr_valid) +{ +} + +FIXTURE_TEARDOWN(v_csr_valid) +{ +} + +/* modifications of the initial vsetvli settings */ +FIXTURE_VARIANT(v_csr_valid) +{ + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb_mul; + unsigned long vlenb_min; + unsigned long vlenb_max; + unsigned long spec; +}; + +/* valid for VLEN >= 128: LMUL= 1/4, SEW = 32 */ +FIXTURE_VARIANT_ADD(v_csr_valid, frac_lmul1) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x16, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x10, + .vlenb_max = 0x0, + .spec = VECTOR_1_0, +}; + +/* valid for VLEN >= 16: LMUL= 2, SEW = 32 */ +FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul1) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x11, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x2, + .vlenb_max = 0x0, + .spec = VECTOR_1_0, +}; + +/* valid for XTheadVector VLEN >= 16: LMUL= 2, SEW = 32 */ +FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul2) +{ + .vstart = 0x0, + .vl = 0x0, + .vtype = 0x9, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x2, + .vlenb_max = 0x0, + .spec = XTHEAD_VECTOR_0_7, +}; + +/* valid for VLEN >= 32: LMUL= 2, SEW = 32, VL = 2 */ +FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul3) +{ + .vstart = 0x0, + .vl = 0x2, + .vtype = 0x11, + .vcsr = 0x0, + .vlenb_mul = 0x1, + .vlenb_min = 0x4, + .vlenb_max = 0x0, + .spec = VECTOR_1_0, +}; + +TEST_F(v_csr_valid, ptrace_v_valid_values) +{ + unsigned long vlenb; + pid_t pid; + + if (!is_vector_supported() && !is_xtheadvector_supported()) + SKIP(return, "Vectors not supported"); + + if (is_vector_supported() && !vector_test(variant->spec)) + SKIP(return, "Test not supported for Vector"); + + if (is_xtheadvector_supported() && !xthead_test(variant->spec)) + SKIP(return, "Test not supported for XTheadVector"); + + vlenb = get_vr_len(); + + if (variant->vlenb_min) { + if (vlenb < variant->vlenb_min) + SKIP(return, "This test does not support VLEN < %lu\n", + variant->vlenb_min * 8); + } + if (variant->vlenb_max) { + if (vlenb > variant->vlenb_max) + SKIP(return, "This test does not support VLEN > %lu\n", + variant->vlenb_max * 8); + } + + chld_lock = 1; + pid = fork(); + ASSERT_LE(0, pid) + TH_LOG("fork: %m"); + + if (pid == 0) { + unsigned long vl; + + while (chld_lock == 1) + asm volatile("" : : "g"(chld_lock) : "memory"); + + if (is_xtheadvector_supported()) { + asm volatile ( + // 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli + // vsetvli t4, x0, e16, m2, d1 + ".4byte 0b00000000010100000111111011010111\n" + "mv %[new_vl], t4\n" + : [new_vl] "=r" (vl) : : "t4"); + } else { + asm volatile ( + ".option push\n" + ".option arch, +zve32x\n" + "vsetvli %[new_vl], x0, e16, m2, tu, mu\n" + ".option pop\n" + : [new_vl] "=r"(vl) : : ); + } + + asm volatile ( + ".option push\n" + ".option norvc\n" + ".option arch, +zve32x\n" + "ebreak\n" /* 
breakpoint 1: apply new V state using ptrace */ + "nop\n" + "ebreak\n" /* breakpoint 2: V state clean - context will not be saved */ + "vmv.v.i v0, -1\n" + "ebreak\n" /* breakpoint 3: V state dirty - context will be saved */ + ".option pop\n"); + } else { + struct __riscv_v_regset_state *regset_data; + struct user_regs_struct regs; + size_t regset_size; + struct iovec iov; + int status; + + /* attach */ + + ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* unlock */ + + ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0)); + + /* resume and wait for the 1st ebreak */ + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* read tracee vector csr regs using ptrace GETREGSET */ + + regset_size = sizeof(*regset_data) + vlenb * 32; + regset_data = calloc(1, regset_size); + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* verify initial vsetvli settings */ + + if (is_xtheadvector_supported()) + EXPECT_EQ(5UL, regset_data->vtype); + else + EXPECT_EQ(9UL, regset_data->vtype); + + EXPECT_EQ(regset_data->vlenb, regset_data->vl); + EXPECT_EQ(vlenb, regset_data->vlenb); + EXPECT_EQ(0UL, regset_data->vstart); + EXPECT_EQ(0UL, regset_data->vcsr); + + /* apply valid settings from fixture variants */ + + regset_data->vlenb *= variant->vlenb_mul; + regset_data->vstart = variant->vstart; + regset_data->vtype = variant->vtype; + regset_data->vcsr = variant->vcsr; + regset_data->vl = variant->vl; + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* skip 1st ebreak, then resume and wait for the 2nd ebreak */ + + iov.iov_base = ®s; + iov.iov_len = sizeof(regs); + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov)); + regs.pc += 4; + ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov)); + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* read tracee vector csr regs using ptrace GETREGSET */ + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* verify vector csr regs from tracee context */ + + EXPECT_EQ(regset_data->vstart, variant->vstart); + EXPECT_EQ(regset_data->vtype, variant->vtype); + EXPECT_EQ(regset_data->vcsr, variant->vcsr); + EXPECT_EQ(regset_data->vl, variant->vl); + EXPECT_EQ(regset_data->vlenb, vlenb); + + /* skip 2nd ebreak, then resume and wait for the 3rd ebreak */ + + iov.iov_base = ®s; + iov.iov_len = sizeof(regs); + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov)); + regs.pc += 4; + ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov)); + + ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL)); + ASSERT_EQ(pid, waitpid(pid, &status, 0)); + ASSERT_TRUE(WIFSTOPPED(status)); + + /* read tracee vector csr regs using ptrace GETREGSET */ + + iov.iov_base = regset_data; + iov.iov_len = regset_size; + + ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov)); + + /* verify vector csr regs from tracee context */ + + EXPECT_EQ(regset_data->vstart, variant->vstart); + EXPECT_EQ(regset_data->vtype, variant->vtype); + EXPECT_EQ(regset_data->vcsr, variant->vcsr); + EXPECT_EQ(regset_data->vl, variant->vl); + 
EXPECT_EQ(regset_data->vlenb, vlenb); + + /* cleanup */ + + ASSERT_EQ(0, kill(pid, SIGKILL)); + } +} + TEST_HARNESS_MAIN From 098921ec6818291d98bd3a4002c9dfbe2e75aac2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Sun, 25 Jan 2026 21:09:57 -0700 Subject: [PATCH 41/42] selftests: riscv: vstate_exec_nolibc: Use the regular prctl() function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The my_syscall*() macros are internal implementation details of nolibc. Now that nolibc has a normal prctl() function, use that. Signed-off-by: Thomas Weißschuh Link: https://patch.msgid.link/20260117-nolibc-mysyscall-riscv-v1-1-0ae1ae3513e9@weissschuh.net Signed-off-by: Paul Walmsley --- tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c index 7b7d6f21acb4..12f1b1b1c7aa 100644 --- a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c +++ b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c @@ -16,10 +16,10 @@ int main(int argc, char **argv) if (argc > 2 && strcmp(argv[2], "x")) xtheadvector = 1; - ctrl = my_syscall1(__NR_prctl, PR_RISCV_V_GET_CONTROL); - if (ctrl < 0) { + ctrl = prctl(PR_RISCV_V_GET_CONTROL, 0, 0, 0, 0); + if (ctrl == -1) { puts("PR_RISCV_V_GET_CONTROL is not supported\n"); - return ctrl; + exit(-1); } if (test_inherit) { @@ -51,7 +51,7 @@ int main(int argc, char **argv) } if (!pid) { - rc = my_syscall1(__NR_prctl, PR_RISCV_V_GET_CONTROL); + rc = prctl(PR_RISCV_V_GET_CONTROL, 0, 0, 0, 0); if (rc != ctrl) { puts("child's vstate_ctrl not equal to parent's\n"); exit(-1); From 18be4ca5cb4e5a86833de97d331f5bc14a6c5a6d Mon Sep 17 00:00:00 2001 From: Feng Jiang Date: Sun, 25 Jan 2026 21:09:58 -0700 Subject: [PATCH 42/42] riscv: lib: optimize strlen loop efficiency Optimize the generic strlen implementation by using a pre-decrement pointer. This reduces the loop body from 4 instructions to 3 and eliminates the unconditional jump ('j'). Old loop (4 instructions, 2 branches): 1: lbu t0, 0(t1); beqz t0, 2f; addi t1, t1, 1; j 1b New loop (3 instructions, 1 branch): 1: addi t1, t1, 1; lbu t0, 0(t1); bnez t0, 1b This change improves execution efficiency and reduces branch pressure for systems without the Zbb extension. Signed-off-by: Feng Jiang Link: https://patch.msgid.link/20251218032614.57356-1-jiangfeng@kylinos.cn Signed-off-by: Paul Walmsley --- arch/riscv/lib/strlen.S | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S index eb4d2b7ed22b..e7736ccda514 100644 --- a/arch/riscv/lib/strlen.S +++ b/arch/riscv/lib/strlen.S @@ -21,13 +21,11 @@ SYM_FUNC_START(strlen) * Clobbers: * t0, t1 */ - mv t1, a0 + addi t1, a0, -1 1: - lbu t0, 0(t1) - beqz t0, 2f addi t1, t1, 1 - j 1b -2: + lbu t0, 0(t1) + bnez t0, 1b sub a0, t1, a0 ret