riscv: Implement arch-agnostic shadow stack prctls

Implement an architecture-agnostic prctl() interface for setting and
getting shadow stack status.  The prctls implemented are
PR_GET_SHADOW_STACK_STATUS, PR_SET_SHADOW_STACK_STATUS and
PR_LOCK_SHADOW_STACK_STATUS.

As part of PR_SET_SHADOW_STACK_STATUS/PR_GET_SHADOW_STACK_STATUS, only
PR_SHADOW_STACK_ENABLE is implemented because RISC-V allows each mode to
write to its own shadow stack using 'sspush' or 'ssamoswap'.

PR_LOCK_SHADOW_STACK_STATUS locks the current shadow stack enablement
configuration.

Reviewed-by: Zong Li <zong.li@sifive.com>
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
Tested-by: Andreas Korb <andreas.korb@aisec.fraunhofer.de> # QEMU, custom CVA6
Tested-by: Valentin Haudiquet <valentin.haudiquet@canonical.com>
Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-12-b55691eacf4f@rivosinc.com
[pjw@kernel.org: cleaned up patch description]
Signed-off-by: Paul Walmsley <pjw@kernel.org>
This commit is contained in:
Deepak Gupta
2026-01-25 21:09:54 -07:00
committed by Paul Walmsley
parent fd44a4a855
commit 61a0200211
3 changed files with 134 additions and 0 deletions

View File

@@ -24,6 +24,16 @@ bool is_shstk_enabled(struct task_struct *task)
return task->thread_info.user_cfi_state.ubcfi_en;
}
/* A shadow stack is considered allocated iff a non-zero base is recorded. */
bool is_shstk_allocated(struct task_struct *task)
{
	return task->thread_info.user_cfi_state.shdw_stk_base != 0;
}
/* True once userspace has locked the shadow stack configuration. */
bool is_shstk_locked(struct task_struct *task)
{
	return task->thread_info.user_cfi_state.ubcfi_locked != 0;
}
void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size)
{
task->thread_info.user_cfi_state.shdw_stk_base = shstk_addr;
@@ -42,6 +52,26 @@ void set_active_shstk(struct task_struct *task, unsigned long shstk_addr)
task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
}
/*
 * Enable or disable user shadow stack for @task: record the new state in
 * the task's user_cfi_state, update the cached envcfg value, and write it
 * to the CSR.
 *
 * NOTE(review): writes CSR_ENVCFG directly — presumably @task == current
 * when this runs (or the value is restored on context switch via
 * thread.envcfg); confirm with callers.
 */
void set_shstk_status(struct task_struct *task, bool enable)
{
	if (!cpu_supports_shadow_stack())
		return;

	/* bool is guaranteed 0/1, no need for an explicit ?: */
	task->thread_info.user_cfi_state.ubcfi_en = enable;

	if (enable)
		task->thread.envcfg |= ENVCFG_SSE;
	else
		task->thread.envcfg &= ~ENVCFG_SSE;

	csr_write(CSR_ENVCFG, task->thread.envcfg);
}
/*
 * Lock the task's user shadow stack configuration.  Once set, further
 * PR_SET_SHADOW_STACK_STATUS requests are rejected (the lock is checked
 * via is_shstk_locked()).  There is no unlock path.
 */
void set_shstk_lock(struct task_struct *task)
{
	task->thread_info.user_cfi_state.ubcfi_locked = 1;
}
/*
 * If size is 0 then, to stay compatible with the regular stack, make the
 * shadow stack as big as the regular stack.  Otherwise PAGE_ALIGN the
 * requested size and return it.
 */
@@ -258,3 +288,83 @@ void shstk_release(struct task_struct *tsk)
vm_munmap(base, size);
set_shstk_base(tsk, 0, 0);
}
/*
 * prctl(PR_GET_SHADOW_STACK_STATUS) backend: report the task's shadow
 * stack state to userspace through @status.
 *
 * Returns 0 on success, -EINVAL when the CPU has no shadow stack support,
 * -EFAULT when @status cannot be written.
 */
int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status)
{
	unsigned long bcfi_status;

	if (!cpu_supports_shadow_stack())
		return -EINVAL;

	/* Only the enable bit is reported; no other status bits are defined. */
	bcfi_status = is_shstk_enabled(t) ? PR_SHADOW_STACK_ENABLE : 0;

	if (copy_to_user(status, &bcfi_status, sizeof(bcfi_status)))
		return -EFAULT;

	return 0;
}
/*
 * prctl(PR_SET_SHADOW_STACK_STATUS) backend: enable or disable the user
 * shadow stack for @t according to @status.
 *
 * Enabling allocates a fresh shadow stack and points the active shadow
 * stack pointer at its top; disabling releases the allocation (with a
 * vfork caveat, see below) and clears the enable state.
 *
 * Returns 0 on success, -EINVAL on unsupported CPU, unknown flags, a
 * locked configuration, or a stale pre-existing allocation, and -ENOMEM
 * when the shadow stack cannot be allocated.
 */
int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status)
{
	unsigned long size = 0, addr = 0;
	bool enable_shstk = false;

	if (!cpu_supports_shadow_stack())
		return -EINVAL;

	/* Reject unknown flags */
	if (status & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	/* bcfi status is locked and further can't be modified by user */
	if (is_shstk_locked(t))
		return -EINVAL;

	enable_shstk = status & PR_SHADOW_STACK_ENABLE;

	/* Request is to enable shadow stack and shadow stack is not enabled already */
	if (enable_shstk && !is_shstk_enabled(t)) {
		/*
		 * A shadow stack was already allocated (but not enabled) and
		 * an enable request arrived again; no need to support such a
		 * usecase, return EINVAL.
		 */
		if (is_shstk_allocated(t))
			return -EINVAL;

		/* size 0 lets the helper pick a default sizing */
		size = calc_shstk_size(0);
		addr = allocate_shadow_stack(0, size, 0, false);
		if (IS_ERR_VALUE(addr))
			return -ENOMEM;

		set_shstk_base(t, addr, size);
		/* The shadow stack grows down: the top of the mapping is the active pointer */
		set_active_shstk(t, addr + size);
	}

	/*
	 * If a request to disable shadow stack comes in, go ahead and release
	 * it.  If a CLONE_VFORKed child did this, we will end up not
	 * releasing the shadow stack (because it might be needed in the
	 * parent), although we will disable it for the vforked child.  If the
	 * vforked child then tries to enable it again, it will get an
	 * entirely new shadow stack, because the following conditions hold:
	 *  - shadow stack was not enabled for the vforked child
	 *  - the shadow stack base was anyway pointing to 0
	 * This shouldn't be a big issue: we want the parent to keep its
	 * shadow stack available when the vforked child releases resources
	 * via exit or exec, but at the same time we want the vforked child to
	 * be able to break away and establish a new shadow stack if it
	 * desires.
	 */
	if (!enable_shstk)
		shstk_release(t);

	set_shstk_status(t, enable_shstk);
	return 0;
}
/*
 * prctl(PR_LOCK_SHADOW_STACK_STATUS) backend: lock the current shadow
 * stack enablement so userspace can no longer change it.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */
int arch_lock_shadow_stack_status(struct task_struct *task,
				  unsigned long arg)
{
	/* No lock flags are defined; any non-zero argument is invalid. */
	if (arg != 0)
		return -EINVAL;

	/* If shadow stack is unsupported or not enabled on the task, nothing to lock. */
	if (!cpu_supports_shadow_stack() || !is_shstk_enabled(task))
		return -EINVAL;

	set_shstk_lock(task);

	return 0;
}