mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
Userspace specifies CLONE_VM to share address space and spawn new thread. 'clone' allows userspace to specify a new stack for a new thread. However there is no way to specify a new shadow stack base address without changing the API. This patch allocates a new shadow stack whenever CLONE_VM is given. In case of CLONE_VFORK, the parent is suspended until the child finishes; thus the child can use the parent's shadow stack. In case of !CLONE_VM, COW kicks in because entire address space is copied from parent to child. 'clone3' is extensible and can provide mechanisms for specifying the shadow stack as an input parameter. This is not settled yet and is being extensively discussed on the mailing list. Once that's settled, this code should be adapted. Reviewed-by: Zong Li <zong.li@sifive.com> Signed-off-by: Deepak Gupta <debug@rivosinc.com> Tested-by: Andreas Korb <andreas.korb@aisec.fraunhofer.de> # QEMU, custom CVA6 Tested-by: Valentin Haudiquet <valentin.haudiquet@canonical.com> Link: https://patch.msgid.link/20251112-v5_user_cfi_series-v23-11-b55691eacf4f@rivosinc.com [pjw@kernel.org: cleaned up patch description] Signed-off-by: Paul Walmsley <pjw@kernel.org>
261 lines
7.3 KiB
C
261 lines
7.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Copyright (C) 2024 Rivos, Inc.
|
|
* Deepak Gupta <debug@rivosinc.com>
|
|
*/
|
|
|
|
#include <linux/sched.h>
|
|
#include <linux/bitops.h>
|
|
#include <linux/types.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/mman.h>
|
|
#include <linux/uaccess.h>
|
|
#include <linux/sizes.h>
|
|
#include <linux/user.h>
|
|
#include <linux/syscalls.h>
|
|
#include <linux/prctl.h>
|
|
#include <asm/csr.h>
|
|
#include <asm/usercfi.h>
|
|
|
|
#define SHSTK_ENTRY_SIZE sizeof(void *)
|
|
|
|
bool is_shstk_enabled(struct task_struct *task)
|
|
{
|
|
return task->thread_info.user_cfi_state.ubcfi_en;
|
|
}
|
|
|
|
void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size)
|
|
{
|
|
task->thread_info.user_cfi_state.shdw_stk_base = shstk_addr;
|
|
task->thread_info.user_cfi_state.shdw_stk_size = size;
|
|
}
|
|
|
|
unsigned long get_shstk_base(struct task_struct *task, unsigned long *size)
|
|
{
|
|
if (size)
|
|
*size = task->thread_info.user_cfi_state.shdw_stk_size;
|
|
return task->thread_info.user_cfi_state.shdw_stk_base;
|
|
}
|
|
|
|
void set_active_shstk(struct task_struct *task, unsigned long shstk_addr)
|
|
{
|
|
task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
|
|
}
|
|
|
|
/*
|
|
* If size is 0, then to be compatible with regular stack we want it to be as big as
|
|
* regular stack. Else PAGE_ALIGN it and return back
|
|
*/
|
|
static unsigned long calc_shstk_size(unsigned long size)
|
|
{
|
|
if (size)
|
|
return PAGE_ALIGN(size);
|
|
|
|
return PAGE_ALIGN(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G));
|
|
}
|
|
|
|
/*
 * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen
 * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to
 * shadow stack. To keep it simple, we plan to use `ssamoswap` to perform writes on shadow
 * stack.
 *
 * Returns the old value found at @addr, or -1 on fault. -1 is usable as an
 * in-band error sentinel because a shadow stack is only ever expected to hold
 * return addresses or zero, never -1.
 */
static noinline unsigned long amo_user_shstk(unsigned long __user *addr, unsigned long val)
{
	/*
	 * Never expect -1 on shadow stack. Expect return addresses and zero
	 */
	unsigned long swap = -1;

	/* Open a user-access window: @addr is a userspace pointer. */
	__enable_user_access();
	asm goto(".option push\n"
		 ".option arch, +zicfiss\n"
		 /* Atomically swap @val into *@addr; old value lands in swap. */
		 "1: ssamoswap.d %[swap], %[val], %[addr]\n"
		 /* On a fault at 1:, the extable fixup jumps to the fault label. */
		 _ASM_EXTABLE(1b, %l[fault])
		 ".option pop\n"
		 : [swap] "=r" (swap), [addr] "+A" (*(__force unsigned long *)addr)
		 : [val] "r" (val)
		 : "memory"
		 : fault
		 );
	__disable_user_access();
	return swap;
fault:
	/* Faulted: close the user-access window before reporting failure. */
	__disable_user_access();
	return -1;
}
|
|
|
|
/*
|
|
* Create a restore token on the shadow stack. A token is always XLEN wide
|
|
* and aligned to XLEN.
|
|
*/
|
|
static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
|
|
{
|
|
unsigned long addr;
|
|
|
|
/* Token must be aligned */
|
|
if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE))
|
|
return -EINVAL;
|
|
|
|
/* On RISC-V we're constructing token to be function of address itself */
|
|
addr = ssp - SHSTK_ENTRY_SIZE;
|
|
|
|
if (amo_user_shstk((unsigned long __user *)addr, (unsigned long)ssp) == -1)
|
|
return -EFAULT;
|
|
|
|
if (token_addr)
|
|
*token_addr = addr;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Map a shadow stack of @size bytes. If @addr is non-zero it is used as a
 * fixed hint (MAP_FIXED_NOREPLACE); otherwise the kernel picks the address.
 * When @set_tok is true, a restore token is planted at @token_offset from
 * the mapping base; if that fails the mapping is torn down.
 *
 * Returns the mapping address on success or a negative error value
 * (encoded in the unsigned long, check with IS_ERR_VALUE).
 */
static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
					   unsigned long token_offset, bool set_tok)
{
	struct mm_struct *mm = current->mm;
	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
	unsigned long populate;
	unsigned long mapped;

	if (addr)
		flags |= MAP_FIXED_NOREPLACE;

	mmap_write_lock(mm);
	/* PROT_READ only; shadow-stack semantics come from VM_SHADOW_STACK. */
	mapped = do_mmap(NULL, addr, size, PROT_READ, flags,
			 VM_SHADOW_STACK | VM_WRITE, 0, &populate, NULL);
	mmap_write_unlock(mm);

	if (IS_ERR_VALUE(mapped) || !set_tok)
		return mapped;

	if (create_rstor_token(mapped + token_offset, NULL)) {
		/* Token write failed: don't leak the mapping. */
		vm_munmap(mapped, size);
		return -EINVAL;
	}

	return mapped;
}
|
|
|
|
/*
 * map_shadow_stack(addr, size, flags): arch-agnostic syscall to allocate a
 * shadow stack mapping for userspace. The only recognized flag is
 * SHADOW_STACK_SET_TOKEN, which requests a restore token at the top.
 */
SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
{
	unsigned long full_size;
	bool want_token = flags & SHADOW_STACK_SET_TOKEN;

	if (!cpu_supports_shadow_stack())
		return -EOPNOTSUPP;

	/* Anything other than set token should result in invalid param */
	if (flags & ~SHADOW_STACK_SET_TOKEN)
		return -EINVAL;

	/*
	 * Unlike other architectures, on RISC-V, SSP pointer is held in CSR_SSP and is an available
	 * CSR in all modes. CSR accesses are performed using 12bit index programmed in instruction
	 * itself. This provides static property on register programming and writes to CSR can't
	 * be unintentional from programmer's perspective. As long as programmer has guarded areas
	 * which perform writes to CSR_SSP properly, shadow stack pivoting is not possible. Since
	 * CSR_SSP is writable by user mode, it itself can setup a shadow stack token subsequent
	 * to allocation. Although in order to provide portability with other architectures (because
	 * `map_shadow_stack` is arch agnostic syscall), RISC-V will follow expectation of a token
	 * flag in flags and if provided in flags, will setup a token at the base.
	 */

	/* A token needs at least one shadow stack entry's worth of room. */
	if (want_token && size < SHSTK_ENTRY_SIZE)
		return -ENOSPC;

	/* A caller-chosen address must be page aligned. */
	if (addr && (addr & (PAGE_SIZE - 1)))
		return -EINVAL;

	full_size = PAGE_ALIGN(size);
	if (full_size < size)
		return -EOVERFLOW;

	/* Token (if any) goes at offset @size, i.e. the requested top. */
	return allocate_shadow_stack(addr, full_size, size, want_token);
}
|
|
|
|
/*
|
|
* This gets called during clone/clone3/fork. And is needed to allocate a shadow stack for
|
|
* cases where CLONE_VM is specified and thus a different stack is specified by user. We
|
|
* thus need a separate shadow stack too. How a separate shadow stack is specified by
|
|
* user is still being debated. Once that's settled, remove this part of the comment.
|
|
* This function simply returns 0 if shadow stacks are not supported or if separate shadow
|
|
* stack allocation is not needed (like in case of !CLONE_VM)
|
|
*/
|
|
unsigned long shstk_alloc_thread_stack(struct task_struct *tsk,
|
|
const struct kernel_clone_args *args)
|
|
{
|
|
unsigned long addr, size;
|
|
|
|
/* If shadow stack is not supported, return 0 */
|
|
if (!cpu_supports_shadow_stack())
|
|
return 0;
|
|
|
|
/*
|
|
* If shadow stack is not enabled on the new thread, skip any
|
|
* switch to a new shadow stack.
|
|
*/
|
|
if (!is_shstk_enabled(tsk))
|
|
return 0;
|
|
|
|
/*
|
|
* For CLONE_VFORK the child will share the parents shadow stack.
|
|
* Set base = 0 and size = 0, this is special means to track this state
|
|
* so the freeing logic run for child knows to leave it alone.
|
|
*/
|
|
if (args->flags & CLONE_VFORK) {
|
|
set_shstk_base(tsk, 0, 0);
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* For !CLONE_VM the child will use a copy of the parents shadow
|
|
* stack.
|
|
*/
|
|
if (!(args->flags & CLONE_VM))
|
|
return 0;
|
|
|
|
/*
|
|
* reaching here means, CLONE_VM was specified and thus a separate shadow
|
|
* stack is needed for new cloned thread. Note: below allocation is happening
|
|
* using current mm.
|
|
*/
|
|
size = calc_shstk_size(args->stack_size);
|
|
addr = allocate_shadow_stack(0, size, 0, false);
|
|
if (IS_ERR_VALUE(addr))
|
|
return addr;
|
|
|
|
set_shstk_base(tsk, addr, size);
|
|
|
|
return addr + size;
|
|
}
|
|
|
|
void shstk_release(struct task_struct *tsk)
|
|
{
|
|
unsigned long base = 0, size = 0;
|
|
/* If shadow stack is not supported or not enabled, nothing to release */
|
|
if (!cpu_supports_shadow_stack() || !is_shstk_enabled(tsk))
|
|
return;
|
|
|
|
/*
|
|
* When fork() with CLONE_VM fails, the child (tsk) already has a
|
|
* shadow stack allocated, and exit_thread() calls this function to
|
|
* free it. In this case the parent (current) and the child share
|
|
* the same mm struct. Move forward only when they're same.
|
|
*/
|
|
if (!tsk->mm || tsk->mm != current->mm)
|
|
return;
|
|
|
|
/*
|
|
* We know shadow stack is enabled but if base is NULL, then
|
|
* this task is not managing its own shadow stack (CLONE_VFORK). So
|
|
* skip freeing it.
|
|
*/
|
|
base = get_shstk_base(tsk, &size);
|
|
if (!base)
|
|
return;
|
|
|
|
vm_munmap(base, size);
|
|
set_shstk_base(tsk, 0, 0);
|
|
}
|