linux/arch/x86/lib/getuser.S
Linus Torvalds 91309a7082 x86: use cmov for user address masking
This was a suggestion by David Laight, and while I was slightly worried
that some micro-architecture would predict cmov like a conditional
branch, there is little reason to actually believe any core would be
that broken.

Intel documents that their existing cores treat CMOVcc as a data
dependency that will constrain speculation in their "Speculative
Execution Side Channel Mitigations" whitepaper:

  "Other instructions such as CMOVcc, AND, ADC, SBB and SETcc can also
   be used to prevent bounds check bypass by constraining speculative
   execution on current family 6 processors (Intel® Core™, Intel® Atom™,
   Intel® Xeon® and Intel® Xeon Phi™ processors)"

and while that leaves the future uarch issues open, that's certainly
true of our traditional SBB usage too.

Any core that predicts CMOV will be unusable for various crypto
algorithms that need data-independent timing stability, so let's just
treat CMOV as the safe choice that simplifies the address masking by
avoiding an extra instruction and doesn't need a temporary register.
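
As a rough sketch of the two approaches (hand-written C for
illustration, not the kernel's implementation; "user_ptr_max" stands
in for the boot-patched USER_PTR_MAX limit):

  #include <stdint.h>

  /* New approach: a branch-free clamp.  The compare + conditional
   * move form a data dependency that speculation has to respect, so
   * an out-of-range address can never reach the load.  A compiler
   * will typically emit "cmp; cmova" for this. */
  static inline uintptr_t clamp_user_addr(uintptr_t addr, uintptr_t user_ptr_max)
  {
          return addr > user_ptr_max ? user_ptr_max : addr;
  }

  /* Traditional approach: branch on the limit, then build an
   * all-ones/all-zeroes mask (the sbb trick behind
   * array_index_mask_nospec) so that even a mispredicted branch only
   * ever sees a zeroed address.  Needs an extra instruction and a
   * temporary register. */
  static inline uintptr_t mask_user_addr(uintptr_t addr, uintptr_t limit)
  {
          uintptr_t mask = 0UL - (addr < limit);  /* ~0UL if valid, 0 if not */
          return addr & mask;
  }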

Suggested-by: David Laight <David.Laight@aculab.com>
Link: https://www.intel.com/content/dam/develop/external/us/en/documents/336996-speculative-execution-side-channel-mitigations.pdf
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2025-01-20 08:51:44 -08:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * __get_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */
/*
 * __get_user_X
 *
 * Inputs:	%[r|e]ax contains the address.
 *
 * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
 *		%[r|e]dx contains zero-extended value
 *		%ecx contains the high half for 32-bit __get_user_8
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */
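
/*
 * Illustrative call sequence (a sketch, not lifted from the kernel's
 * uaccess headers): the caller loads the user pointer into %[r|e]ax,
 * does "call __get_user_4", then tests %[r|e]ax for -EFAULT before
 * using the zero-extended value left in %[r|e]dx.
 */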
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC
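
/*
 * check_range: mask the user address before it is dereferenced.
 *
 * On 64-bit the 0x0123456789abcdef immediate is only a placeholder:
 * the runtime_ptr_USER_PTR_MAX section records its location so boot
 * code can patch in the real USER_PTR_MAX value, and cmp + cmova then
 * clamp any higher address to that limit without a branch (see the
 * commit message above).  On 32-bit a compare-and-branch rejects bad
 * addresses, with an sbb/and pair masking the address against
 * speculative use.
 */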
.macro check_range size:req
.if IS_ENABLED(CONFIG_X86_64)
	movq $0x0123456789abcdef,%rdx
1:
.pushsection runtime_ptr_USER_PTR_MAX,"a"
	.long 1b - 8 - .
.popsection
	cmp %rdx, %rax
	cmova %rdx, %rax
.else
	cmp $TASK_SIZE_MAX-\size+1, %eax
	jae .Lbad_get_user
	sbb %edx, %edx		/* array_index_mask_nospec() */
	and %edx, %eax
.endif
.endm
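
/*
 * UACCESS: one user-space access plus its exception-table entry, so a
 * fault in the access is redirected to __get_user_handle_exception
 * instead of being treated as a kernel bug.
 */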
.macro UACCESS op src dst
1:	\op \src,\dst
	_ASM_EXTABLE_UA(1b, __get_user_handle_exception)
.endm
.text
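
/*
 * All size variants follow the same pattern: mask or check the
 * address, open the user-access window with STAC, do the load through
 * a UACCESS fixup, clear %[r|e]ax to report success, and close the
 * window with CLAC.  The 32-bit __get_user_8 returns the value split
 * across %edx (low half) and %ecx (high half).
 */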
SYM_FUNC_START(__get_user_1)
	check_range size=1
	ASM_STAC
	UACCESS movzbl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)

SYM_FUNC_START(__get_user_2)
	check_range size=2
	ASM_STAC
	UACCESS movzwl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)

SYM_FUNC_START(__get_user_4)
	check_range size=4
	ASM_STAC
	UACCESS movl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)

SYM_FUNC_START(__get_user_8)
#ifndef CONFIG_X86_64
	xor %ecx,%ecx
#endif
	check_range size=8
	ASM_STAC
#ifdef CONFIG_X86_64
	UACCESS movq (%_ASM_AX),%rdx
#else
	UACCESS movl (%_ASM_AX),%edx
	UACCESS movl 4(%_ASM_AX),%ecx
#endif
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
/* .. and the same for __get_user, just without the range checks */
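/*
 * Since there is no range check here there is no address masking
 * either; ASM_BARRIER_NOSPEC (an lfence where enabled) is presumably
 * what keeps the access from being speculatively issued with an
 * unchecked address.
 */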
SYM_FUNC_START(__get_user_nocheck_1)
	ASM_STAC
	ASM_BARRIER_NOSPEC
	UACCESS movzbl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_1)
EXPORT_SYMBOL(__get_user_nocheck_1)

SYM_FUNC_START(__get_user_nocheck_2)
	ASM_STAC
	ASM_BARRIER_NOSPEC
	UACCESS movzwl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_2)
EXPORT_SYMBOL(__get_user_nocheck_2)

SYM_FUNC_START(__get_user_nocheck_4)
	ASM_STAC
	ASM_BARRIER_NOSPEC
	UACCESS movl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_4)
EXPORT_SYMBOL(__get_user_nocheck_4)

SYM_FUNC_START(__get_user_nocheck_8)
	ASM_STAC
	ASM_BARRIER_NOSPEC
#ifdef CONFIG_X86_64
	UACCESS movq (%_ASM_AX),%rdx
#else
	xor %ecx,%ecx
	UACCESS movl (%_ASM_AX),%edx
	UACCESS movl 4(%_ASM_AX),%ecx
#endif
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_8)
EXPORT_SYMBOL(__get_user_nocheck_8)
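
/*
 * Common exit for faulting accesses and the 32-bit bad-address branch:
 * close the user-access window, zero the value register and return
 * -EFAULT in %[r|e]ax.
 */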
SYM_CODE_START_LOCAL(__get_user_handle_exception)
	ASM_CLAC
.Lbad_get_user:
	xor %edx,%edx
	mov $(-EFAULT),%_ASM_AX
	RET
SYM_CODE_END(__get_user_handle_exception)