LoongArch: Adjust user accessors for 32BIT/64BIT

Adjust user accessors for both 32BIT and 64BIT, including: get_user(),
put_user(), copy_user(), clear_user(), etc.

Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
This commit is contained in:
Huacai Chen
2025-12-08 18:09:17 +08:00
parent 14338e631a
commit 48c7294775
3 changed files with 91 additions and 22 deletions

View File

@@ -19,10 +19,16 @@
 #include <asm/asm-extable.h>
 #include <asm-generic/access_ok.h>
 
+#define __LSW	0
+#define __MSW	1
+
 extern u64 __ua_limit;
 
-#define __UA_ADDR	".dword"
+#ifdef CONFIG_64BIT
 #define __UA_LIMIT	__ua_limit
+#else
+#define __UA_LIMIT	0x80000000UL
+#endif
 
 /*
  * get_user: - Get a simple variable from user space.
@@ -126,6 +132,7 @@ extern u64 __ua_limit;
  *
  * Returns zero on success, or -EFAULT on error.
  */
+
 #define __put_user(x, ptr)					\
 ({								\
 	int __pu_err = 0;					\
@@ -146,7 +153,7 @@ do {								\
 	case 1: __get_data_asm(val, "ld.b", ptr); break;	\
 	case 2: __get_data_asm(val, "ld.h", ptr); break;	\
 	case 4: __get_data_asm(val, "ld.w", ptr); break;	\
-	case 8: __get_data_asm(val, "ld.d", ptr); break;	\
+	case 8: __get_data_asm_8(val, ptr); break;		\
 	default: BUILD_BUG(); break;				\
 	}							\
 } while (0)
@@ -167,13 +174,39 @@ do {								\
 	(val) = (__typeof__(*(ptr))) __gu_tmp;			\
 }
 
+#ifdef CONFIG_64BIT
+#define __get_data_asm_8(val, ptr)				\
+	__get_data_asm(val, "ld.d", ptr)
+#else /* !CONFIG_64BIT */
+#define __get_data_asm_8(val, ptr)				\
+{								\
+	u32 __lo, __hi;						\
+	u32 __user *__ptr = (u32 __user *)(ptr);		\
+								\
+	__asm__ __volatile__ (					\
+	"1:\n"							\
+	"	ld.w	%1, %3	\n"				\
+	"2:\n"							\
+	"	ld.w	%2, %4	\n"				\
+	"3:\n"							\
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)		\
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)		\
+	: "+r" (__gu_err), "=&r" (__lo), "=r" (__hi)		\
+	: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));		\
+	if (__gu_err)						\
+		__hi = 0;					\
+	(val) = (__typeof__(val))((__typeof__((val)-(val)))	\
+		((((u64)__hi << 32) | __lo)));			\
+}
+#endif /* CONFIG_64BIT */
+
 #define __put_user_common(ptr, size)				\
 do {								\
 	switch (size) {						\
 	case 1: __put_data_asm("st.b", ptr); break;		\
 	case 2: __put_data_asm("st.h", ptr); break;		\
 	case 4: __put_data_asm("st.w", ptr); break;		\
-	case 8: __put_data_asm("st.d", ptr); break;		\
+	case 8: __put_data_asm_8(ptr); break;			\
 	default: BUILD_BUG(); break;				\
 	}							\
 } while (0)
@@ -190,6 +223,30 @@ do {								\
 	: "Jr" (__pu_val));					\
 }
 
+#ifdef CONFIG_64BIT
+#define __put_data_asm_8(ptr)					\
+	__put_data_asm("st.d", ptr)
+#else /* !CONFIG_64BIT */
+#define __put_data_asm_8(ptr)					\
+{								\
+	u32 __user *__ptr = (u32 __user *)(ptr);		\
+	u64 __x = (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \
+								\
+	__asm__ __volatile__ (					\
+	"1:\n"							\
+	"	st.w	%z3, %1	\n"				\
+	"2:\n"							\
+	"	st.w	%z4, %2	\n"				\
+	"3:\n"							\
+	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)			\
+	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)			\
+	: "+r" (__pu_err),					\
+	  "=m" (__ptr[__LSW]),					\
+	  "=m" (__ptr[__MSW])					\
+	: "rJ" (__x), "rJ" (__x >> 32));			\
+}
+#endif /* CONFIG_64BIT */
+
 #define __get_kernel_nofault(dst, src, type, err_label)		\
 do {								\
 	int __gu_err = 0;					\

View File

@@ -13,11 +13,15 @@
 #include <asm/unwind_hints.h>
 
 SYM_FUNC_START(__clear_user)
+#ifdef CONFIG_32BIT
+	b	__clear_user_generic
+#else
 	/*
 	 * Some CPUs support hardware unaligned access
 	 */
 	ALTERNATIVE	"b __clear_user_generic",	\
 			"b __clear_user_fast", CPU_FEATURE_UAL
+#endif
 SYM_FUNC_END(__clear_user)
 
 EXPORT_SYMBOL(__clear_user)
@@ -32,8 +36,8 @@ SYM_FUNC_START(__clear_user_generic)
 	beqz	a1, 2f
 
 1:	st.b	zero, a0, 0
-	addi.d	a0, a0, 1
-	addi.d	a1, a1, -1
+	PTR_ADDI a0, a0, 1
+	PTR_ADDI a1, a1, -1
 	bgtz	a1, 1b
 
 2:	move	a0, a1
@@ -42,6 +46,7 @@ SYM_FUNC_START(__clear_user_generic)
 	_asm_extable 1b, 2b
 SYM_FUNC_END(__clear_user_generic)
 
+#ifdef CONFIG_64BIT
 /*
  * unsigned long __clear_user_fast(void *addr, unsigned long size)
  *
@@ -207,3 +212,4 @@ SYM_FUNC_START(__clear_user_fast)
 SYM_FUNC_END(__clear_user_fast)
 
 STACK_FRAME_NON_STANDARD __clear_user_fast
+#endif

View File

@@ -13,11 +13,15 @@
 #include <asm/unwind_hints.h>
 
 SYM_FUNC_START(__copy_user)
+#ifdef CONFIG_32BIT
+	b	__copy_user_generic
+#else
 	/*
 	 * Some CPUs support hardware unaligned access
 	 */
 	ALTERNATIVE	"b __copy_user_generic",	\
 			"b __copy_user_fast", CPU_FEATURE_UAL
+#endif
SYM_FUNC_END(__copy_user)
 
 EXPORT_SYMBOL(__copy_user)
@@ -34,9 +38,9 @@ SYM_FUNC_START(__copy_user_generic)
 
 1:	ld.b	t0, a1, 0
 2:	st.b	t0, a0, 0
-	addi.d	a0, a0, 1
-	addi.d	a1, a1, 1
-	addi.d	a2, a2, -1
+	PTR_ADDI a0, a0, 1
+	PTR_ADDI a1, a1, 1
+	PTR_ADDI a2, a2, -1
 	bgtz	a2, 1b
 
 3:	move	a0, a2
@@ -46,6 +50,7 @@ SYM_FUNC_START(__copy_user_generic)
 	_asm_extable 2b, 3b
 SYM_FUNC_END(__copy_user_generic)
 
+#ifdef CONFIG_64BIT
 /*
  * unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
  *
@@ -281,3 +286,4 @@ SYM_FUNC_START(__copy_user_fast)
 SYM_FUNC_END(__copy_user_fast)
 
 STACK_FRAME_NON_STANDARD __copy_user_fast
+#endif