Since Armv9.6, FEAT_LSUI provides load/store instructions that allow privileged code to access user memory without clearing the PSTATE.PAN bit. Add CPU feature detection for FEAT_LSUI. Signed-off-by: Yeoreum Yun Reviewed-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 10 ++++++++++ arch/arm64/tools/cpucaps | 1 + 2 files changed, 11 insertions(+) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 5ed401ff79e3..fc014a1fb0e0 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -279,6 +279,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { static const struct arm64_ftr_bits ftr_id_aa64isar3[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FPRCVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_LSUI_SHIFT, 4, ID_AA64ISAR3_EL1_LSUI_NI), ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_LSFE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0), ARM64_FTR_END, @@ -3088,6 +3089,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_GICV5_LEGACY, .matches = test_has_gicv5_legacy, }, +#ifdef CONFIG_AS_HAS_LSUI + { + .desc = "Unprivileged Load Store Instructions (LSUI)", + .capability = ARM64_HAS_LSUI, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR3_EL1, LSUI, IMP) + }, +#endif {}, }; diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 1b32c1232d28..9a15784829f8 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -45,6 +45,7 @@ HAS_HCX HAS_LDAPR HAS_LPA2 HAS_LSE_ATOMICS +HAS_LSUI HAS_MOPS HAS_NESTED_VIRT HAS_BBML2_NOABORT -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7} expose FEAT_LSUI to guest. Signed-off-by: Yeoreum Yun Acked-by: Marc Zyngier Reviewed-by: Catalin Marinas --- arch/arm64/kvm/sys_regs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e67eb39ddc11..c1ac45058fae 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1790,7 +1790,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, break; case SYS_ID_AA64ISAR3_EL1: val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE | - ID_AA64ISAR3_EL1_FAMINMAX; + ID_AA64ISAR3_EL1_FAMINMAX | ID_AA64ISAR3_EL1_LSUI; break; case SYS_ID_AA64MMFR2_EL1: val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK; @@ -3231,6 +3231,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64ISAR2_EL1_GPA3)), ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE | + ID_AA64ISAR3_EL1_LSUI | ID_AA64ISAR3_EL1_FAMINMAX)), ID_UNALLOCATED(6,4), ID_UNALLOCATED(6,5), -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7} Add test coverage for FEAT_LSUI. 
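The selftest only checks that the new ID_AA64ISAR3_EL1.LSUI field is handled as a writable, lower-safe ID register field. In-kernel users gate their fast paths on the capability bit added earlier in the series; a minimal sketch of such a check follows (example_can_use_lsui() is an illustrative name, not part of these patches):

  /*
   * Sketch: gating code on the new capability. cpus_have_final_cap() is the
   * standard arm64 helper and resolves to a static branch once system
   * capabilities have been finalised; the wrapper itself is illustrative.
   */
  #include <linux/kconfig.h>
  #include <asm/cpufeature.h>

  static inline bool example_can_use_lsui(void)
  {
  	return IS_ENABLED(CONFIG_AS_HAS_LSUI) &&
  	       cpus_have_final_cap(ARM64_HAS_LSUI);
  }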
Signed-off-by: Yeoreum Yun --- tools/testing/selftests/kvm/arm64/set_id_regs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 5e24f77868b5..f5ba72127464 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -125,6 +125,7 @@ static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = { static const struct reg_ftr_bits ftr_id_aa64isar3_el1[] = { REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FPRCVT, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSUI, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSFE, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FAMINMAX, 0), REG_FTR_END, -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
Since Armv9.6, FEAT_LSUI supplies load/store instructions that allow privileged code to access user memory without clearing the PSTATE.PAN bit. Adding CONFIG_AS_HAS_LSUI is sufficient because the LSUI code uses individual `.arch_extension` directives. Signed-off-by: Yeoreum Yun Reviewed-by: Catalin Marinas --- arch/arm64/Kconfig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6663ffd23f25..bdbbe78160ab 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2201,6 +2201,11 @@ config ARM64_GCS endmenu # "ARMv9.4 architectural features" +config AS_HAS_LSUI + def_bool $(as-instr,.arch_extension lsui) + help + Supported by LLVM 20+ and binutils 2.45+. + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
Refactor the futex atomic operations that use the LL/SC method with PSTATE.PAN cleared, in preparation for applying FEAT_LSUI to them. Signed-off-by: Yeoreum Yun Reviewed-by: Catalin Marinas --- arch/arm64/include/asm/futex.h | 128 +++++++++++++++++++++------ 1 file changed, 82 insertions(+), 46 deletions(-) diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index bc06691d2062..f8cb674bdb3f 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -7,17 +7,21 @@ #include #include +#include #include #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of?
*/ -#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ -do { \ +#define LLSC_FUTEX_ATOMIC_OP(op, insn) \ +static __always_inline int \ +__llsc_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \ +{ \ unsigned int loops = FUTEX_MAX_LOOPS; \ + int ret, oldval, tmp; \ \ uaccess_enable_privileged(); \ - asm volatile( \ + asm volatile("// __llsc_futex_atomic_" #op "\n" \ " prfm pstl1strm, %2\n" \ "1: ldxr %w1, %2\n" \ insn "\n" \ @@ -35,45 +39,103 @@ do { \ : "r" (oparg), "Ir" (-EAGAIN) \ : "memory"); \ uaccess_disable_privileged(); \ -} while (0) + \ + if (!ret) \ + *oval = oldval; \ + \ + return ret; \ +} + +LLSC_FUTEX_ATOMIC_OP(add, "add %w3, %w1, %w5") +LLSC_FUTEX_ATOMIC_OP(or, "orr %w3, %w1, %w5") +LLSC_FUTEX_ATOMIC_OP(and, "and %w3, %w1, %w5") +LLSC_FUTEX_ATOMIC_OP(eor, "eor %w3, %w1, %w5") +LLSC_FUTEX_ATOMIC_OP(set, "mov %w3, %w5") + +static __always_inline int +__llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) +{ + int ret = 0; + unsigned int loops = FUTEX_MAX_LOOPS; + u32 val, tmp; + + uaccess_enable_privileged(); + asm volatile("//__llsc_futex_cmpxchg\n" +" prfm pstl1strm, %2\n" +"1: ldxr %w1, %2\n" +" eor %w3, %w1, %w5\n" +" cbnz %w3, 4f\n" +"2: stlxr %w3, %w6, %2\n" +" cbz %w3, 3f\n" +" sub %w4, %w4, %w3\n" +" cbnz %w4, 1b\n" +" mov %w0, %w7\n" +"3:\n" +" dmb ish\n" +"4:\n" + _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) + _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) + : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) + : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) + : "memory"); + uaccess_disable_privileged(); + + if (!ret) + *oval = val; + + return ret; +} + +#define FUTEX_ATOMIC_OP(op) \ +static __always_inline int \ +__futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \ +{ \ + return __llsc_futex_atomic_##op(oparg, uaddr, oval); \ +} + +FUTEX_ATOMIC_OP(add) +FUTEX_ATOMIC_OP(or) +FUTEX_ATOMIC_OP(and) +FUTEX_ATOMIC_OP(eor) +FUTEX_ATOMIC_OP(set) + +static __always_inline int +__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) +{ + return __llsc_futex_cmpxchg(uaddr, oldval, newval, oval); +} static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) { - int oldval = 0, ret, tmp; - u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); + int ret; + u32 __user *uaddr; if (!access_ok(_uaddr, sizeof(u32))) return -EFAULT; + uaddr = __uaccess_mask_ptr(_uaddr); + switch (op) { case FUTEX_OP_SET: - __futex_atomic_op("mov %w3, %w5", - ret, oldval, uaddr, tmp, oparg); + ret = __futex_atomic_set(oparg, uaddr, oval); break; case FUTEX_OP_ADD: - __futex_atomic_op("add %w3, %w1, %w5", - ret, oldval, uaddr, tmp, oparg); + ret = __futex_atomic_add(oparg, uaddr, oval); break; case FUTEX_OP_OR: - __futex_atomic_op("orr %w3, %w1, %w5", - ret, oldval, uaddr, tmp, oparg); + ret = __futex_atomic_or(oparg, uaddr, oval); break; case FUTEX_OP_ANDN: - __futex_atomic_op("and %w3, %w1, %w5", - ret, oldval, uaddr, tmp, ~oparg); + ret = __futex_atomic_and(~oparg, uaddr, oval); break; case FUTEX_OP_XOR: - __futex_atomic_op("eor %w3, %w1, %w5", - ret, oldval, uaddr, tmp, oparg); + ret = __futex_atomic_eor(oparg, uaddr, oval); break; default: ret = -ENOSYS; } - if (!ret) - *oval = oldval; - return ret; } @@ -81,40 +143,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, u32 oldval, u32 newval) { - int ret = 0; - unsigned int loops = FUTEX_MAX_LOOPS; - u32 val, tmp; u32 __user *uaddr; if (!access_ok(_uaddr, sizeof(u32))) return -EFAULT; uaddr = __uaccess_mask_ptr(_uaddr); - 
uaccess_enable_privileged(); - asm volatile("// futex_atomic_cmpxchg_inatomic\n" -" prfm pstl1strm, %2\n" -"1: ldxr %w1, %2\n" -" sub %w3, %w1, %w5\n" -" cbnz %w3, 4f\n" -"2: stlxr %w3, %w6, %2\n" -" cbz %w3, 3f\n" -" sub %w4, %w4, %w3\n" -" cbnz %w4, 1b\n" -" mov %w0, %w7\n" -"3:\n" -" dmb ish\n" -"4:\n" - _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) - _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) - : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) - : "memory"); - uaccess_disable_privileged(); - - if (!ret) - *uval = val; - return ret; + return __futex_cmpxchg(uaddr, oldval, newval, uval); } #endif /* __ASM_FUTEX_H */ -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
The current futex atomic operations are implemented with LL/SC instructions and by clearing PSTATE.PAN. Since Armv9.6, FEAT_LSUI supplies not only load/store instructions but also atomic operations for user memory access from the kernel, so the PSTATE.PAN bit no longer needs to be cleared. With these instructions, some of the futex atomic operations no longer need an ldxr/stlxr pair and can instead be implemented with a single atomic operation supplied by FEAT_LSUI. However, some futex atomic operations have no matching instruction, e.g. eor, or cmpxchg at word size. Implement those using cas{al}t. Signed-off-by: Yeoreum Yun --- arch/arm64/include/asm/futex.h | 178 ++++++++++++++++++++++++++++++++- 1 file changed, 177 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index f8cb674bdb3f..ee79944df6fe 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -9,6 +9,8 @@ #include #include +#include +#include #include #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */ @@ -86,11 +88,185 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) return ret; } +#ifdef CONFIG_AS_HAS_LSUI + +/* + * When the LSUI feature is present, the CPU also implements PAN, because + * FEAT_PAN has been mandatory since Armv8.1. Therefore, there is no need to + * call uaccess_ttbr0_enable()/uaccess_ttbr0_disable() around each LSUI + * operation.
+ */ + +#define __LSUI_PREAMBLE ".arch_extension lsui\n" + +#define LSUI_FUTEX_ATOMIC_OP(op, asm_op, mb) \ +static __always_inline int \ +__lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \ +{ \ + int ret = 0; \ + int oldval; \ + \ + asm volatile("// __lsui_futex_atomic_" #op "\n" \ + __LSUI_PREAMBLE \ +"1: " #asm_op #mb " %w3, %w2, %1\n" \ +"2:\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \ + : "+r" (ret), "+Q" (*uaddr), "=r" (oldval) \ + : "r" (oparg) \ + : "memory"); \ + \ + if (!ret) \ + *oval = oldval; \ + \ + return ret; \ +} + +LSUI_FUTEX_ATOMIC_OP(add, ldtadd, al) +LSUI_FUTEX_ATOMIC_OP(or, ldtset, al) +LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr, al) +LSUI_FUTEX_ATOMIC_OP(set, swpt, al) + +static __always_inline int +__lsui_cmpxchg64(u64 __user *uaddr, u64 *oldval, u64 newval) +{ + int ret = 0; + + asm volatile("// __lsui_cmpxchg64\n" + __LSUI_PREAMBLE +"1: casalt %x2, %x3, %1\n" +"2:\n" + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) + : "+r" (ret), "+Q" (*uaddr), "+r" (*oldval) + : "r" (newval) + : "memory"); + + return ret; +} + +static __always_inline int +__lsui_cmpxchg32(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) +{ + u64 __user *uaddr64; + bool futex_on_lo; + int ret = -EAGAIN, i; + u32 other, orig_other; + union { + struct futex_on_lo { + u32 val; + u32 other; + } lo_futex; + + struct futex_on_hi { + u32 other; + u32 val; + } hi_futex; + + u64 raw; + } oval64, orig64, nval64; + + uaddr64 = (u64 __user *) PTR_ALIGN_DOWN(uaddr, sizeof(u64)); + futex_on_lo = (IS_ALIGNED((unsigned long)uaddr, sizeof(u64)) == + IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)); + + for (i = 0; i < FUTEX_MAX_LOOPS; i++) { + if (get_user(oval64.raw, uaddr64)) + return -EFAULT; + + nval64.raw = oval64.raw; + + if (futex_on_lo) { + oval64.lo_futex.val = oldval; + nval64.lo_futex.val = newval; + } else { + oval64.hi_futex.val = oldval; + nval64.hi_futex.val = newval; + } + + orig64.raw = oval64.raw; + + if (__lsui_cmpxchg64(uaddr64, &oval64.raw, nval64.raw)) + return -EFAULT; + + if (futex_on_lo) { + oldval = oval64.lo_futex.val; + other = oval64.lo_futex.other; + orig_other = orig64.lo_futex.other; + } else { + oldval = oval64.hi_futex.val; + other = oval64.hi_futex.other; + orig_other = orig64.hi_futex.other; + } + + if (other == orig_other) { + ret = 0; + break; + } + } + + if (!ret) + *oval = oldval; + + return ret; +} + +static __always_inline int +__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval) +{ + return __lsui_futex_atomic_andnot(~oparg, uaddr, oval); +} + +static __always_inline int +__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval) +{ + u32 oldval, newval, val; + int ret, i; + + /* + * there are no ldteor/stteor instructions... + */ + for (i = 0; i < FUTEX_MAX_LOOPS; i++) { + if (get_user(oldval, uaddr)) + return -EFAULT; + + newval = oldval ^ oparg; + + ret = __lsui_cmpxchg32(uaddr, oldval, newval, &val); + if (ret) + return ret; + + if (val == oldval) { + *oval = val; + return 0; + } + } + + return -EAGAIN; +} + +static __always_inline int +__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) +{ + return __lsui_cmpxchg32(uaddr, oldval, newval, oval); +} + +#define __lsui_llsc_body(op, ...) \ +({ \ + alternative_has_cap_likely(ARM64_HAS_LSUI) ? \ + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \ +}) + +#else /* CONFIG_AS_HAS_LSUI */ + +#define __lsui_llsc_body(op, ...) 
__llsc_##op(__VA_ARGS__) + +#endif /* CONFIG_AS_HAS_LSUI */ + + #define FUTEX_ATOMIC_OP(op) \ static __always_inline int \ __futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \ { \ - return __llsc_futex_atomic_##op(oparg, uaddr, oval); \ + return __lsui_llsc_body(futex_atomic_##op, oparg, uaddr, oval); \ } FUTEX_ATOMIC_OP(add) -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
This patch prepares for applying LSUI to the armv8_deprecated SWP instruction. Some LSUI-related definitions can be reused by armv8_deprecated.c, so move the common definitions into a separate header file, lsui.h. Signed-off-by: Yeoreum Yun --- arch/arm64/include/asm/futex.h | 15 +-------------- arch/arm64/include/asm/lsui.h | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 14 deletions(-) create mode 100644 arch/arm64/include/asm/lsui.h diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index ee79944df6fe..e75cfc5e3495 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -9,9 +9,8 @@ #include #include -#include -#include #include +#include #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */ @@ -97,8 +96,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) * operation. */ -#define __LSUI_PREAMBLE ".arch_extension lsui\n" - #define LSUI_FUTEX_ATOMIC_OP(op, asm_op, mb) \ static __always_inline int \ __lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \ @@ -249,16 +246,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) return __lsui_cmpxchg32(uaddr, oldval, newval, oval); } -#define __lsui_llsc_body(op, ...) \ -({ \ - alternative_has_cap_likely(ARM64_HAS_LSUI) ? \ - __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \ -}) - -#else /* CONFIG_AS_HAS_LSUI */ - -#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__) - -#endif /* CONFIG_AS_HAS_LSUI */ diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h new file mode 100644 index 000000000000..1a2ad408a47b --- /dev/null +++ b/arch/arm64/include/asm/lsui.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_LSUI_H +#define __ASM_LSUI_H + +#ifdef CONFIG_AS_HAS_LSUI + +#define __LSUI_PREAMBLE ".arch_extension lsui\n" + +#include +#include +#include +#include + +#define __lsui_llsc_body(op, ...) \ +({ \ + alternative_has_cap_likely(ARM64_HAS_LSUI) ? \ + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \ +}) + +#else /* CONFIG_AS_HAS_LSUI */ + +#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__) + +#endif /* CONFIG_AS_HAS_LSUI */ +#endif /* __ASM_LSUI_H */ -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
This is a preparation patch for applying FEAT_LSUI to the user_swpX operation. For this, convert the user_swpX macro into inline functions. No functional change.
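Once the helpers are plain functions with a common __llsc_/__lsui_ naming scheme, they can be dispatched through __lsui_llsc_body() from <asm/lsui.h>, the same way the futex helpers are. A condensed sketch of the pattern (the *_example_op names are illustrative stand-ins, not part of this patch):

  /*
   * Sketch of the dispatch pattern set up by <asm/lsui.h>: the caller names
   * the operation once and __lsui_llsc_body() selects the LSUI variant when
   * ARM64_HAS_LSUI is set, falling back to the LL/SC variant otherwise.
   */
  #include <linux/types.h>
  #include <asm/lsui.h>

  static __always_inline int __llsc_example_op(u32 __user *uaddr, u32 val)
  {
  	return 0;	/* LL/SC implementation would live here */
  }

  #ifdef CONFIG_AS_HAS_LSUI
  static __always_inline int __lsui_example_op(u32 __user *uaddr, u32 val)
  {
  	return 0;	/* LSUI implementation would live here */
  }
  #endif

  static __always_inline int example_op(u32 __user *uaddr, u32 val)
  {
  	return __lsui_llsc_body(example_op, uaddr, val);
  }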
Signed-off-by: Yeoreum Yun --- arch/arm64/kernel/armv8_deprecated.c | 38 +++++++++++++++++----------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index e737c6295ec7..d15e35f1075c 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -93,13 +93,18 @@ static unsigned int __maybe_unused aarch32_check_condition(u32 opcode, u32 psr) /* Arbitrary constant to ensure forward-progress of the LL/SC loop */ #define __SWP_LL_SC_LOOPS 4 -#define __user_swpX_asm(data, addr, res, temp, temp2, B) \ -do { \ +#define LLSC_USER_SWPX(B) \ +static __always_inline int \ +__llsc_user_swp##B##_asm(unsigned int *data, unsigned int addr) \ +{ \ + int err = 0; \ + unsigned int temp, temp2; \ + \ uaccess_enable_privileged(); \ __asm__ __volatile__( \ " mov %w3, %w6\n" \ - "0: ldxr"B" %w2, [%4]\n" \ - "1: stxr"B" %w0, %w1, [%4]\n" \ + "0: ldxr"#B" %w2, [%4]\n" \ + "1: stxr"#B" %w0, %w1, [%4]\n" \ " cbz %w0, 2f\n" \ " sub %w3, %w3, #1\n" \ " cbnz %w3, 0b\n" \ @@ -110,17 +115,22 @@ do { \ "3:\n" \ _ASM_EXTABLE_UACCESS_ERR(0b, 3b, %w0) \ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \ - : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ + : "=&r" (err), "+r" (*data), "=&r" (temp), "=&r" (temp2)\ : "r" ((unsigned long)addr), "i" (-EAGAIN), \ "i" (__SWP_LL_SC_LOOPS) \ : "memory"); \ uaccess_disable_privileged(); \ -} while (0) + \ + return err; \ +} + +LLSC_USER_SWPX() +LLSC_USER_SWPX(b) -#define __user_swp_asm(data, addr, res, temp, temp2) \ - __user_swpX_asm(data, addr, res, temp, temp2, "") -#define __user_swpb_asm(data, addr, res, temp, temp2) \ - __user_swpX_asm(data, addr, res, temp, temp2, "b") +#define __user_swp_asm(data, addr) \ + __llsc_user_swp_asm(data, addr) +#define __user_swpb_asm(data, addr) \ + __llsc_user_swpb_asm(data, addr) /* * Bit 22 of the instruction encoding distinguishes between @@ -131,7 +141,7 @@ do { \ static int emulate_swpX(unsigned int address, unsigned int *data, unsigned int type) { - unsigned int res = 0; + unsigned int res; if ((type != TYPE_SWPB) && (address & 0x3)) { /* SWP to unaligned address not permitted */ @@ -140,12 +150,10 @@ static int emulate_swpX(unsigned int address, unsigned int *data, } while (1) { - unsigned long temp, temp2; - if (type == TYPE_SWPB) - __user_swpb_asm(*data, address, res, temp, temp2); + res = __user_swpb_asm(data, address); else - __user_swp_asm(*data, address, res, temp, temp2); + res = __user_swp_asm(data, address); if (likely(res != -EAGAIN) || signal_pending(current)) break; -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7} apply FEAT_LSUI instruction to emulate deprecated swpX instruction. 
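For reference, the contract the emulation has to provide is a plain atomic exchange on the user address: the previous memory contents are returned to the data register while the new value is stored. Expressed as ordinary C, using a compiler builtin purely as an illustrative reference model (not kernel code):

  /*
   * Reference model of the A32 SWP/SWPB semantics being emulated: atomically
   * store the new value and hand back the old one. The kernel performs this
   * on a user pointer with swpt (LSUI) or an ldxr/stxr loop; this helper only
   * illustrates the required behaviour.
   */
  static inline unsigned int swp_reference_model(unsigned int *mem,
  						 unsigned int newval)
  {
  	return __atomic_exchange_n(mem, newval, __ATOMIC_SEQ_CST);
  }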
Signed-off-by: Yeoreum Yun --- arch/arm64/kernel/armv8_deprecated.c | 52 ++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index d15e35f1075c..424cf51d554f 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -86,6 +87,53 @@ static unsigned int __maybe_unused aarch32_check_condition(u32 opcode, u32 psr) * Rn = address */ +#ifdef CONFIG_AS_HAS_LSUI +static __always_inline int +__lsui_user_swp_asm(unsigned int *data, unsigned int addr) +{ + int err = 0; + unsigned int temp; + + asm volatile("// __lsui_user_swp_asm\n" + __LSUI_PREAMBLE + "1: swpt %w1, %w2, [%3]\n" + " mov %w1, %w2\n" + "2:\n" + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) + : "+r" (err), "+r" (*data), "=&r" (temp) + : "r" ((unsigned long)addr) + : "memory"); + + return err; +} + +static __always_inline int +__lsui_user_swpb_asm(unsigned int *data, unsigned int addr) +{ + unsigned char idx; + int err; + unsigned int addr_al; + union { + unsigned int var; + unsigned char raw[4]; + } data_al; + + idx = addr & (sizeof(unsigned int) - 1); + addr_al = ALIGN_DOWN(addr, sizeof(unsigned int)); + + if (get_user(data_al.var, (unsigned int *)(unsigned long)addr_al)) + return -EFAULT; + + data_al.raw[idx] = *data; + + err = __lsui_user_swp_asm(&data_al.var, addr_al); + if (!err) + *data = data_al.raw[idx]; + + return err; +} +#endif /* CONFIG_AS_HAS_LSUI */ + /* * Error-checking SWP macros implemented using ldxr{b}/stxr{b} */ @@ -128,9 +176,9 @@ LLSC_USER_SWPX() LLSC_USER_SWPX(b) #define __user_swp_asm(data, addr) \ - __llsc_user_swp_asm(data, addr) + __lsui_llsc_body(user_swp_asm, data, addr) #define __user_swpb_asm(data, addr) \ - __llsc_user_swpb_asm(data, addr) + __lsui_llsc_body(user_swpb_asm, data, addr) /* * Bit 22 of the instruction encoding distinguishes between -- LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
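The futex side of this series can be exercised from userspace even without any waiters: FUTEX_WAKE_OP performs the encoded atomic operation on the second futex word through arch_futex_atomic_op_inuser(). A standalone sketch using the XOR op, which on LSUI-capable hardware goes through the cas-based eor path (the expected values are assumptions, not captured output):

  /*
   * Standalone userspace sketch: drive arch_futex_atomic_op_inuser() via
   * FUTEX_WAKE_OP. With no waiters on either word the syscall still applies
   * the encoded operation to *uaddr2, so 'b' should end up as 0x30 ^ 0x03.
   */
  #include <linux/futex.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
  	uint32_t a = 0, b = 0x30;

  	/* val2 (number of waiters to wake on &b) travels in the timeout slot. */
  	long ret = syscall(SYS_futex, &a, FUTEX_WAKE_OP, 1, 1UL, &b,
  			   FUTEX_OP(FUTEX_OP_XOR, 0x03, FUTEX_OP_CMP_EQ, 0));

  	printf("ret=%ld b=0x%x\n", ret, b);	/* expect ret=0 b=0x33 */
  	return 0;
  }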