From: Kai Huang

TDX host core code implements three seamcall*() helpers to make SEAMCALLs
to the TDX module.  Currently, they are implemented in <asm/tdx.h> and are
therefore exposed to any other kernel code that includes <asm/tdx.h>.

However, other than the TDX host core, the seamcall*() helpers are not
expected to be used by other kernel code directly.  For instance, the TDX
host core exports a dedicated wrapper function for each SEAMCALL that KVM
uses.
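Each such wrapper fills in the SEAMCALL arguments and then goes through the
seamcall*() helpers internally, roughly like the sketch below (illustrative
only; tdh_example() and TDH_EXAMPLE are placeholder names, not code touched
by this patch):

  /*
   * Illustrative sketch only: tdh_example() and TDH_EXAMPLE are
   * placeholders.  The real wrappers in tdx.c follow this shape:
   * build struct tdx_module_args, then make the SEAMCALL via the
   * seamcall*() helpers rather than calling __seamcall*() directly.
   */
  u64 tdh_example(u64 page_pa)
  {
          struct tdx_module_args args = {
                  .rcx = page_pa,
          };

          return seamcall(TDH_EXAMPLE, &args);
  }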
Move seamcall*() and related code out of <asm/tdx.h> and make them visible
only to the TDX host core.  Since the TDX host core's tdx.c is already very
heavy, don't put the low-level seamcall*() code there; put it in a new
dedicated "seamcall.h" instead.

Also, tdx.c currently has seamcall_prerr*() helpers which additionally
print an error message when a seamcall*() fails.  Move them to "seamcall.h"
as well.  This way, all low-level SEAMCALL helpers live in one dedicated
place, which is much more readable.

Signed-off-by: Kai Huang
Signed-off-by: Chao Gao
Reviewed-by: Zhenzhong Duan
---
v2:
 - new
---
 arch/x86/include/asm/tdx.h       | 47 ---------------
 arch/x86/virt/vmx/tdx/seamcall.h | 99 ++++++++++++++++++++++++++++++++
 arch/x86/virt/vmx/tdx/tdx.c      | 46 +--------------
 3 files changed, 100 insertions(+), 92 deletions(-)
 create mode 100644 arch/x86/virt/vmx/tdx/seamcall.h

diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 6b338d7f01b7..cb2219302dfc 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -97,54 +97,7 @@ static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
 #endif /* CONFIG_INTEL_TDX_GUEST && CONFIG_KVM_GUEST */

 #ifdef CONFIG_INTEL_TDX_HOST
-u64 __seamcall(u64 fn, struct tdx_module_args *args);
-u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
-u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
 void tdx_init(void);
-
-#include
-#include
-#include
-
-typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);
-
-static __always_inline u64 __seamcall_dirty_cache(sc_func_t func, u64 fn,
-                                                  struct tdx_module_args *args)
-{
-        lockdep_assert_preemption_disabled();
-
-        /*
-         * SEAMCALLs are made to the TDX module and can generate dirty
-         * cachelines of TDX private memory.  Mark cache state incoherent
-         * so that the cache can be flushed during kexec.
-         *
-         * This needs to be done before actually making the SEAMCALL,
-         * because kexec-ing CPU could send NMI to stop remote CPUs,
-         * in which case even disabling IRQ won't help here.
-         */
-        this_cpu_write(cache_state_incoherent, true);
-
-        return func(fn, args);
-}
-
-static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
-                                    struct tdx_module_args *args)
-{
-        int retry = RDRAND_RETRY_LOOPS;
-        u64 ret;
-
-        do {
-                preempt_disable();
-                ret = __seamcall_dirty_cache(func, fn, args);
-                preempt_enable();
-        } while (ret == TDX_RND_NO_ENTROPY && --retry);
-
-        return ret;
-}
-
-#define seamcall(_fn, _args)            sc_retry(__seamcall, (_fn), (_args))
-#define seamcall_ret(_fn, _args)        sc_retry(__seamcall_ret, (_fn), (_args))
-#define seamcall_saved_ret(_fn, _args)  sc_retry(__seamcall_saved_ret, (_fn), (_args))
 int tdx_cpu_enable(void);
 int tdx_enable(void);
 const char *tdx_dump_mce_info(struct mce *m);
diff --git a/arch/x86/virt/vmx/tdx/seamcall.h b/arch/x86/virt/vmx/tdx/seamcall.h
new file mode 100644
index 000000000000..0912e03fabfe
--- /dev/null
+++ b/arch/x86/virt/vmx/tdx/seamcall.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Intel Corporation */
+#ifndef _X86_VIRT_SEAMCALL_H
+#define _X86_VIRT_SEAMCALL_H
+
+#include
+#include
+#include
+#include
+#include
+
+u64 __seamcall(u64 fn, struct tdx_module_args *args);
+u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
+u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
+
+typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);
+
+static __always_inline u64 __seamcall_dirty_cache(sc_func_t func, u64 fn,
+                                                  struct tdx_module_args *args)
+{
+        lockdep_assert_preemption_disabled();
+
+        /*
+         * SEAMCALLs are made to the TDX module and can generate dirty
+         * cachelines of TDX private memory.  Mark cache state incoherent
+         * so that the cache can be flushed during kexec.
+         *
+         * This needs to be done before actually making the SEAMCALL,
+         * because kexec-ing CPU could send NMI to stop remote CPUs,
+         * in which case even disabling IRQ won't help here.
+         */
+        this_cpu_write(cache_state_incoherent, true);
+
+        return func(fn, args);
+}
+
+static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
+                                    struct tdx_module_args *args)
+{
+        int retry = RDRAND_RETRY_LOOPS;
+        u64 ret;
+
+        do {
+                ret = func(fn, args);
+        } while (ret == TDX_RND_NO_ENTROPY && --retry);
+
+        return ret;
+}
+
+#define seamcall(_fn, _args)            sc_retry(__seamcall, (_fn), (_args))
+#define seamcall_ret(_fn, _args)        sc_retry(__seamcall_ret, (_fn), (_args))
+#define seamcall_saved_ret(_fn, _args)  sc_retry(__seamcall_saved_ret, (_fn), (_args))
+
+typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args);
+
+static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args)
+{
+        pr_err("SEAMCALL (%llu) failed: %#016llx\n", fn, err);
+}
+
+static inline void seamcall_err_ret(u64 fn, u64 err,
+                                    struct tdx_module_args *args)
+{
+        seamcall_err(fn, err, args);
+        pr_err("RCX %#016llx RDX %#016llx R08 %#016llx\n",
+                args->rcx, args->rdx, args->r8);
+        pr_err("R09 %#016llx R10 %#016llx R11 %#016llx\n",
+                args->r9, args->r10, args->r11);
+}
+
+static __always_inline int sc_retry_prerr(sc_func_t func,
+                                          sc_err_func_t err_func,
+                                          u64 fn, struct tdx_module_args *args)
+{
+        u64 sret = sc_retry(func, fn, args);
+
+        if (sret == TDX_SUCCESS)
+                return 0;
+
+        if (sret == TDX_SEAMCALL_VMFAILINVALID)
+                return -ENODEV;
+
+        if (sret == TDX_SEAMCALL_GP)
+                return -EOPNOTSUPP;
+
+        if (sret == TDX_SEAMCALL_UD)
+                return -EACCES;
+
+        err_func(fn, sret, args);
+        return -EIO;
+}
+
+#define seamcall_prerr(__fn, __args)                                    \
+        sc_retry_prerr(__seamcall, seamcall_err, (__fn), (__args))
+
+#define seamcall_prerr_ret(__fn, __args)                                \
+        sc_retry_prerr(__seamcall_ret, seamcall_err_ret, (__fn), (__args))
+
+#endif
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 2218bb42af40..b44723ef4a14 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -39,6 +39,7 @@
 #include
 #include
 #include
+#include "seamcall.h"
 #include "tdx.h"

 static u32 tdx_global_keyid __ro_after_init;
@@ -59,51 +60,6 @@ static LIST_HEAD(tdx_memlist);

 static struct tdx_sys_info tdx_sysinfo;

-typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args);
-
-static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args)
-{
-        pr_err("SEAMCALL (%llu) failed: %#016llx\n", fn, err);
-}
-
-static inline void seamcall_err_ret(u64 fn, u64 err,
-                                    struct tdx_module_args *args)
-{
-        seamcall_err(fn, err, args);
-        pr_err("RCX %#016llx RDX %#016llx R08 %#016llx\n",
-                args->rcx, args->rdx, args->r8);
-        pr_err("R09 %#016llx R10 %#016llx R11 %#016llx\n",
-                args->r9, args->r10, args->r11);
-}
-
-static __always_inline int sc_retry_prerr(sc_func_t func,
-                                          sc_err_func_t err_func,
-                                          u64 fn, struct tdx_module_args *args)
-{
-        u64 sret = sc_retry(func, fn, args);
-
-        if (sret == TDX_SUCCESS)
-                return 0;
-
-        if (sret == TDX_SEAMCALL_VMFAILINVALID)
-                return -ENODEV;
-
-        if (sret == TDX_SEAMCALL_GP)
-                return -EOPNOTSUPP;
-
-        if (sret == TDX_SEAMCALL_UD)
-                return -EACCES;
-
-        err_func(fn, sret, args);
-        return -EIO;
-}
-
-#define seamcall_prerr(__fn, __args)                                    \
-        sc_retry_prerr(__seamcall, seamcall_err, (__fn), (__args))
-
-#define seamcall_prerr_ret(__fn, __args)                                \
-        sc_retry_prerr(__seamcall_ret, seamcall_err_ret, (__fn), (__args))
-
 /*
  * Do the module global initialization once and return its result.
  * It can be done on any cpu.  It's always called with interrupts
-- 
2.47.3