x86/vmware: Add TDX hypercall support

VMware hypercalls use I/O port, VMCALL or VMMCALL instructions.  Add a call to
__tdx_hypercall() in order to support TDX guests.

No changes are needed for the high bandwidth hypercalls, as only low bandwidth
ones are supported for TDX guests.

  [ bp: Massage, clear on-stack struct tdx_module_args variable. ]

Co-developed-by: Tim Merrifield <tim.merrifield@broadcom.com>
Signed-off-by: Tim Merrifield <tim.merrifield@broadcom.com>
Signed-off-by: Alexey Makhalov <alexey.makhalov@broadcom.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240613191650.9913-9-alexey.makhalov@broadcom.com
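
As a usage illustration (an editor's sketch, not part of the patch; the helper
name is hypothetical and the GETHZ output layout follows the existing vmware.c
usage): an existing low bandwidth call site needs no changes, because the
wrappers route to vmware_tdx_hypercall() whenever X86_FEATURE_TDX_GUEST is set.

  /* Hypothetical caller: works on regular and TDX VMware guests alike. */
  static unsigned long query_tsc_hz(void)
  {
          u32 hz_lo, hz_hi, dummy;

          hz_lo = vmware_hypercall3(VMWARE_CMD_GETHZ, UINT_MAX, &hz_hi, &dummy);
          if (hz_hi == UINT_MAX)          /* frequency not reported */
                  return 0;

          return ((unsigned long)hz_hi << 32) | hz_lo;
  }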

arch/x86/include/asm/vmware.h

@@ -18,6 +18,12 @@
 * arg2 - Hypercall command
 * arg3 bits [15:0] - Port number, LB and direction flags
 *
 * - Low bandwidth TDX hypercalls (x86_64 only) are similar to LB
 *   hypercalls. They also have up to 6 input and 6 output register
 *   arguments, with a different argument to register mapping:
 *   %r12 (arg0), %rbx (arg1), %r13 (arg2), %rdx (arg3),
 *   %rsi (arg4), %rdi (arg5).
 *
 * - High bandwidth (HB) hypercalls are I/O port based only. They have
 *   up to 7 input and 7 output arguments passed and returned using
 *   registers: %eax (arg0), %ebx (arg1), %ecx (arg2), %edx (arg3),
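
To summarize the added mapping (an editor's illustration, not patch content),
a low bandwidth call that would use %eax/%ebx/%ecx/%edx/%esi/%edi on a non-TDX
guest uses the following registers on the TDX path, with the values taken from
the vmware_tdx_hypercall() implementation further down:

  arg0  %eax -> %r12   (VMWARE_HYPERVISOR_MAGIC)
  arg1  %ebx -> %rbx   (in1)
  arg2  %ecx -> %r13   (cmd)
  arg3  %edx -> %rdx   (in3)
  arg4  %esi -> %rsi   (in4)
  arg5  %edi -> %rdi   (in5)

The return value (out0) comes back in %r12 instead of %eax.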
@@ -54,6 +60,12 @@
#define VMWARE_CMD_GETHZ		45
#define VMWARE_CMD_GETVCPU_INFO		68
#define VMWARE_CMD_STEALCLOCK		91
/*
 * Hypercall command mask:
 *   bits [6:0] command, range [0, 127]
 *   bits [19:16] sub-command, range [0, 15]
 */
#define VMWARE_CMD_MASK			0xf007fU

#define CPUID_VMWARE_FEATURES_ECX_VMMCALL	BIT(0)
#define CPUID_VMWARE_FEATURES_ECX_VMCALL	BIT(1)
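
As a worked example of the encoding (illustrative only; the sub-command value
is hypothetical): command 91 (VMWARE_CMD_STEALCLOCK) combined with sub-command
2 encodes as

  cmd = (2 << 16) | 91 = 0x2005b;    cmd & ~VMWARE_CMD_MASK == 0  -> accepted
  cmd = (1 << 20) | 91 = 0x10005b;   cmd & ~VMWARE_CMD_MASK != 0  -> rejected

where the second value trips the cmd & ~VMWARE_CMD_MASK check in
vmware_tdx_hypercall() below.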
@@ -64,6 +76,15 @@ extern unsigned long vmware_hypercall_slow(unsigned long cmd,
                                           u32 *out1, u32 *out2, u32 *out3,
                                           u32 *out4, u32 *out5);

#define VMWARE_TDX_VENDOR_LEAF 0x1af7e4909ULL
#define VMWARE_TDX_HCALL_FUNC  1

extern unsigned long vmware_tdx_hypercall(unsigned long cmd,
                                          unsigned long in1, unsigned long in3,
                                          unsigned long in4, unsigned long in5,
                                          u32 *out1, u32 *out2, u32 *out3,
                                          u32 *out4, u32 *out5);

/*
 * The low bandwidth call. The low word of %edx is presumed to have OUT bit
 * set. The high word of %edx may contain input data from the caller.
@@ -79,6 +100,10 @@ unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
{
        unsigned long out0;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
                                            NULL, NULL, NULL, NULL, NULL);

        if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
                return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
                                             NULL, NULL, NULL, NULL, NULL);
@@ -100,6 +125,10 @@ unsigned long vmware_hypercall3(unsigned long cmd, unsigned long in1,
{
        unsigned long out0;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
                                            out1, out2, NULL, NULL, NULL);

        if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
                return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
                                             out1, out2, NULL, NULL, NULL);
@@ -121,6 +150,10 @@ unsigned long vmware_hypercall4(unsigned long cmd, unsigned long in1,
{
        unsigned long out0;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
                                            out1, out2, out3, NULL, NULL);

        if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
                return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
                                             out1, out2, out3, NULL, NULL);
@@ -143,6 +176,10 @@ unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1,
{
        unsigned long out0;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return vmware_tdx_hypercall(cmd, in1, in3, in4, in5,
                                            NULL, out2, NULL, NULL, NULL);

        if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
                return vmware_hypercall_slow(cmd, in1, in3, in4, in5,
                                             NULL, out2, NULL, NULL, NULL);
@@ -167,6 +204,10 @@ unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1,
{
        unsigned long out0;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return vmware_tdx_hypercall(cmd, in1, in3, 0, 0,
                                            NULL, out2, out3, out4, out5);

        if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
                return vmware_hypercall_slow(cmd, in1, in3, 0, 0,
                                             NULL, out2, out3, out4, out5);
@@ -191,6 +232,10 @@ unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1,
{
        unsigned long out0;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return vmware_tdx_hypercall(cmd, in1, in3, in4, in5,
                                            out1, out2, out3, NULL, NULL);

        if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
                return vmware_hypercall_slow(cmd, in1, in3, in4, in5,
                                             out1, out2, out3, NULL, NULL);

arch/x86/kernel/cpu/vmware.c

@@ -489,6 +489,58 @@ static bool __init vmware_legacy_x2apic_available(void)
                (eax & GETVCPU_INFO_LEGACY_X2APIC);
}

#ifdef CONFIG_INTEL_TDX_GUEST
/*
 * TDCALL[TDG.VP.VMCALL] uses %rax (arg0) and %rcx (arg2). Therefore,
 * we remap those registers to %r12 and %r13, respectively.
 */
unsigned long vmware_tdx_hypercall(unsigned long cmd,
                                   unsigned long in1, unsigned long in3,
                                   unsigned long in4, unsigned long in5,
                                   u32 *out1, u32 *out2, u32 *out3,
                                   u32 *out4, u32 *out5)
{
        struct tdx_module_args args = {};

        if (!hypervisor_is_type(X86_HYPER_VMWARE)) {
                pr_warn_once("Incorrect usage\n");
                return ULONG_MAX;
        }

        if (cmd & ~VMWARE_CMD_MASK) {
                pr_warn_once("Out of range command %lx\n", cmd);
                return ULONG_MAX;
        }

        args.rbx = in1;
        args.rdx = in3;
        args.rsi = in4;
        args.rdi = in5;
        args.r10 = VMWARE_TDX_VENDOR_LEAF;
        args.r11 = VMWARE_TDX_HCALL_FUNC;
        args.r12 = VMWARE_HYPERVISOR_MAGIC;
        args.r13 = cmd;
        /* CPL */
        args.r15 = 0;

        __tdx_hypercall(&args);

        if (out1)
                *out1 = args.rbx;
        if (out2)
                *out2 = args.r13;
        if (out3)
                *out3 = args.rdx;
        if (out4)
                *out4 = args.rsi;
        if (out5)
                *out5 = args.rdi;

        return args.r12;
}
EXPORT_SYMBOL_GPL(vmware_tdx_hypercall);
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
                                        struct pt_regs *regs)
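
To make the register remapping comment above concrete, here is an editor's
sketch (not part of the patch; the function name and the cmd/in1/a/b names are
placeholders) of roughly what the TDX path does for a caller issuing
vmware_hypercall3(cmd, in1, &a, &b):

  static unsigned long vmware_hypercall3_tdx_sketch(unsigned long cmd,
                                                    unsigned long in1,
                                                    u32 *a, u32 *b)
  {
          struct tdx_module_args args = {
                  .r10 = VMWARE_TDX_VENDOR_LEAF, /* VMware vendor leaf */
                  .r11 = VMWARE_TDX_HCALL_FUNC,
                  .r12 = VMWARE_HYPERVISOR_MAGIC, /* arg0 (%eax on non-TDX) */
                  .r13 = cmd,                     /* arg2 (%ecx on non-TDX) */
                  .rbx = in1,                     /* arg1 */
                  /* arg3..arg5 (rdx/rsi/rdi) stay zero; CPL in r15 is 0 */
          };

          __tdx_hypercall(&args);

          *a = args.rbx;          /* out1 */
          *b = args.r13;          /* out2 */
          return args.r12;        /* out0 */
  }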