linux/include/asm-generic/mshyperv.h


/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux-specific definitions for managing interactions with Microsoft's
* Hyper-V hypervisor. The definitions in this file are architecture
* independent. See arch/<arch>/include/asm/mshyperv.h for definitions
* that are specific to architecture <arch>.
*
* Definitions that are specified in the Hyper-V Top Level Functional
* Spec (TLFS) should not go in this file, but should instead go in
* hyperv-tlfs.h.
*
* Copyright (C) 2019, Microsoft, Inc.
*
* Author : Michael Kelley <mikelley@microsoft.com>
*/
#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <acpi/acpi_numa.h>
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>
#define VTPM_BASE_ADDRESS 0xfed40000
struct ms_hyperv_info {
	u32 features;
	u32 priv_high;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
	u8 vtl;
	union {
		u32 isolation_config_a;
		struct {
			u32 paravisor_present : 1;
			u32 reserved_a1 : 31;
		};
	};
	union {
		u32 isolation_config_b;
		struct {
			u32 cvm_type : 4;
			u32 reserved_b1 : 1;
			u32 shared_gpa_boundary_active : 1;
			u32 shared_gpa_boundary_bits : 6;
			u32 reserved_b2 : 20;
		};
	};
	u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
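/*
 * Illustrative sketch (editor's example, not part of the upstream
 * header): on an SEV-SNP guest using vTOM, architecture init code
 * derives shared_gpa_boundary from the isolation_config_b bitfields
 * reported by the hypervisor, roughly as below. Guest physical
 * addresses below the boundary are private (encrypted); addresses
 * above it are shared (decrypted).
 */
#if 0	/* example only */
	if (ms_hyperv.shared_gpa_boundary_active)
		ms_hyperv.shared_gpa_boundary =
			BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);
#endif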
extern bool hv_nested;
extern void * __percpu *hyperv_pcpu_input_arg;
extern void * __percpu *hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);
static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
{
	struct hv_proximity_domain_info pxm_info = {};

	if (node != NUMA_NO_NODE) {
		pxm_info.domain_id = node_to_pxm(node);
		pxm_info.flags.proximity_info_valid = 1;
		pxm_info.flags.proximity_preferred = 1;
	}

	return pxm_info;
}
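/*
 * Illustrative usage (editor's sketch): code that builds a hypercall
 * input carrying a proximity domain, e.g. when depositing memory or
 * creating a VP on a given NUMA node, fills the field with this
 * helper. 'input' and 'node' below are hypothetical placeholders.
 */
#if 0	/* example only */
	input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
#endif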
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
{
	return status & HV_HYPERCALL_RESULT_MASK;
}

static inline bool hv_result_success(u64 status)
{
	return hv_result(status) == HV_STATUS_SUCCESS;
}

static inline unsigned int hv_repcomp(u64 status)
{
	/* Bits [43:32] of status have 'Reps completed' data. */
	return (status & HV_HYPERCALL_REP_COMP_MASK) >>
		HV_HYPERCALL_REP_COMP_OFFSET;
}
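/*
 * Illustrative usage (editor's sketch): callers check the result field
 * of the status word with these helpers instead of open-coding masks.
 * The hypercall code and 'input8' value below are placeholders.
 */
#if 0	/* example only */
	u64 status = hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, input8);

	if (!hv_result_success(status))
		pr_err("Hyper-V call failed: 0x%x\n", hv_result(status));
#endif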
/*
 * Rep hypercalls. Callers of this function are expected to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if (!hv_result_success(status))
			return status;

		rep_comp = hv_repcomp(status);

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
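/*
 * Illustrative caller (editor's sketch): a TLB-flush rep hypercall
 * passes the number of GVA list entries as rep_count; the loop above
 * transparently resumes if the hypervisor completes only part of the
 * list per invocation. 'gva_n' and 'flush' are placeholders for the
 * caller's list length and hypercall input page.
 */
#if 0	/* example only */
	status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
				     gva_n, 0, flush, NULL);
#endif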
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline u64 hv_generate_guest_id(u64 kernel_version)
{
	u64 guest_id;

	guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (kernel_version << 16);

	return guest_id;
}
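/*
 * Illustrative usage (editor's sketch): the guest ID (vendor ID in
 * bits 63:48, kernel version in bits 47:16) is typically written to
 * the GUEST_OS_ID synthetic MSR during early init.
 */
#if 0	/* example only */
	u64 guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);	/* from <linux/version.h> */

	hv_set_msr(HV_MSR_GUEST_OS_ID, guest_id);
#endif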
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * If we blindly write msg->header.message_type, we're going to lose
	 * the message. We can still lose a message of the same type, but we
	 * count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (i.e., set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor.
		 */
		hv_set_msr(HV_MSR_EOM, 0);
	}
}
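/*
 * Illustrative caller (editor's sketch): a message handler reads a
 * slot from the per-CPU SynIC message page, processes the payload,
 * then frees the slot so the hypervisor can deliver the next message.
 * How 'msg' is obtained from the SynIC pages is elided here.
 */
#if 0	/* example only */
	u32 message_type = READ_ONCE(msg->header.message_type);

	if (message_type != HVMSG_NONE) {
		/* ... dispatch on message_type and handle the payload ... */
		vmbus_signal_eom(msg, message_type);
	}
#endif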
int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
void hv_setup_stimer0_handler(void (*handler)(void));
void hv_remove_stimer0_handler(void);
void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
extern int vmbus_interrupt;
extern int vmbus_irq;
extern bool hv_root_partition;
#if IS_ENABLED(CONFIG_HYPERV)
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
extern u64 (*hv_read_reference_counter)(void);
/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL U32_MAX
int __init hv_common_init(void);
void __init hv_common_free(void);
void __init ms_hyperv_late_init(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);
void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(void *addr);
/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, which is
 * useful when making hypercalls that refer to specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}
static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
				     const struct cpumask *cpus,
				     bool (*func)(int cpu))
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
	int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;

	/* vpset.valid_bank_mask can represent up to HV_MAX_SPARSE_VCPU_BANKS banks */
	if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank, as hv_tlb_flush_ex
	 * structs are not cleared between calls; otherwise we risk flushing
	 * unneeded vCPUs.
	 */
	for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty, but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		if (func && func(cpu))
			continue;
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
		vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}
/*
* Convert a Linux cpumask into a Hyper-V VPset. In the _skip variant,
* 'func' is called for each CPU present in cpumask. If 'func' returns
* true, that CPU is skipped -- i.e., that CPU from cpumask is *not*
* added to the Hyper-V VPset. If 'func' is NULL, no CPUs are
* skipped.
*/
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	return __cpumask_to_vpset(vpset, cpus, NULL);
}

static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
					const struct cpumask *cpus,
					bool (*func)(int cpu))
{
	return __cpumask_to_vpset(vpset, cpus, func);
}
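/*
 * Illustrative usage (editor's sketch): encode the target CPUs into a
 * sparse VPset for an extended hypercall, then check the returned bank
 * count. 'flush' is a placeholder hypercall input page containing an
 * embedded struct hv_vpset named hv_vp_set.
 */
#if 0	/* example only */
	nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
	if (nr_bank < 0)
		return -EINVAL;	/* a CPU had an uninitialized VP index */
#endif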
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void ms_hyperv_late_init(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
	return HV_ISOLATION_TYPE_NONE;
}
#endif /* CONFIG_HYPERV */
#endif