/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Linux-specific definitions for managing interactions with Microsoft's
 * Hyper-V hypervisor. The definitions in this file are architecture
 * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
 * that are specific to architecture <arch>.
 *
 * Definitions that are specified in the Hyper-V Top Level Functional
 * Spec (TLFS) should not go in this file, but should instead go in
 * hyperv-tlfs.h.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <acpi/acpi_numa.h>
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>

#define VTPM_BASE_ADDRESS 0xfed40000

struct ms_hyperv_info {
        u32 features;
        u32 priv_high;
        u32 misc_features;
        u32 hints;
        u32 nested_features;
        u32 max_vp_index;
        u32 max_lp_index;
        u8 vtl;
        union {
                u32 isolation_config_a;
                struct {
                        u32 paravisor_present : 1;
                        u32 reserved_a1 : 31;
                };
        };
        union {
                u32 isolation_config_b;
                struct {
                        u32 cvm_type : 4;
                        u32 reserved_b1 : 1;
                        u32 shared_gpa_boundary_active : 1;
                        u32 shared_gpa_boundary_bits : 6;
                        u32 reserved_b2 : 20;
                };
        };
        u64 shared_gpa_boundary;
};
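
/*
 * Note (informational, not derived from this header alone): on isolated
 * VMs the architecture setup code typically populates shared_gpa_boundary
 * as BIT_ULL(shared_gpa_boundary_bits) when shared_gpa_boundary_active is
 * set. Guest physical addresses above this boundary (e.g. vTOM on
 * SEV-SNP) are shared with the host; addresses below it are private.
 */
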
extern struct ms_hyperv_info ms_hyperv;
extern bool hv_nested;

extern void * __percpu *hyperv_pcpu_input_arg;
extern void * __percpu *hyperv_pcpu_output_arg;

extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);

static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
{
        struct hv_proximity_domain_info pxm_info = {};

        if (node != NUMA_NO_NODE) {
                pxm_info.domain_id = node_to_pxm(node);
                pxm_info.flags.proximity_info_valid = 1;
                pxm_info.flags.proximity_preferred = 1;
        }

        return pxm_info;
}

/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
{
        return status & HV_HYPERCALL_RESULT_MASK;
}

static inline bool hv_result_success(u64 status)
{
        return hv_result(status) == HV_STATUS_SUCCESS;
}

static inline unsigned int hv_repcomp(u64 status)
{
        /* Bits [43:32] of status have 'Reps completed' data. */
        return (status & HV_HYPERCALL_REP_COMP_MASK) >>
               HV_HYPERCALL_REP_COMP_OFFSET;
}

/*
 * Rep hypercalls. Callers of this function are expected to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
                                      void *input, void *output)
{
        u64 control = code;
        u64 status;
        u16 rep_comp;

        control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
        control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

        do {
                status = hv_do_hypercall(control, input, output);
                if (!hv_result_success(status))
                        return status;

                rep_comp = hv_repcomp(status);

                control &= ~HV_HYPERCALL_REP_START_MASK;
                control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

                touch_nmi_watchdog();
        } while (rep_comp < rep_count);

        return status;
}
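
/*
 * Illustrative use only (a sketch, not a definitive in-tree caller): a rep
 * hypercall is typically issued by filling the per-cpu input page and
 * letting hv_do_rep_hypercall() loop until all reps are completed, e.g.:
 *
 *        void *input = *this_cpu_ptr(hyperv_pcpu_input_arg);
 *
 *        ... populate the fixed header and rep list in 'input' ...
 *        status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *                                     rep_count, 0, input, NULL);
 *        if (!hv_result_success(status))
 *                ... handle the failure ...
 */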

/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline u64 hv_generate_guest_id(u64 kernel_version)
{
        u64 guest_id;

        guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
        guest_id |= (kernel_version << 16);

        return guest_id;
}
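
/*
 * Worked example (illustrative): bits 63:48 of the guest ID hold
 * HV_LINUX_VENDOR_ID and bits 47:16 hold the kernel version, so with
 * HV_LINUX_VENDOR_ID of 0x8100 and LINUX_VERSION_CODE 0x060600 (a 6.6.0
 * kernel), hv_generate_guest_id(LINUX_VERSION_CODE) yields
 * 0x8100000606000000.
 */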

/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
        /*
         * On crash we're reading some other CPU's message page and we need
         * to be careful: this other CPU may have already cleared the header
         * and the host may have already delivered some other message there.
         * In case we blindly write msg->header.message_type we're going
         * to lose it. We can still lose a message of the same type but
         * we count on the fact that there can only be one
         * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
         * on crash.
         */
        if (cmpxchg(&msg->header.message_type, old_msg_type,
                    HVMSG_NONE) != old_msg_type)
                return;

        /*
         * The cmpxchg() above does an implicit memory barrier to
         * ensure the write to MessageType (i.e., set to
         * HVMSG_NONE) happens before we read the
         * MessagePending and EOMing. Otherwise, the EOMing
         * will not deliver any more messages since there is
         * no empty slot.
         */
        if (msg->header.message_flags.msg_pending) {
                /*
                 * This will cause message queue rescan to
                 * possibly deliver another msg from the
                 * hypervisor.
                 */
                hv_set_msr(HV_MSR_EOM, 0);
        }
}

int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);

void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
void hv_setup_stimer0_handler(void (*handler)(void));
void hv_remove_stimer0_handler(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

extern int vmbus_interrupt;
extern int vmbus_irq;

extern bool hv_root_partition;

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

extern u64 (*hv_read_reference_counter)(void);

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL        U32_MAX

int __init hv_common_init(void);
void __init hv_common_free(void);
void __init ms_hyperv_late_init(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);

void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(void *addr);

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, which is
 * useful when making hypercalls or other requests that refer to
 * specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
        return hv_vp_index[cpu_number];
}

static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
                                     const struct cpumask *cpus,
                                     bool (*func)(int cpu))
{
        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
        int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;

        /* vpset.valid_bank_mask can represent up to HV_MAX_SPARSE_VCPU_BANKS banks */
        if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
                return 0;

        /*
         * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
         * structs are not cleared between calls; we risk flushing unneeded
         * vCPUs otherwise.
         */
        for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
                vpset->bank_contents[vcpu_bank] = 0;

        /*
         * Some banks may end up being empty but this is acceptable.
         */
        for_each_cpu(cpu, cpus) {
                if (func && func(cpu))
                        continue;
                vcpu = hv_cpu_number_to_vp_number(cpu);
                if (vcpu == VP_INVAL)
                        return -1;
                vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
                vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
                __set_bit(vcpu_offset, (unsigned long *)
                          &vpset->bank_contents[vcpu_bank]);
                if (vcpu_bank >= nr_bank)
                        nr_bank = vcpu_bank + 1;
        }
        vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
        return nr_bank;
}
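
/*
 * Worked example (illustrative): with HV_VCPUS_PER_SPARSE_BANK of 64, a
 * cpumask whose CPUs map to VPs 1 and 130 sets bit 1 of bank_contents[0]
 * and bit 2 of bank_contents[2], so __cpumask_to_vpset() returns 3 with
 * valid_bank_mask == 0x7 (banks 0-2 present, bank 1 left empty).
 */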

/*
 * Convert a Linux cpumask into a Hyper-V VPset. In the _skip variant,
 * 'func' is called for each CPU present in cpumask. If 'func' returns
 * true, that CPU is skipped -- i.e., that CPU from cpumask is *not*
 * added to the Hyper-V VPset. If 'func' is NULL, no CPUs are
 * skipped.
 */
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
                                   const struct cpumask *cpus)
{
        return __cpumask_to_vpset(vpset, cpus, NULL);
}

static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
                                        const struct cpumask *cpus,
                                        bool (*func)(int cpu))
{
        return __cpumask_to_vpset(vpset, cpus, func);
}

void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void ms_hyperv_late_init(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
        return HV_ISOLATION_TYPE_NONE;
}
#endif /* CONFIG_HYPERV */

#endif