Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Selftest changes:

   - Cleanups for the perf test infrastructure and mapping hugepages

   - Avoid contention on mmap_sem when the guests start to run

   - Add event channel upcall support to xen_shinfo_test

  x86 changes:

   - Fixes for Xen emulation

   - Kill kvm_map_gfn() / kvm_unmap_gfn() and broken gfn_to_pfn_cache

   - Fixes for migration of 32-bit nested guests on 64-bit hypervisor

   - Compilation fixes

   - More SEV cleanups

  Generic:

   - Cap the return value of KVM_CAP_NR_VCPUS to both KVM_CAP_MAX_VCPUS
     and num_online_cpus(). Most architectures were only using one of
     the two"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (42 commits)
  KVM: x86: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: s390: Cap KVM_CAP_NR_VCPUS by num_online_cpus()
  KVM: RISC-V: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: PPC: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: MIPS: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: arm64: Cap KVM_CAP_NR_VCPUS by kvm_arm_default_max_vcpus()
  KVM: x86: Assume a 64-bit hypercall for guests with protected state
  selftests: KVM: Add /x86_64/sev_migrate_tests to .gitignore
  riscv: kvm: fix non-kernel-doc comment block
  KVM: SEV: Fix typo in and tweak name of cmd_allowed_from_miror()
  KVM: SEV: Drop a redundant setting of sev->asid during initialization
  KVM: SEV: WARN if SEV-ES is marked active but SEV is not
  KVM: SEV: Set sev_info.active after initial checks in sev_guest_init()
  KVM: SEV: Disallow COPY_ENC_CONTEXT_FROM if target has created vCPUs
  KVM: Kill kvm_map_gfn() / kvm_unmap_gfn() and gfn_to_pfn_cache
  KVM: nVMX: Use a gfn_to_hva_cache for vmptrld
  KVM: nVMX: Use kvm_read_guest_offset_cached() for nested VMCS check
  KVM: x86/xen: Use sizeof_field() instead of open-coding it
  KVM: nVMX: Use kvm_{read,write}_guest_cached() for shadow_vmcs12
  KVM: x86/xen: Fix get_attr of KVM_XEN_ATTR_TYPE_SHARED_INFO
  ...
commit c46e8ece96
Author: Linus Torvalds
Date:   2021-11-18 12:05:22 -08:00

34 changed files with 462 additions and 408 deletions

@ -223,7 +223,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
/*
* ARM64 treats KVM_CAP_NR_CPUS differently from all other
* architectures, as it does not always bound it to
* KVM_CAP_MAX_VCPUS. It should not matter much because
* this is just an advisory value.
*/
r = min_t(unsigned int, num_online_cpus(),
kvm_arm_default_max_vcpus());
break;
case KVM_CAP_MAX_VCPUS:
case KVM_CAP_MAX_VCPU_ID:

@ -1067,7 +1067,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;

@ -641,9 +641,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* implementations just count online CPUs.
*/
if (hv_enabled)
r = num_present_cpus();
r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
else
r = num_online_cpus();
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/**
/*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors:

@ -74,7 +74,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;

@ -585,6 +585,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = KVM_MAX_VCPUS;
else if (sclp.has_esca && sclp.has_64bscao)
r = KVM_S390_ESCA_CPU_SLOTS;
if (ext == KVM_CAP_NR_VCPUS)
r = min_t(unsigned int, num_online_cpus(), r);
break;
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;

@ -363,6 +363,7 @@ union kvm_mmu_extended_role {
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
unsigned int cr4_la57:1;
unsigned int efer_lma:1;
};
};

@ -125,7 +125,7 @@ static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
}
}
struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
u32 base = vcpu->arch.kvm_cpuid_base;

@ -2022,7 +2022,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
bool longmode;
longmode = is_64_bit_mode(vcpu);
longmode = is_64_bit_hypercall(vcpu);
if (longmode)
kvm_rax_write(vcpu, result);
else {
@ -2171,7 +2171,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_X86_64
if (is_64_bit_mode(vcpu)) {
if (is_64_bit_hypercall(vcpu)) {
hc.param = kvm_rcx_read(vcpu);
hc.ingpa = kvm_rdx_read(vcpu);
hc.outgpa = kvm_r8_read(vcpu);

@ -4682,6 +4682,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
/* PKEY and LA57 are active iff long mode is active. */
ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
ext.efer_lma = ____is_efer_lma(regs);
}
ext.valid = 1;

@ -237,7 +237,6 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
bool es_active = argp->id == KVM_SEV_ES_INIT;
int asid, ret;
if (kvm->created_vcpus)
@ -247,7 +246,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (unlikely(sev->active))
return ret;
sev->es_active = es_active;
sev->active = true;
sev->es_active = argp->id == KVM_SEV_ES_INIT;
asid = sev_asid_new(sev);
if (asid < 0)
goto e_no_asid;
@ -257,8 +257,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (ret)
goto e_free;
sev->active = true;
sev->asid = asid;
INIT_LIST_HEAD(&sev->regions_list);
return 0;
@ -268,6 +266,7 @@ e_free:
sev->asid = 0;
e_no_asid:
sev->es_active = false;
sev->active = false;
return ret;
}
@ -1530,7 +1529,7 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}
static bool cmd_allowed_from_miror(u32 cmd_id)
static bool is_cmd_allowed_from_mirror(u32 cmd_id)
{
/*
* Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
@ -1757,7 +1756,7 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
/* Only the enc_context_owner handles some memory enc operations. */
if (is_mirroring_enc_context(kvm) &&
!cmd_allowed_from_miror(sev_cmd.id)) {
!is_cmd_allowed_from_mirror(sev_cmd.id)) {
r = -EINVAL;
goto out;
}
@ -1990,7 +1989,12 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
mutex_unlock(&source_kvm->lock);
mutex_lock(&kvm->lock);
if (sev_guest(kvm)) {
/*
* Disallow out-of-band SEV/SEV-ES init if the target is already an
* SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
* created after SEV/SEV-ES initialization, e.g. to init intercepts.
*/
if (sev_guest(kvm) || kvm->created_vcpus) {
ret = -EINVAL;
goto e_mirror_unlock;
}

@ -247,7 +247,7 @@ static __always_inline bool sev_es_guest(struct kvm *kvm)
#ifdef CONFIG_KVM_AMD_SEV
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
return sev_guest(kvm) && sev->es_active;
return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
return false;
#endif

@ -670,33 +670,39 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct kvm_host_map map;
struct vmcs12 *shadow;
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
vmcs12->vmcs_link_pointer == INVALID_GPA)
return;
shadow = get_shadow_vmcs12(vcpu);
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
if (ghc->gpa != vmcs12->vmcs_link_pointer &&
kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
vmcs12->vmcs_link_pointer, VMCS12_SIZE))
return;
memcpy(shadow, map.hva, VMCS12_SIZE);
kvm_vcpu_unmap(vcpu, &map, false);
kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
VMCS12_SIZE);
}
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
vmcs12->vmcs_link_pointer == INVALID_GPA)
return;
kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
get_shadow_vmcs12(vcpu), VMCS12_SIZE);
if (ghc->gpa != vmcs12->vmcs_link_pointer &&
kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
vmcs12->vmcs_link_pointer, VMCS12_SIZE))
return;
kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
VMCS12_SIZE);
}
/*
@ -2830,6 +2836,17 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
return 0;
}
static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
#ifdef CONFIG_X86_64
if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
!!(vcpu->arch.efer & EFER_LMA)))
return -EINVAL;
#endif
return 0;
}
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
@ -2854,18 +2871,16 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
return -EINVAL;
#ifdef CONFIG_X86_64
ia32e = !!(vcpu->arch.efer & EFER_LMA);
ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
#else
ia32e = false;
#endif
if (ia32e) {
if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
return -EINVAL;
} else {
if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
CC((vmcs12->host_rip) >> 32))
return -EINVAL;
@ -2910,9 +2925,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
int r = 0;
struct vmcs12 *shadow;
struct kvm_host_map map;
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
struct vmcs_hdr hdr;
if (vmcs12->vmcs_link_pointer == INVALID_GPA)
return 0;
@ -2920,17 +2935,21 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
return -EINVAL;
if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
if (ghc->gpa != vmcs12->vmcs_link_pointer &&
CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
vmcs12->vmcs_link_pointer, VMCS12_SIZE)))
return -EINVAL;
if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
offsetof(struct vmcs12, hdr),
sizeof(hdr))))
return -EINVAL;
shadow = map.hva;
if (CC(hdr.revision_id != VMCS12_REVISION) ||
CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
return -EINVAL;
if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
r = -EINVAL;
kvm_vcpu_unmap(vcpu, &map, false);
return r;
return 0;
}
/*
@ -3535,6 +3554,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (nested_vmx_check_controls(vcpu, vmcs12))
return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
if (nested_vmx_check_address_space_size(vcpu, vmcs12))
return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
if (nested_vmx_check_host_state(vcpu, vmcs12))
return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
@ -5264,10 +5286,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return 1;
if (vmx->nested.current_vmptr != vmptr) {
struct kvm_host_map map;
struct vmcs12 *new_vmcs12;
struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
struct vmcs_hdr hdr;
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
if (ghc->gpa != vmptr &&
kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
/*
* Reads from an unbacked page return all 1s,
* which means that the 32 bits located at the
@ -5278,12 +5301,16 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
new_vmcs12 = map.hva;
if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
offsetof(struct vmcs12, hdr),
sizeof(hdr))) {
return nested_vmx_fail(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
(new_vmcs12->hdr.shadow_vmcs &&
if (hdr.revision_id != VMCS12_REVISION ||
(hdr.shadow_vmcs &&
!nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
kvm_vcpu_unmap(vcpu, &map, false);
return nested_vmx_fail(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
@ -5294,8 +5321,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
* Load VMCS12 from guest memory since it is not already
* cached.
*/
memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
kvm_vcpu_unmap(vcpu, &map, false);
if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
VMCS12_SIZE)) {
return nested_vmx_fail(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
set_current_vmptr(vmx, vmptr);
}

@ -141,6 +141,16 @@ struct nested_vmx {
*/
struct vmcs12 *cached_shadow_vmcs12;
/*
* GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
*/
struct gfn_to_hva_cache shadow_vmcs12_cache;
/*
* GPA to HVA cache for VMCS12
*/
struct gfn_to_hva_cache vmcs12_cache;
/*
* Indicates if the shadow vmcs or enlightened vmcs must be updated
* with the data held by struct vmcs12.

@ -3307,9 +3307,9 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
"xor %1, %1\n"
"2:\n"
_ASM_EXTABLE_UA(1b, 2b)
: "+r" (st_preempted),
"+&r" (err)
: "m" (st->preempted));
: "+q" (st_preempted),
"+&r" (err),
"+m" (st->preempted));
if (err)
goto out;
@ -4179,7 +4179,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !static_call(kvm_x86_cpu_has_accelerated_tpr)();
break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@ -8848,7 +8848,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
trace_kvm_hypercall(nr, a0, a1, a2, a3);
op_64_bit = is_64_bit_mode(vcpu);
op_64_bit = is_64_bit_hypercall(vcpu);
if (!op_64_bit) {
nr &= 0xFFFFFFFF;
a0 &= 0xFFFFFFFF;
@ -9547,12 +9547,16 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
return;
if (to_hv_vcpu(vcpu))
if (to_hv_vcpu(vcpu)) {
bitmap_or((ulong *)eoi_exit_bitmap,
vcpu->arch.ioapic_handled_vectors,
to_hv_synic(vcpu)->vec_bitmap, 256);
static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
return;
}
static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
static_call(kvm_x86_load_eoi_exitmap)(
vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
}
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,

@ -153,12 +153,24 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
int cs_db, cs_l;
WARN_ON_ONCE(vcpu->arch.guest_state_protected);
if (!is_long_mode(vcpu))
return false;
static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
return cs_l;
}
static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
/*
* If running with protected guest state, the CS register is not
* accessible. The hypercall register values will have had to been
* provided in 64-bit mode, so assume the guest is in 64-bit.
*/
return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}
static inline bool x86_exception_has_error_code(unsigned int vector)
{
static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |

@ -127,9 +127,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
state_entry_time = vx->runstate_entry_time;
state_entry_time |= XEN_RUNSTATE_UPDATE;
BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) !=
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
sizeof(state_entry_time));
BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
sizeof(state_entry_time));
if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@ -144,9 +144,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
*/
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
offsetof(struct compat_vcpu_runstate_info, state));
BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) !=
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
sizeof(vx->current_runstate));
BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
sizeof(vx->current_runstate));
if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@ -163,9 +163,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
sizeof(((struct compat_vcpu_runstate_info *)0)->time));
BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
sizeof_field(struct compat_vcpu_runstate_info, time));
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
sizeof(vx->runstate_times));
if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@ -205,9 +205,9 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
BUILD_BUG_ON(sizeof(rc) !=
sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
sizeof_field(struct vcpu_info, evtchn_upcall_pending));
BUILD_BUG_ON(sizeof(rc) !=
sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));
sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
/*
* For efficiency, this mirrors the checks for using the valid
@ -299,7 +299,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
break;
case KVM_XEN_ATTR_TYPE_SHARED_INFO:
data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_gfn);
data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn;
r = 0;
break;
@ -698,7 +698,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
kvm_hv_hypercall_enabled(vcpu))
return kvm_hv_hypercall(vcpu);
longmode = is_64_bit_mode(vcpu);
longmode = is_64_bit_hypercall(vcpu);
if (!longmode) {
params[0] = (u32)kvm_rbx_read(vcpu);
params[1] = (u32)kvm_rcx_read(vcpu);

@ -874,7 +874,7 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@ -950,12 +950,8 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,

@ -53,13 +53,6 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
struct gfn_to_pfn_cache {
u64 generation;
gfn_t gfn;
kvm_pfn_t pfn;
bool dirty;
};
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
* Memory caches are used to preallocate memory ahead of various MMU flows,

@ -23,6 +23,7 @@
/x86_64/platform_info_test
/x86_64/set_boot_cpu_id
/x86_64/set_sregs_test
/x86_64/sev_migrate_tests
/x86_64/smm_test
/x86_64/state_test
/x86_64/svm_vmcall_test

@ -47,7 +47,7 @@
#include "guest_modes.h"
/* Global variable used to synchronize all of the vCPU threads. */
static int iteration = -1;
static int iteration;
/* Defines what vCPU threads should do during a given iteration. */
static enum {
@ -215,12 +215,11 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
return true;
}
static void *vcpu_thread_main(void *arg)
static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
{
struct perf_test_vcpu_args *vcpu_args = arg;
struct kvm_vm *vm = perf_test_args.vm;
int vcpu_id = vcpu_args->vcpu_id;
int current_iteration = -1;
int current_iteration = 0;
while (spin_wait_for_next_iteration(&current_iteration)) {
switch (READ_ONCE(iteration_work)) {
@ -235,8 +234,6 @@ static void *vcpu_thread_main(void *arg)
vcpu_last_completed_iteration[vcpu_id] = current_iteration;
}
return NULL;
}
static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
@ -277,8 +274,7 @@ static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
const char *description)
{
perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
sync_global_to_guest(vm, perf_test_args);
perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
iteration_work = ITERATION_ACCESS_MEMORY;
run_iteration(vm, vcpus, description);
}
@ -296,48 +292,16 @@ static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
run_iteration(vm, vcpus, "Mark memory idle");
}
static pthread_t *create_vcpu_threads(int vcpus)
{
pthread_t *vcpu_threads;
int i;
vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
for (i = 0; i < vcpus; i++) {
vcpu_last_completed_iteration[i] = iteration;
pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
&perf_test_args.vcpu_args[i]);
}
return vcpu_threads;
}
static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
{
int i;
/* Set done to signal the vCPU threads to exit */
done = true;
for (i = 0; i < vcpus; i++)
pthread_join(vcpu_threads[i], NULL);
}
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *params = arg;
struct kvm_vm *vm;
pthread_t *vcpu_threads;
int vcpus = params->vcpus;
vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1,
params->backing_src);
params->backing_src, !overlap_memory_access);
perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
!overlap_memory_access);
vcpu_threads = create_vcpu_threads(vcpus);
perf_test_start_vcpu_threads(vcpus, vcpu_thread_main);
pr_info("\n");
access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
@ -352,8 +316,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
mark_memory_idle(vm, vcpus);
access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
terminate_vcpu_threads(vcpu_threads, vcpus);
free(vcpu_threads);
/* Set done to signal the vCPU threads to exit */
done = true;
perf_test_join_vcpu_threads(vcpus);
perf_test_destroy_vm(vm);
}

@ -42,10 +42,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;
static void *vcpu_worker(void *data)
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
int ret;
struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
struct kvm_vm *vm = perf_test_args.vm;
struct kvm_run *run;
@ -68,8 +67,6 @@ static void *vcpu_worker(void *data)
ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
ts_diff.tv_sec, ts_diff.tv_nsec);
return NULL;
}
static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
@ -282,7 +279,6 @@ struct test_params {
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
pthread_t *vcpu_threads;
pthread_t *uffd_handler_threads = NULL;
struct uffd_handler_args *uffd_args = NULL;
struct timespec start;
@ -293,9 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
int r;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
p->src_type);
perf_test_args.wr_fract = 1;
p->src_type, p->partition_vcpu_memory_access);
demand_paging_size = get_backing_src_pagesz(p->src_type);
@ -304,12 +298,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
"Failed to allocate buffer for guest data pattern");
memset(guest_data_prototype, 0xAB, demand_paging_size);
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
if (p->uffd_mode) {
uffd_handler_threads =
malloc(nr_vcpus * sizeof(*uffd_handler_threads));
@ -322,26 +310,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vm_paddr_t vcpu_gpa;
struct perf_test_vcpu_args *vcpu_args;
void *vcpu_hva;
void *vcpu_alias;
uint64_t vcpu_mem_size;
if (p->partition_vcpu_memory_access) {
vcpu_gpa = guest_test_phys_mem +
(vcpu_id * guest_percpu_mem_size);
vcpu_mem_size = guest_percpu_mem_size;
} else {
vcpu_gpa = guest_test_phys_mem;
vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
}
PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
/* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
/*
* Set up user fault fd to handle demand paging
@ -355,32 +332,18 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pipefds[vcpu_id * 2], p->uffd_mode,
p->uffd_delay, &uffd_args[vcpu_id],
vcpu_hva, vcpu_alias,
vcpu_mem_size);
vcpu_args->pages * perf_test_args.guest_page_size);
}
}
/* Export the shared variables to the guest */
sync_global_to_guest(vm, perf_test_args);
pr_info("Finished creating vCPUs and starting uffd threads\n");
clock_gettime(CLOCK_MONOTONIC, &start);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&perf_test_args.vcpu_args[vcpu_id]);
}
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
/* Wait for the vcpu threads to quit */
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
pthread_join(vcpu_threads[vcpu_id], NULL);
PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
}
perf_test_join_vcpu_threads(nr_vcpus);
ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n");
if (p->uffd_mode) {
@ -404,7 +367,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
perf_test_destroy_vm(vm);
free(guest_data_prototype);
free(vcpu_threads);
if (p->uffd_mode) {
free(uffd_handler_threads);
free(uffd_args);

@ -31,7 +31,7 @@ static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
static void *vcpu_worker(void *data)
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
int ret;
struct kvm_vm *vm = perf_test_args.vm;
@ -41,7 +41,6 @@ static void *vcpu_worker(void *data)
struct timespec ts_diff;
struct timespec total = (struct timespec){0};
struct timespec avg;
struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
run = vcpu_state(vm, vcpu_id);
@ -83,8 +82,6 @@ static void *vcpu_worker(void *data)
pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
return NULL;
}
struct test_params {
@ -170,7 +167,6 @@ static void free_bitmaps(unsigned long *bitmaps[], int slots)
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
unsigned long **bitmaps;
uint64_t guest_num_pages;
@ -186,9 +182,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct timespec clear_dirty_log_total = (struct timespec){0};
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
p->slots, p->backing_src);
p->slots, p->backing_src,
p->partition_vcpu_memory_access);
perf_test_args.wr_fract = p->wr_fract;
perf_test_set_wr_fract(vm, p->wr_fract);
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@ -203,25 +200,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vm_enable_cap(vm, &cap);
}
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
sync_global_to_guest(vm, perf_test_args);
/* Start the iterations */
iteration = 0;
host_quit = false;
clock_gettime(CLOCK_MONOTONIC, &start);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
vcpu_last_completed_iteration[vcpu_id] = -1;
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&perf_test_args.vcpu_args[vcpu_id]);
}
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
/* Allow the vCPUs to populate memory */
pr_debug("Starting iteration %d - Populating\n", iteration);
@ -290,8 +277,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Tell the vcpu thread to quit */
host_quit = true;
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
pthread_join(vcpu_threads[vcpu_id], NULL);
perf_test_join_vcpu_threads(nr_vcpus);
avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
@ -306,7 +292,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}
free_bitmaps(bitmaps, p->slots);
free(vcpu_threads);
perf_test_destroy_vm(vm);
}

@ -115,7 +115,7 @@ static void guest_code(void)
addr = guest_test_virt_mem;
addr += (READ_ONCE(random_array[i]) % guest_num_pages)
* guest_page_size;
addr &= ~(host_page_size - 1);
addr = align_down(addr, host_page_size);
*(uint64_t *)addr = READ_ONCE(iteration);
}
@ -737,14 +737,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
if (!p->phys_offset) {
guest_test_phys_mem = (vm_get_max_gfn(vm) -
guest_num_pages) * guest_page_size;
guest_test_phys_mem &= ~(host_page_size - 1);
guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
} else {
guest_test_phys_mem = p->phys_offset;
}
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem &= ~((1 << 20) - 1);
guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

@ -8,6 +8,8 @@
#ifndef SELFTEST_KVM_PERF_TEST_UTIL_H
#define SELFTEST_KVM_PERF_TEST_UTIL_H
#include <pthread.h>
#include "kvm_util.h"
/* Default guest test virtual memory offset */
@ -18,6 +20,7 @@
#define PERF_TEST_MEM_SLOT_INDEX 1
struct perf_test_vcpu_args {
uint64_t gpa;
uint64_t gva;
uint64_t pages;
@ -27,7 +30,7 @@ struct perf_test_vcpu_args {
struct perf_test_args {
struct kvm_vm *vm;
uint64_t host_page_size;
uint64_t gpa;
uint64_t guest_page_size;
int wr_fract;
@ -36,19 +39,15 @@ struct perf_test_args {
extern struct perf_test_args perf_test_args;
/*
* Guest physical memory offset of the testing memory slot.
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
extern uint64_t guest_test_phys_mem;
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src);
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access);
void perf_test_destroy_vm(struct kvm_vm *vm);
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access);
void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
void perf_test_join_vcpu_threads(int vcpus);
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */

@ -104,6 +104,7 @@ size_t get_trans_hugepagesz(void);
size_t get_def_hugetlb_pagesz(void);
const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
size_t get_backing_src_pagesz(uint32_t i);
bool is_backing_src_hugetlb(uint32_t i);
void backing_src_help(const char *flag);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
long get_run_delay(void);
@ -117,4 +118,29 @@ static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
}
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static inline uint64_t align_up(uint64_t x, uint64_t size)
{
uint64_t mask = size - 1;
TEST_ASSERT(size != 0 && !(size & (size - 1)),
"size not a power of 2: %lu", size);
return ((x + mask) & ~mask);
}
static inline uint64_t align_down(uint64_t x, uint64_t size)
{
uint64_t x_aligned_up = align_up(x, size);
if (x == x_aligned_up)
return x;
else
return x_aligned_up - size;
}
static inline void *align_ptr_up(void *x, size_t size)
{
return (void *)align_up((unsigned long)x, size);
}
#endif /* SELFTEST_KVM_TEST_UTIL_H */

@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
#ifdef __s390x__
alignment = max(0x100000, alignment);
#endif
guest_test_phys_mem &= ~(alignment - 1);
guest_test_phys_mem = align_down(guest_test_virt_mem, alignment);
/* Set up the shared data structure test_args */
test_args.vm = vm;

@ -157,8 +157,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
"memsize of 0,\n"
" phdr index: %u p_memsz: 0x%" PRIx64,
n1, (uint64_t) phdr.p_memsz);
vm_vaddr_t seg_vstart = phdr.p_vaddr;
seg_vstart &= ~(vm_vaddr_t)(vm->page_size - 1);
vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
seg_vend |= vm->page_size - 1;
size_t seg_size = seg_vend - seg_vstart + 1;

@ -22,15 +22,6 @@
static int vcpu_mmap_sz(void);
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
{
size_t mask = size - 1;
TEST_ASSERT(size != 0 && !(size & (size - 1)),
"size not a power of 2: %lu", size);
return (void *) (((size_t) x + mask) & ~mask);
}
int open_path_or_exit(const char *path, int flags)
{
int fd;
@ -191,15 +182,15 @@ const char *vm_guest_mode_string(uint32_t i)
}
const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 52, 48, 0x1000, 12 },
{ 52, 48, 0x10000, 16 },
{ 48, 48, 0x1000, 12 },
{ 48, 48, 0x10000, 16 },
{ 40, 48, 0x1000, 12 },
{ 40, 48, 0x10000, 16 },
{ 0, 0, 0x1000, 12 },
{ 47, 64, 0x1000, 12 },
{ 44, 64, 0x1000, 12 },
[VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
[VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
[VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
[VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
[VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@ -879,9 +870,17 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
alignment = 1;
#endif
/*
* When using THP mmap is not guaranteed to returned a hugepage aligned
* address so we have to pad the mmap. Padding is not needed for HugeTLB
* because mmap will always return an address aligned to the HugeTLB
* page size.
*/
if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
alignment = max(backing_src_pagesz, alignment);
ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
/* Add enough memory to align up if necessary */
if (alignment > 1)
region->mmap_size += alignment;
@ -914,8 +913,13 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
"test_malloc failed, mmap_start: %p errno: %i",
region->mmap_start, errno);
TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
"mmap_start %p is not aligned to HugeTLB page size 0x%lx",
region->mmap_start, backing_src_pagesz);
/* Align host address */
region->host_mem = align(region->mmap_start, alignment);
region->host_mem = align_ptr_up(region->mmap_start, alignment);
/* As needed perform madvise */
if ((src_type == VM_MEM_SRC_ANONYMOUS ||
@ -958,7 +962,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
"mmap of alias failed, errno: %i", errno);
/* Align host alias address */
region->host_alias = align(region->mmap_alias, alignment);
region->host_alias = align_ptr_up(region->mmap_alias, alignment);
}
}

@ -10,21 +10,40 @@
struct perf_test_args perf_test_args;
uint64_t guest_test_phys_mem;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
struct vcpu_thread {
/* The id of the vCPU. */
int vcpu_id;
/* The pthread backing the vCPU. */
pthread_t thread;
/* Set to true once the vCPU thread is up and running. */
bool running;
};
/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
/*
* Continuously write to the first 8 bytes of each page in the
* specified region.
*/
static void guest_code(uint32_t vcpu_id)
{
struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
struct perf_test_args *pta = &perf_test_args;
struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
uint64_t gva;
uint64_t pages;
int i;
@ -37,9 +56,9 @@ static void guest_code(uint32_t vcpu_id)
while (true) {
for (i = 0; i < pages; i++) {
uint64_t addr = gva + (i * perf_test_args.guest_page_size);
uint64_t addr = gva + (i * pta->guest_page_size);
if (i % perf_test_args.wr_fract == 0)
if (i % pta->wr_fract == 0)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
@ -49,35 +68,81 @@ static void guest_code(uint32_t vcpu_id)
}
}
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
struct perf_test_args *pta = &perf_test_args;
struct perf_test_vcpu_args *vcpu_args;
int vcpu_id;
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
vcpu_args = &pta->vcpu_args[vcpu_id];
vcpu_args->vcpu_id = vcpu_id;
if (partition_vcpu_memory_access) {
vcpu_args->gva = guest_test_virt_mem +
(vcpu_id * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
pta->guest_page_size;
vcpu_args->gpa = pta->gpa + (vcpu_id * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
pta->guest_page_size;
vcpu_args->gpa = pta->gpa;
}
vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
(vcpu_args->pages * pta->guest_page_size));
}
}
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src)
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
struct perf_test_args *pta = &perf_test_args;
struct kvm_vm *vm;
uint64_t guest_num_pages;
uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
int i;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
perf_test_args.host_page_size = getpagesize();
perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
/* By default vCPUs will write to memory. */
pta->wr_fract = 1;
/*
* Snapshot the non-huge page size. This is used by the guest code to
* access/dirty pages at the logging granularity.
*/
pta->guest_page_size = vm_guest_mode_params[mode].page_size;
guest_num_pages = vm_adjust_num_guest_pages(mode,
(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
(vcpus * vcpu_memory_bytes) / pta->guest_page_size);
TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
"Guest memory size is not host page size aligned.");
TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
TEST_ASSERT(vcpu_memory_bytes % pta->guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
TEST_ASSERT(guest_num_pages % slots == 0,
"Guest memory cannot be evenly divided into %d slots.",
slots);
/*
* Pass guest_num_pages to populate the page tables for test memory.
* The memory is also added to memslot 0, but that's a benign side
* effect as KVM allows aliasing HVAs in meslots.
*/
vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
0, guest_code, NULL);
guest_num_pages, 0, guest_code, NULL);
perf_test_args.vm = vm;
pta->vm = vm;
/*
* If there should be more memory in the guest test region than there
@ -90,20 +155,18 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
guest_num_pages, vm_get_max_gfn(vm), vcpus,
vcpu_memory_bytes);
guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
perf_test_args.guest_page_size;
guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
pta->gpa = align_down(pta->gpa, backing_src_pagesz);
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem &= ~((1 << 20) - 1);
pta->gpa = align_down(pta->gpa, 1 << 20);
#endif
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa);
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
uint64_t region_pages = guest_num_pages / slots;
vm_paddr_t region_start = guest_test_phys_mem +
region_pages * perf_test_args.guest_page_size * i;
vm_paddr_t region_start = pta->gpa + region_pages * pta->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start,
PERF_TEST_MEM_SLOT_INDEX + i,
@ -111,10 +174,15 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
}
/* Do mapping for the demand paging memory slot */
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);
perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);
ucall_init(vm, NULL);
/* Export the shared variables to the guest. */
sync_global_to_guest(vm, perf_test_args);
return vm;
}
@ -124,36 +192,60 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
kvm_vm_free(vm);
}
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
{
perf_test_args.wr_fract = wr_fract;
sync_global_to_guest(vm, perf_test_args);
}
static void *vcpu_thread_main(void *data)
{
struct vcpu_thread *vcpu = data;
WRITE_ONCE(vcpu->running, true);
/*
* Wait for all vCPU threads to be up and running before calling the test-
* provided vCPU thread function. This prevents thread creation (which
* requires taking the mmap_sem in write mode) from interfering with the
* guest faulting in its memory.
*/
while (!READ_ONCE(all_vcpu_threads_running))
;
vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]);
return NULL;
}
void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *))
{
vm_paddr_t vcpu_gpa;
struct perf_test_vcpu_args *vcpu_args;
int vcpu_id;
vcpu_thread_fn = vcpu_fn;
WRITE_ONCE(all_vcpu_threads_running, false);
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id];
vcpu_args->vcpu_id = vcpu_id;
if (partition_vcpu_memory_access) {
vcpu_args->gva = guest_test_virt_mem +
(vcpu_id * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
perf_test_args.guest_page_size;
vcpu_gpa = guest_test_phys_mem +
(vcpu_id * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
perf_test_args.guest_page_size;
vcpu_gpa = guest_test_phys_mem;
}
vcpu->vcpu_id = vcpu_id;
WRITE_ONCE(vcpu->running, false);
vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_gpa, vcpu_gpa +
(vcpu_args->pages * perf_test_args.guest_page_size));
pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
}
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
while (!READ_ONCE(vcpu_threads[vcpu_id].running))
;
}
WRITE_ONCE(all_vcpu_threads_running, true);
}
void perf_test_join_vcpu_threads(int vcpus)
{
int vcpu_id;
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
pthread_join(vcpu_threads[vcpu_id].thread, NULL);
}

@ -283,6 +283,11 @@ size_t get_backing_src_pagesz(uint32_t i)
}
}
bool is_backing_src_hugetlb(uint32_t i)
{
return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB);
}
static void print_available_backing_src_types(const char *prefix)
{
int i;

@ -36,11 +36,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus = true;
static void *vcpu_worker(void *data)
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
int ret;
struct perf_test_vcpu_args *vcpu_args =
(struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
struct kvm_vm *vm = perf_test_args.vm;
struct kvm_run *run;
@ -59,8 +57,6 @@ static void *vcpu_worker(void *data)
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
return NULL;
}
struct memslot_antagonist_args {
@ -80,7 +76,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
* Add the dummy memslot just below the perf_test_util memslot, which is
* at the top of the guest physical address space.
*/
gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
gpa = perf_test_args.gpa - pages * vm_get_page_size(vm);
for (i = 0; i < nr_modifications; i++) {
usleep(delay);
@ -100,29 +96,15 @@ struct test_params {
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
int vcpu_id;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
VM_MEM_SRC_ANONYMOUS);
perf_test_args.wr_fract = 1;
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
/* Export the shared variables to the guest */
sync_global_to_guest(vm, perf_test_args);
VM_MEM_SRC_ANONYMOUS,
p->partition_vcpu_memory_access);
pr_info("Finished creating vCPUs\n");
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&perf_test_args.vcpu_args[vcpu_id]);
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
@ -131,16 +113,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
run_vcpus = false;
/* Wait for the vcpu threads to quit */
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
pthread_join(vcpu_threads[vcpu_id], NULL);
perf_test_join_vcpu_threads(nr_vcpus);
pr_info("All vCPU threads joined\n");
ucall_uninit(vm);
kvm_vm_free(vm);
free(vcpu_threads);
perf_test_destroy_vm(vm);
}
static void help(char *name)

@ -24,8 +24,12 @@
#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
#define RUNSTATE_ADDR (SHINFO_REGION_GPA + PAGE_SIZE + 0x20)
#define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40)
#define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + 0x20)
#define VCPU_INFO_VADDR (SHINFO_REGION_GVA + 0x40)
#define EVTCHN_VECTOR 0x10
static struct kvm_vm *vm;
@ -56,15 +60,44 @@ struct vcpu_runstate_info {
uint64_t time[4];
};
struct arch_vcpu_info {
unsigned long cr2;
unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
};
struct vcpu_info {
uint8_t evtchn_upcall_pending;
uint8_t evtchn_upcall_mask;
unsigned long evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
#define RUNSTATE_running 0
#define RUNSTATE_runnable 1
#define RUNSTATE_blocked 2
#define RUNSTATE_offline 3
static void evtchn_handler(struct ex_regs *regs)
{
struct vcpu_info *vi = (void *)VCPU_INFO_VADDR;
vi->evtchn_upcall_pending = 0;
GUEST_SYNC(0x20);
}
static void guest_code(void)
{
struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
__asm__ __volatile__(
"sti\n"
"nop\n"
);
/* Trigger an interrupt injection */
GUEST_SYNC(0);
/* Test having the host set runstates manually */
GUEST_SYNC(RUNSTATE_runnable);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
@ -153,7 +186,7 @@ int main(int argc, char *argv[])
struct kvm_xen_vcpu_attr vi = {
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
.u.gpa = SHINFO_REGION_GPA + 0x40,
.u.gpa = VCPU_INFO_ADDR,
};
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi);
@ -163,6 +196,16 @@ int main(int argc, char *argv[])
};
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock);
struct kvm_xen_hvm_attr vec = {
.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
.u.vector = EVTCHN_VECTOR,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
if (do_runstate_tests) {
struct kvm_xen_vcpu_attr st = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
@ -171,9 +214,14 @@ int main(int argc, char *argv[])
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st);
}
struct vcpu_info *vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
vinfo->evtchn_upcall_pending = 0;
struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
rs->state = 0x5a;
bool evtchn_irq_expected = false;
for (;;) {
volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct ucall uc;
@ -193,16 +241,21 @@ int main(int argc, char *argv[])
struct kvm_xen_vcpu_attr rst;
long rundelay;
/* If no runstate support, bail out early */
if (!do_runstate_tests)
goto done;
TEST_ASSERT(rs->state_entry_time == rs->time[0] +
rs->time[1] + rs->time[2] + rs->time[3],
"runstate times don't add up");
if (do_runstate_tests)
TEST_ASSERT(rs->state_entry_time == rs->time[0] +
rs->time[1] + rs->time[2] + rs->time[3],
"runstate times don't add up");
switch (uc.args[1]) {
case RUNSTATE_running...RUNSTATE_offline:
case 0:
evtchn_irq_expected = true;
vinfo->evtchn_upcall_pending = 1;
break;
case RUNSTATE_runnable...RUNSTATE_offline:
TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
if (!do_runstate_tests)
goto done;
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
rst.u.runstate.state = uc.args[1];
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst);
@ -236,6 +289,10 @@ int main(int argc, char *argv[])
sched_yield();
} while (get_run_delay() < rundelay);
break;
case 0x20:
TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
evtchn_irq_expected = false;
break;
}
break;
}

@ -2548,72 +2548,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
{
if (pfn == 0)
return;
if (cache)
cache->pfn = cache->gfn = 0;
if (dirty)
kvm_release_pfn_dirty(pfn);
else
kvm_release_pfn_clean(pfn);
}
static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
struct gfn_to_pfn_cache *cache, u64 gen)
{
kvm_release_pfn(cache->pfn, cache->dirty, cache);
cache->pfn = gfn_to_pfn_memslot(slot, gfn);
cache->gfn = gfn;
cache->dirty = false;
cache->generation = gen;
}
static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache,
bool atomic)
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{
kvm_pfn_t pfn;
void *hva = NULL;
struct page *page = KVM_UNMAPPED_PAGE;
struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
u64 gen = slots->generation;
if (!map)
return -EINVAL;
if (cache) {
if (!cache->pfn || cache->gfn != gfn ||
cache->generation != gen) {
if (atomic)
return -EAGAIN;
kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
}
pfn = cache->pfn;
} else {
if (atomic)
return -EAGAIN;
pfn = gfn_to_pfn_memslot(slot, gfn);
}
pfn = gfn_to_pfn(vcpu->kvm, gfn);
if (is_error_noslot_pfn(pfn))
return -EINVAL;
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (atomic)
hva = kmap_atomic(page);
else
hva = kmap(page);
hva = kmap(page);
#ifdef CONFIG_HAS_IOMEM
} else if (!atomic) {
hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
} else {
return -EINVAL;
hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
}
@ -2627,27 +2591,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
return 0;
}
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool atomic)
{
return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
cache, atomic);
}
EXPORT_SYMBOL_GPL(kvm_map_gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{
return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
NULL, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_map);
static void __kvm_unmap_gfn(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache,
bool dirty, bool atomic)
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{
if (!map)
return;
@ -2655,45 +2601,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
if (!map->hva)
return;
if (map->page != KVM_UNMAPPED_PAGE) {
if (atomic)
kunmap_atomic(map->hva);
else
kunmap(map->page);
}
if (map->page != KVM_UNMAPPED_PAGE)
kunmap(map->page);
#ifdef CONFIG_HAS_IOMEM
else if (!atomic)
memunmap(map->hva);
else
WARN_ONCE(1, "Unexpected unmapping in atomic context");
memunmap(map->hva);
#endif
if (dirty)
mark_page_dirty_in_slot(kvm, memslot, map->gfn);
kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
if (cache)
cache->dirty |= dirty;
else
kvm_release_pfn(map->pfn, dirty, NULL);
kvm_release_pfn(map->pfn, dirty);
map->hva = NULL;
map->page = NULL;
}
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
{
__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
cache, dirty, atomic);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{
__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
map, NULL, dirty, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)