Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "s390:

   - Fix failure to start guests with kvm.use_gisa=0

   - Panic if (un)share fails to maintain security.

  ARM:

   - Use kvfree() for the kvmalloc'd nested MMUs array

   - Set of fixes to address warnings in W=1 builds

   - Make KVM depend on assembler support for ARMv8.4

   - Fix for vgic-debug interface for VMs without LPIs

   - Actually check ID_AA64MMFR3_EL1.S1PIE in get-reg-list selftest

   - Minor code / comment cleanups for configuring PAuth traps

   - Take kvm->arch.config_lock to prevent destruction / initialization
     race for a vCPU's CPUIF which may lead to a UAF

  x86:

   - Disallow read-only memslots for SEV-ES and SEV-SNP (and TDX)

   - Fix smatch issues

   - Small cleanups

   - Make x2APIC ID 100% readonly

   - Fix typo in uapi constant

  Generic:

   - Use synchronize_srcu_expedited() on irqfd shutdown"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (21 commits)
  KVM: SEV: uapi: fix typo in SEV_RET_INVALID_CONFIG
  KVM: x86: Disallow read-only memslots for SEV-ES and SEV-SNP (and TDX)
  KVM: eventfd: Use synchronize_srcu_expedited() on shutdown
  KVM: selftests: Add a testcase to verify x2APIC is fully readonly
  KVM: x86: Make x2APIC ID 100% readonly
  KVM: x86: Use this_cpu_ptr() instead of per_cpu_ptr(smp_processor_id())
  KVM: x86: hyper-v: Remove unused inline function kvm_hv_free_pa_page()
  KVM: SVM: Fix an error code in sev_gmem_post_populate()
  KVM: SVM: Fix uninitialized variable bug
  KVM: arm64: vgic: Hold config_lock while tearing down a CPU interface
  KVM: selftests: arm64: Correct feature test for S1PIE in get-reg-list
  KVM: arm64: Tidying up PAuth code in KVM
  KVM: arm64: vgic-debug: Exit the iterator properly w/o LPI
  KVM: arm64: Enforce dependency on an ARMv8.4-aware toolchain
  s390/uv: Panic for set and remove shared access UVC errors
  KVM: s390: fix validity interception issue when gisa is switched off
  docs: KVM: Fix register ID of SPSR_FIQ
  KVM: arm64: vgic: fix unexpected unlock sparse warnings
  KVM: arm64: fix kdoc warnings in W=1 builds
  KVM: arm64: fix override-init warnings in W=1 builds
  ...
Linus Torvalds 2024-08-14 13:46:24 -07:00
commit d07b43284a
30 changed files with 120 additions and 62 deletions

View File

@@ -2592,7 +2592,7 @@ Specifically:
   0x6030 0000 0010 004a SPSR_ABT    64  spsr[KVM_SPSR_ABT]
   0x6030 0000 0010 004c SPSR_UND    64  spsr[KVM_SPSR_UND]
   0x6030 0000 0010 004e SPSR_IRQ    64  spsr[KVM_SPSR_IRQ]
-  0x6060 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
+  0x6030 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
   0x6040 0000 0010 0054 V0          128 fp_regs.vregs[0]    [1]_
   0x6040 0000 0010 0058 V1          128 fp_regs.vregs[1]    [1]_
   ...

View File

@@ -104,7 +104,7 @@ alternative_else_nop_endif
 #define __ptrauth_save_key(ctxt, key)                                  \
         do {                                                           \
-        u64 __val;                                                     \
+                u64 __val;                                             \
         __val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);               \
         ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;                  \
         __val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);               \

View File

@@ -19,6 +19,7 @@ if VIRTUALIZATION

 menuconfig KVM
         bool "Kernel-based Virtual Machine (KVM) support"
+        depends on AS_HAS_ARMV8_4
         select KVM_COMMON
         select KVM_GENERIC_HARDWARE_ENABLING
         select KVM_GENERIC_MMU_NOTIFIER

View File

@@ -10,6 +10,9 @@ include $(srctree)/virt/kvm/Makefile.kvm
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(CONFIG_KVM) += hyp/

+CFLAGS_sys_regs.o += -Wno-override-init
+CFLAGS_handle_exit.o += -Wno-override-init
+
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
          inject_fault.o va_layout.o handle_exit.o \
          guest.o debug.o reset.o sys_regs.o stacktrace.o \

View File

@@ -164,6 +164,7 @@ static int kvm_arm_default_max_vcpus(void)
 /**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm: pointer to the KVM struct
+ * @type: kvm device type
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
@@ -521,10 +522,10 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
 {
-        if (vcpu_has_ptrauth(vcpu)) {
+        if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
                 /*
-                 * Either we're running running an L2 guest, and the API/APK
-                 * bits come from L1's HCR_EL2, or API/APK are both set.
+                 * Either we're running an L2 guest, and the API/APK bits come
+                 * from L1's HCR_EL2, or API/APK are both set.
                  */
                 if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
                         u64 val;
@@ -541,16 +542,10 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
                  * Save the host keys if there is any chance for the guest
                  * to use pauth, as the entry code will reload the guest
                  * keys in that case.
-                 * Protected mode is the exception to that rule, as the
-                 * entry into the EL2 code eagerly switch back and forth
-                 * between host and hyp keys (and kvm_hyp_ctxt is out of
-                 * reach anyway).
                  */
-                if (is_protected_kvm_enabled())
-                        return;
-
                 if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
                         struct kvm_cpu_context *ctxt;

                         ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
                         ptrauth_save_keys(ctxt);
                 }

View File

@@ -27,7 +27,6 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
-#include <asm/kvm_ptrauth.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>

View File

@@ -20,6 +20,8 @@ HOST_EXTRACFLAGS += -I$(objtree)/include
 lib-objs := clear_page.o copy_page.o memcpy.o memset.o
 lib-objs := $(addprefix ../../../lib/, $(lib-objs))

+CFLAGS_switch.nvhe.o += -Wno-override-init
+
 hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
          hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
          cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o

View File

@@ -173,9 +173,8 @@ static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
 static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
         /*
-         * Make sure we handle the exit for workarounds and ptrauth
-         * before the pKVM handling, as the latter could decide to
-         * UNDEF.
+         * Make sure we handle the exit for workarounds before the pKVM
+         * handling, as the latter could decide to UNDEF.
          */
         return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
                 kvm_handle_pvm_sysreg(vcpu, exit_code));

View File

@@ -6,6 +6,8 @@
 asflags-y := -D__KVM_VHE_HYPERVISOR__
 ccflags-y := -D__KVM_VHE_HYPERVISOR__

+CFLAGS_switch.o += -Wno-override-init
+
 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
          ../fpsimd.o ../hyp-entry.o ../exception.o

View File

@@ -786,7 +786,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
                 if (!WARN_ON(atomic_read(&mmu->refcnt)))
                         kvm_free_stage2_pgd(mmu);
         }
-        kfree(kvm->arch.nested_mmus);
+        kvfree(kvm->arch.nested_mmus);
         kvm->arch.nested_mmus = NULL;
         kvm->arch.nested_mmus_size = 0;
         kvm_uninit_stage2_mmu(kvm);
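
Context for the one-liner above: kvmalloc() and friends may satisfy a large allocation from vmalloc space rather than the slab, and only kvfree() can release either backing; kfree() on a vmalloc'd pointer corrupts memory. A minimal sketch of the correct pairing, using hypothetical names rather than the actual KVM structures:

        #include <linux/mm.h>   /* kvcalloc(), kvfree() */

        struct nested_mmu {
                u64 vttbr;      /* hypothetical payload */
        };

        static struct nested_mmu *mmus;

        static int alloc_mmus(size_t num)
        {
                /* May fall back from kmalloc() to vmalloc() for large counts. */
                mmus = kvcalloc(num, sizeof(*mmus), GFP_KERNEL_ACCOUNT);
                return mmus ? 0 : -ENOMEM;
        }

        static void free_mmus(void)
        {
                kvfree(mmus);   /* handles both backings; kfree() would not */
                mmus = NULL;
        }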

View File

@@ -45,7 +45,8 @@ static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
          * Let the xarray drive the iterator after the last SPI, as the iterator
          * has exhausted the sequentially-allocated INTID space.
          */
-        if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1)) {
+        if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1) &&
+            iter->nr_lpis) {
                 if (iter->lpi_idx < iter->nr_lpis)
                         xa_find_after(&dist->lpi_xa, &iter->intid,
                                       VGIC_LPI_MAX_INTID,
@@ -112,7 +113,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
         return iter->dist_id > 0 &&
                iter->vcpu_id == iter->nr_cpus &&
                iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) &&
-               iter->lpi_idx > iter->nr_lpis;
+               (!iter->nr_lpis || iter->lpi_idx > iter->nr_lpis);
 }

 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)

View File

@@ -438,14 +438,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
         unsigned long i;

         mutex_lock(&kvm->slots_lock);
+        mutex_lock(&kvm->arch.config_lock);

         vgic_debug_destroy(kvm);

         kvm_for_each_vcpu(i, vcpu, kvm)
                 __kvm_vgic_vcpu_destroy(vcpu);

-        mutex_lock(&kvm->arch.config_lock);
-
         kvm_vgic_dist_destroy(kvm);

         mutex_unlock(&kvm->arch.config_lock);
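
The fix above works by taking kvm->arch.config_lock for the whole teardown, so a vCPU's CPU interface can no longer be freed while another path is still initializing it. The underlying pattern, sketched here with hypothetical locks and helpers, is that nested locks must be acquired in the same global order on every path:

        #include <linux/mutex.h>

        struct obj {
                struct mutex outer_lock;        /* e.g. kvm->slots_lock */
                struct mutex inner_lock;        /* e.g. kvm->arch.config_lock */
        };

        void obj_destroy_percpu_state(struct obj *o);

        static void obj_teardown(struct obj *o)
        {
                mutex_lock(&o->outer_lock);
                mutex_lock(&o->inner_lock);

                /*
                 * Init paths that also take inner_lock are excluded here,
                 * so per-CPU state cannot be used after it is freed.
                 */
                obj_destroy_percpu_state(o);

                mutex_unlock(&o->inner_lock);
                mutex_unlock(&o->outer_lock);
        }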

View File

@@ -9,7 +9,7 @@
 #include <kvm/arm_vgic.h>
 #include "vgic.h"

-/**
+/*
  * vgic_irqfd_set_irq: inject the IRQ corresponding to the
  * irqchip routing entry
  *
@@ -75,7 +75,8 @@ static void kvm_populate_msi(struct kvm_kernel_irq_routing_entry *e,
         msi->flags = e->msi.flags;
         msi->devid = e->msi.devid;
 }
-/**
+
+/*
  * kvm_set_msi: inject the MSI corresponding to the
  * MSI routing entry
  *
@@ -98,7 +99,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
         return vgic_its_inject_msi(kvm, &msi);
 }

-/**
+/*
  * kvm_arch_set_irq_inatomic: fast-path for irqfd injection
  */
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,

View File

@@ -2040,6 +2040,7 @@ typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
  * @start_id: the ID of the first entry in the table
  *            (non zero for 2d level tables)
  * @fn: function to apply on each entry
+ * @opaque: pointer to opaque data
  *
  * Return: < 0 on error, 0 if last element was identified, 1 otherwise
  * (the last element may not be found on second level tables)
@@ -2079,7 +2080,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
         return 1;
 }

-/**
+/*
  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
  */
 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
@@ -2099,6 +2100,8 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 /**
  * vgic_its_restore_ite - restore an interrupt translation entry
  *
+ * @its: its handle
+ * @event_id: id used for indexing
  * @ptr: pointer to the ITE entry
  * @opaque: pointer to the its_device
  */
@@ -2231,6 +2234,7 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
  * @its: ITS handle
  * @dev: ITS device
  * @ptr: GPA
+ * @dte_esz: device table entry size
  */
 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
                              gpa_t ptr, int dte_esz)
@@ -2313,7 +2317,7 @@ static int vgic_its_device_cmp(void *priv, const struct list_head *a,
         return 1;
 }

-/**
+/*
  * vgic_its_save_device_tables - Save the device table and all ITT
  * into guest RAM
  *
@@ -2386,7 +2390,7 @@ static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
         return ret;
 }

-/**
+/*
  * vgic_its_restore_device_tables - Restore the device table and all ITT
  * from guest RAM to internal data structs
  */
@@ -2478,7 +2482,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
         return 1;
 }

-/**
+/*
  * vgic_its_save_collection_table - Save the collection table into
  * guest RAM
  */
@@ -2518,7 +2522,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
         return ret;
 }

-/**
+/*
  * vgic_its_restore_collection_table - reads the collection table
  * in guest memory and restores the ITS internal state. Requires the
  * BASER registers to be restored before.
@@ -2556,7 +2560,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
         return ret;
 }

-/**
+/*
  * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM
  * according to v0 ABI
  */
@@ -2571,7 +2575,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its)
         return vgic_its_save_collection_table(its);
 }

-/**
+/*
  * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
  * to internal data structs according to V0 ABI
  *

View File

@@ -370,7 +370,7 @@ static void map_all_vpes(struct kvm *kvm)
                             dist->its_vm.vpes[i]->irq));
 }

-/**
+/*
  * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
  * kvm lock and all vcpu lock must be held
  */

View File

@@ -313,7 +313,7 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
  * with all locks dropped.
  */
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
-                           unsigned long flags)
+                           unsigned long flags) __releases(&irq->irq_lock)
 {
         struct kvm_vcpu *vcpu;

View File

@@ -186,7 +186,7 @@ bool vgic_get_phys_line_level(struct vgic_irq *irq);
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
-                           unsigned long flags);
+                           unsigned long flags) __releases(&irq->irq_lock);
 void vgic_kick_vcpus(struct kvm *kvm);
 void vgic_irq_handle_resampling(struct vgic_irq *irq,
                                 bool lr_deactivated, bool lr_pending);
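
The two hunks above add sparse context annotations: __releases() declares that the function is entered with the lock held and returns with it dropped, so the checker no longer flags the unpaired unlock inside as unexpected. A minimal sketch of the idiom, with a made-up helper:

        #include <linux/spinlock.h>

        /* Entered with *lock held; always returns with it released. */
        static void finish_and_unlock(spinlock_t *lock, unsigned long flags)
                __releases(lock)
        {
                /* ... final work done under the lock ... */
                spin_unlock_irqrestore(lock, flags);
        }

The annotation compiles away in normal builds and only guides sparse (make C=1).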

View File

@@ -441,7 +441,10 @@ static inline int share(unsigned long addr, u16 cmd)

         if (!uv_call(0, (u64)&uvcb))
                 return 0;
-        return -EINVAL;
+        pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
+               uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
+               uvcb.header.rc, uvcb.header.rrc);
+        panic("System security cannot be guaranteed unless the system panics now.\n");
 }

 /*

View File

@@ -267,7 +267,12 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)

 static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
 {
-        u32 gd = virt_to_phys(kvm->arch.gisa_int.origin);
+        u32 gd;
+
+        if (!kvm->arch.gisa_int.origin)
+                return 0;
+
+        gd = virt_to_phys(kvm->arch.gisa_int.origin);

         if (gd && sclp.has_gisaf)
                 gd |= GISA_FORMAT1;

View File

@@ -2192,6 +2192,8 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 #define kvm_arch_has_private_mem(kvm) false
 #endif

+#define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)
+
 static inline u16 kvm_read_ldt(void)
 {
         u16 ldt;

View File

@@ -286,7 +286,6 @@ static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
         return HV_STATUS_ACCESS_DENIED;
 }
 static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
-static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
 static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
 {
         return false;

View File

@@ -351,10 +351,8 @@ static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
          * reversing the LDR calculation to get cluster of APICs, i.e. no
          * additional work is required.
          */
-        if (apic_x2apic_mode(apic)) {
-                WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
+        if (apic_x2apic_mode(apic))
                 return;
-        }

         if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
                                                         &cluster, &mask))) {
@@ -2966,18 +2964,28 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
                                 struct kvm_lapic_state *s, bool set)
 {
         if (apic_x2apic_mode(vcpu->arch.apic)) {
+                u32 x2apic_id = kvm_x2apic_id(vcpu->arch.apic);
                 u32 *id = (u32 *)(s->regs + APIC_ID);
                 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
                 u64 icr;

                 if (vcpu->kvm->arch.x2apic_format) {
-                        if (*id != vcpu->vcpu_id)
+                        if (*id != x2apic_id)
                                 return -EINVAL;
                 } else {
+                        /*
+                         * Ignore the userspace value when setting APIC state.
+                         * KVM's model is that the x2APIC ID is readonly, e.g.
+                         * KVM only supports delivering interrupts to KVM's
+                         * version of the x2APIC ID. However, for backwards
+                         * compatibility, don't reject attempts to set a
+                         * mismatched ID for userspace that hasn't opted into
+                         * x2apic_format.
+                         */
                         if (set)
-                                *id >>= 24;
+                                *id = x2apic_id;
                         else
-                                *id <<= 24;
+                                *id = x2apic_id << 24;
                 }

                 /*
@@ -2986,7 +2994,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
                  * split to ICR+ICR2 in userspace for backwards compatibility.
                  */
                 if (set) {
-                        *ldr = kvm_apic_calc_x2apic_ldr(*id);
+                        *ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);

                         icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
                               (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
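
The shifts in the compat path exist because the two APIC modes encode the ID differently: the legacy xAPIC APIC_ID register keeps the 8-bit ID in bits 31:24, while in x2APIC mode the register holds the full 32-bit ID. A plain-C sketch of the two encodings (illustrative only, not the KVM code):

        #include <stdint.h>

        static inline uint32_t xapic_id_reg(uint8_t id)
        {
                return (uint32_t)id << 24;      /* xAPIC: ID in bits 31:24 */
        }

        static inline uint32_t x2apic_id_reg(uint32_t id)
        {
                return id;                      /* x2APIC: full 32-bit ID */
        }

With the fix, KVM derives both encodings from its own x2apic_id instead of trusting a possibly mismatched value from userspace.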

View File

@@ -2276,7 +2276,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
         for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
                 struct sev_data_snp_launch_update fw_args = {0};
-                bool assigned;
+                bool assigned = false;
                 int level;

                 ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
@@ -2290,9 +2290,10 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
                 if (src) {
                         void *vaddr = kmap_local_pfn(pfn + i);

-                        ret = copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE);
-                        if (ret)
+                        if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
+                                ret = -EFAULT;
                                 goto err;
+                        }
                         kunmap_local(vaddr);
                 }
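
Both changes above are return-value fixes: snp_lookup_rmpentry() may fail without writing 'assigned', and copy_from_user() returns the number of bytes it could not copy (0 on success), never a negative errno, so its result must not be forwarded as 'ret'. The idiomatic shape, as a small sketch with a hypothetical helper:

        #include <linux/uaccess.h>

        static int copy_in(void *dst, const void __user *usrc, size_t len)
        {
                /* Nonzero return means bytes were missed, not an errno. */
                if (copy_from_user(dst, usrc, len))
                        return -EFAULT;
                return 0;
        }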

View File

@@ -427,8 +427,7 @@ static void kvm_user_return_msr_cpu_online(void)
 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
 {
-        unsigned int cpu = smp_processor_id();
-        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
+        struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
         int err;

         value = (value & mask) | (msrs->values[slot].host & ~mask);
@@ -450,8 +449,7 @@ EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
 static void drop_user_return_notifiers(void)
 {
-        unsigned int cpu = smp_processor_id();
-        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
+        struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);

         if (msrs->registered)
                 kvm_on_user_return(&msrs->urn);
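
this_cpu_ptr(ptr) is equivalent to per_cpu_ptr(ptr, smp_processor_id()), but it skips the explicit CPU-id lookup and lets the architecture reach per-CPU data through its dedicated base register. A minimal sketch with a hypothetical per-CPU variable:

        #include <linux/percpu.h>
        #include <linux/preempt.h>

        static DEFINE_PER_CPU(unsigned long, my_counter);

        static void bump_local_counter(void)
        {
                unsigned long *cnt;

                preempt_disable();      /* stay on this CPU while using cnt */
                cnt = this_cpu_ptr(&my_counter);
                (*cnt)++;
                preempt_enable();
        }

(The KVM paths above already run pinned to a CPU, which is why the conversion is safe there.)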

View File

@@ -715,6 +715,13 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
 }
 #endif

+#ifndef kvm_arch_has_readonly_mem
+static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
+{
+        return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
+}
+#endif
+
 struct kvm_memslots {
         u64 generation;
         atomic_long_t last_used_slot;

View File

@@ -51,6 +51,7 @@ typedef enum {
         SEV_RET_INVALID_PLATFORM_STATE,
         SEV_RET_INVALID_GUEST_STATE,
         SEV_RET_INAVLID_CONFIG,
+        SEV_RET_INVALID_CONFIG = SEV_RET_INAVLID_CONFIG,
         SEV_RET_INVALID_LEN,
         SEV_RET_ALREADY_OWNED,
         SEV_RET_INVALID_CERTIFICATE,

View File

@@ -32,13 +32,13 @@ static struct feature_id_reg feat_id_regs[] = {
         {
                 ARM64_SYS_REG(3, 0, 10, 2, 2),  /* PIRE0_EL1 */
                 ARM64_SYS_REG(3, 0, 0, 7, 3),   /* ID_AA64MMFR3_EL1 */
-                4,
+                8,
                 1
         },
         {
                 ARM64_SYS_REG(3, 0, 10, 2, 3),  /* PIR_EL1 */
                 ARM64_SYS_REG(3, 0, 0, 7, 3),   /* ID_AA64MMFR3_EL1 */
-                4,
+                8,
                 1
         }
 };
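
The corrected constant is a field shift against ID_AA64MMFR3_EL1: S1PIE lives in bits [11:8] of that register (bits [7:4] are SCTLRX), so the old value of 4 tested the wrong feature field. A small sketch of the extraction the test effectively performs:

        #include <stdint.h>

        /* ID_AA64MMFR3_EL1.S1PIE, bits [11:8]; nonzero => FEAT_S1PIE. */
        static inline unsigned int aa64mmfr3_s1pie(uint64_t mmfr3)
        {
                return (unsigned int)((mmfr3 >> 8) & 0xf);
        }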

View File

@@ -184,6 +184,33 @@ static void test_apic_id(void)
         kvm_vm_free(vm);
 }

+static void test_x2apic_id(void)
+{
+        struct kvm_lapic_state lapic = {};
+        struct kvm_vcpu *vcpu;
+        struct kvm_vm *vm;
+        int i;
+
+        vm = vm_create_with_one_vcpu(&vcpu, NULL);
+        vcpu_set_msr(vcpu, MSR_IA32_APICBASE, MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+
+        /*
+         * Try stuffing a modified x2APIC ID, KVM should ignore the value and
+         * always return the vCPU's default/readonly x2APIC ID.
+         */
+        for (i = 0; i <= 0xff; i++) {
+                *(u32 *)(lapic.regs + APIC_ID) = i << 24;
+                *(u32 *)(lapic.regs + APIC_SPIV) = APIC_SPIV_APIC_ENABLED;
+                vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic);
+                vcpu_ioctl(vcpu, KVM_GET_LAPIC, &lapic);
+                TEST_ASSERT(*((u32 *)&lapic.regs[APIC_ID]) == vcpu->id << 24,
+                            "x2APIC ID should be fully readonly");
+        }
+
+        kvm_vm_free(vm);
+}
+
 int main(int argc, char *argv[])
 {
         struct xapic_vcpu x = {
@@ -211,4 +238,5 @@ int main(int argc, char *argv[])
         kvm_vm_free(vm);

         test_apic_id();
+        test_x2apic_id();
 }

View File

@@ -97,18 +97,19 @@ irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
         mutex_lock(&kvm->irqfds.resampler_lock);

         list_del_rcu(&irqfd->resampler_link);
-        synchronize_srcu(&kvm->irq_srcu);

         if (list_empty(&resampler->list)) {
                 list_del_rcu(&resampler->link);
                 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                 /*
-                 * synchronize_srcu(&kvm->irq_srcu) already called
+                 * synchronize_srcu_expedited(&kvm->irq_srcu) already called
                  * in kvm_unregister_irq_ack_notifier().
                  */
                 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                             resampler->notifier.gsi, 0, false);
                 kfree(resampler);
+        } else {
+                synchronize_srcu_expedited(&kvm->irq_srcu);
         }

         mutex_unlock(&kvm->irqfds.resampler_lock);
@@ -126,7 +127,7 @@ irqfd_shutdown(struct work_struct *work)
         u64 cnt;

         /* Make sure irqfd has been initialized in assign path. */
-        synchronize_srcu(&kvm->irq_srcu);
+        synchronize_srcu_expedited(&kvm->irq_srcu);

         /*
          * Synchronize with the wait-queue and unhook ourselves to prevent
@@ -384,7 +385,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
         }

         list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
-        synchronize_srcu(&kvm->irq_srcu);
+        synchronize_srcu_expedited(&kvm->irq_srcu);

         mutex_unlock(&kvm->irqfds.resampler_lock);
 }
@@ -523,7 +524,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
         mutex_lock(&kvm->irq_lock);
         hlist_del_init_rcu(&kian->link);
         mutex_unlock(&kvm->irq_lock);
-        synchronize_srcu(&kvm->irq_srcu);
+        synchronize_srcu_expedited(&kvm->irq_srcu);
         kvm_arch_post_irq_ack_notifier_list_update(kvm);
 }
@@ -608,7 +609,7 @@ kvm_irqfd_release(struct kvm *kvm)

 /*
  * Take note of a change in irq routing.
- * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
+ * Caller must invoke synchronize_srcu_expedited(&kvm->irq_srcu) afterwards.
  */
 void kvm_irq_routing_update(struct kvm *kvm)
 {
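
synchronize_srcu_expedited() provides the same guarantee as synchronize_srcu() — it returns only after all SRCU read-side critical sections that began before the call have finished — but drives the grace period aggressively, trading CPU time for much lower latency on paths such as irqfd shutdown that userspace can trigger repeatedly. A minimal usage sketch with a hypothetical structure:

        #include <linux/slab.h>
        #include <linux/srcu.h>

        struct item {
                int val;
        };

        DEFINE_STATIC_SRCU(my_srcu);
        static struct item __rcu *cur_item;

        static int read_val(void)
        {
                struct item *it;
                int idx, val = -1;

                idx = srcu_read_lock(&my_srcu);
                it = srcu_dereference(cur_item, &my_srcu);
                if (it)
                        val = it->val;
                srcu_read_unlock(&my_srcu, idx);
                return val;
        }

        static void retire_item(void)
        {
                struct item *old = rcu_replace_pointer(cur_item, NULL, true);

                /* Fast grace period: all pre-existing readers are done. */
                synchronize_srcu_expedited(&my_srcu);
                kfree(old);
        }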

View File

@@ -1578,15 +1578,14 @@ static int check_memory_region_flags(struct kvm *kvm,
         if (mem->flags & KVM_MEM_GUEST_MEMFD)
                 valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

-#ifdef CONFIG_HAVE_KVM_READONLY_MEM
         /*
          * GUEST_MEMFD is incompatible with read-only memslots, as writes to
          * read-only memslots have emulated MMIO, not page fault, semantics,
          * and KVM doesn't allow emulated MMIO for private memory.
          */
-        if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
+        if (kvm_arch_has_readonly_mem(kvm) &&
+            !(mem->flags & KVM_MEM_GUEST_MEMFD))
                 valid_flags |= KVM_MEM_READONLY;
-#endif

         if (mem->flags & ~valid_flags)
                 return -EINVAL;