Bug fixes (ARM, s390, x86)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Bug fixes (ARM, s390, x86)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: async_pf: avoid async pf injection when in guest mode
  KVM: cpuid: Fix read/write out-of-bounds vulnerability in cpuid emulation
  arm: KVM: Allow unaligned accesses at HYP
  arm64: KVM: Allow unaligned accesses at EL2
  arm64: KVM: Preserve RES1 bits in SCTLR_EL2
  KVM: arm/arm64: Handle possible NULL stage2 pud when ageing pages
  KVM: nVMX: Fix exception injection
  kvm: async_pf: fix rcu_irq_enter() with irqs enabled
  KVM: arm/arm64: vgic-v3: Fix nr_pre_bits bitfield extraction
  KVM: s390: fix ais handling vs cpu model
  KVM: arm/arm64: Fix isues with GICv2 on GICv3 migration
commit 9d0eb46246
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
-ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
-THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ARM(	ldr	r2, =(HSCTLR_M)					)
+THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
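The two hunks above stop setting HSCTLR.A (strict alignment checking) in the ARM HYP init code. The motivation is that compilers may legally emit unaligned accesses for ordinary C; a minimal standalone sketch (hypothetical struct, not kernel code) of the kind of access that would trap at HYP if the A bit were left set:

    #include <stdint.h>
    #include <stdio.h>

    /* The 'len' member sits at offset 1, so it is inherently misaligned;
     * a compiler targeting ARMv6+ may implement this store as a plain
     * unaligned str/stur, which faults when strict alignment checking
     * is enabled. */
    struct __attribute__((packed)) hdr {
        uint8_t  tag;
        uint32_t len;
    };

    int main(void)
    {
        uint8_t buf[8] = { 0 };
        struct hdr *h = (struct hdr *)buf;

        h->len = 0x12345678;      /* potentially unaligned store */
        printf("0x%x\n", h->len); /* potentially unaligned load  */
        return 0;
    }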
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
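To see what the new `ldr x4, =(...)` immediate works out to, here is a small host-side sketch. The A and M positions come from the hunk context above; the C, SA and I positions follow the arm64 sysreg definitions and are an assumption here. The duplicated `(1 << 16)` in the hunk is harmless, since OR is idempotent.

    #include <stdint.h>
    #include <stdio.h>

    #define SCTLR_ELx_M    (1u << 0)
    #define SCTLR_ELx_A    (1u << 1)
    #define SCTLR_ELx_C    (1u << 2)
    #define SCTLR_ELx_SA   (1u << 3)
    #define SCTLR_ELx_I    (1u << 12)
    #define SCTLR_EL2_RES1 ((1u << 4) | (1u << 5) | (1u << 11) | (1u << 16) | \
                            (1u << 18) | (1u << 22) | (1u << 23) | \
                            (1u << 28) | (1u << 29))
    #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
                             SCTLR_ELx_SA | SCTLR_ELx_I)

    int main(void)
    {
        uint32_t val = SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A);

        /* RES1 bits plus M/C/SA/I are set; the A bit stays clear. */
        printf("SCTLR_EL2 = %#010x, A=%u\n", val, !!(val & SCTLR_ELx_A));
        return 0;
    }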
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				    ICC_BPR1_EL1_SHIFT;
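These hunks replace uses of the raw `vmcr.ctlr` word with discrete `cbpr`/`eoim` fields, converting with an explicit shift-and-mask at each register boundary. A standalone round-trip sketch of that discipline, with CBPR at bit 0 and EOImode at bit 1 per the ICC_CTLR_EL1 layout (treat the exact positions as illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CBPR_SHIFT    0
    #define CBPR_MASK     (1u << CBPR_SHIFT)
    #define EOIMODE_SHIFT 1
    #define EOIMODE_MASK  (1u << EOIMODE_SHIFT)

    struct vmcr_fields { uint32_t cbpr, eoim; };

    /* Guest write: decompose the register word into per-bit fields. */
    static void ctlr_write(struct vmcr_fields *v, uint32_t val)
    {
        v->cbpr = (val & CBPR_MASK) >> CBPR_SHIFT;
        v->eoim = (val & EOIMODE_MASK) >> EOIMODE_SHIFT;
    }

    /* Guest read: re-assemble the register word from the fields. */
    static uint32_t ctlr_read(const struct vmcr_fields *v)
    {
        return ((v->cbpr << CBPR_SHIFT) & CBPR_MASK) |
               ((v->eoim << EOIMODE_SHIFT) & EOIMODE_MASK);
    }

    int main(void)
    {
        struct vmcr_fields v;

        ctlr_write(&v, 0x3);
        printf("0x%x\n", ctlr_read(&v)); /* 0x3: fields survive the trip */
        return 0;
    }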
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
-	int ais_enabled;
 };
 
 struct kvm_hw_wp_info_arch {
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_ais_req req;
 	int ret = 0;
 
-	if (!fi->ais_enabled)
+	if (!test_kvm_facility(kvm, 72))
 		return -ENOTSUPP;
 
 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
 	};
 	int ret = 0;
 
-	if (!fi->ais_enabled || !adapter->suppressible)
+	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
 		return kvm_s390_inject_vm(kvm, &s390int);
 
 	mutex_lock(&fi->ais_lock);
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	} else {
 		set_kvm_facility(kvm->arch.model.fac_mask, 72);
 		set_kvm_facility(kvm->arch.model.fac_list, 72);
-		kvm->arch.float_int.ais_enabled = 1;
 		r = 0;
 	}
 	mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.float_int.ais_lock);
 	kvm->arch.float_int.simm = 0;
 	kvm->arch.float_int.nimm = 0;
-	kvm->arch.float_int.ais_enabled = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
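The three s390 hunks replace the ad-hoc `ais_enabled` flag with `test_kvm_facility(kvm, 72)`, so AIS support is derived from the configured CPU model instead of a second copy of the same state. A sketch of the facility-bit lookup convention (bit 0 is the most significant bit of byte 0, as in s390's __test_facility(); names here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    static int test_facility_bit(const unsigned char *fac, unsigned int nr)
    {
        return (fac[nr >> 3] & (0x80 >> (nr & 7))) != 0;
    }

    int main(void)
    {
        unsigned char fac[16];

        memset(fac, 0, sizeof(fac));
        fac[72 >> 3] |= 0x80 >> (72 & 7);           /* set facility 72 (AIS) */

        printf("%d\n", test_facility_bit(fac, 72)); /* 1 */
        printf("%d\n", test_facility_bit(fac, 71)); /* 0 */
        return 0;
    }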
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
 			 */
 			rcu_irq_exit();
 			native_safe_halt();
-			rcu_irq_enter();
 			local_irq_disable();
+			rcu_irq_enter();
 		}
 	}
 	if (!n.halted)
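The hunk only swaps two lines, but the ordering is the whole point: native_safe_halt() returns with interrupts enabled, and rcu_irq_enter() must not run in that window, because an interrupt arriving in between performs its own RCU irq entry/exit and the nesting accounting goes wrong. A stub sequence (stand-in functions, not the kernel API) showing the fixed order:

    #include <stdio.h>

    static void rcu_irq_exit_stub(void)      { puts("rcu_irq_exit()"); }
    static void native_safe_halt_stub(void)  { puts("sti; hlt (returns with irqs on)"); }
    static void local_irq_disable_stub(void) { puts("cli"); }
    static void rcu_irq_enter_stub(void)     { puts("rcu_irq_enter() (irqs now off)"); }

    int main(void)
    {
        rcu_irq_exit_stub();
        native_safe_halt_stub();
        /* Before the fix, rcu_irq_enter() ran here, with interrupts
         * still enabled: an interrupt in this window could nest its
         * own RCU irq entry inside ours. */
        local_irq_disable_stub();
        rcu_irq_enter_stub();
        return 0;
    }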
@@ -780,18 +780,20 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;
 
 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }
 
 /* find an entry with matching function, matching index (if needed), and that
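The out-of-bounds read lived in the open-coded `for (j = i + 1; ...)` search: the first probe at `i + 1` is not reduced modulo `nent`, so with `i == nent - 1` the loop reads one entry past the array before wrapping. The do/while form reduces every index before it is used, and it always terminates because `entry[i]` matches its own function number. A standalone sketch (hypothetical `entry` type, not the kernel structs) of the fixed search:

    #include <stdio.h>

    struct entry { int function; };

    /* Circular search starting after i; j stays in [0, nent) at every
     * dereference, and the loop stops at i itself in the worst case. */
    static int next_same_function(const struct entry *e, int nent, int i)
    {
        int j = i;

        do {
            j = (j + 1) % nent;
        } while (e[j].function != e[i].function);

        return j;
    }

    int main(void)
    {
        struct entry e[] = { {0xA}, {0xB}, {0xA}, {0xC} };

        printf("%d\n", next_same_function(e, 4, 0)); /* 2 */
        printf("%d\n", next_same_function(e, 4, 2)); /* wraps to 0 */
        printf("%d\n", next_same_function(e, 4, 1)); /* no sibling: 1 */
        return 0;
    }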
@@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;
 
+	if (is_guest_mode(vcpu))
+		return false;
+
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
@@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	if (!prefault && can_do_async_pf(vcpu)) {
+	if (!prefault && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -2425,7 +2425,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
 	if (!(vmcs12->exception_bitmap & (1u << nr)))
 		return 0;
 
-	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
 			  vmcs_read32(VM_EXIT_INTR_INFO),
 			  vmcs_readl(EXIT_QUALIFICATION));
 	return 1;
@@ -8607,8 +8607,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 	else
-		return !kvm_event_needs_reinjection(vcpu) &&
-			kvm_x86_ops->interrupt_allowed(vcpu);
+		return kvm_can_do_async_pf(vcpu);
 }
 
 void kvm_arch_start_assignment(struct kvm *kvm)
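Together with the mmu.c and mmu.h hunks above, this makes page-fault time and wakeup-injection time share one predicate, so both sides now agree that no async page fault is delivered while the vCPU runs a nested (L2) guest. A condensed sketch of the shared gate (stand-in fields, not the kernel structs):

    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu { bool lapic; bool needs_reinject; bool guest_mode; bool irq_ok; };

    static bool can_do_async_pf(const struct vcpu *v)
    {
        if (!v->lapic || v->needs_reinject)
            return false;
        if (v->guest_mode)   /* the newly added L2 check */
            return false;
        return v->irq_ok;
    }

    int main(void)
    {
        struct vcpu v = { .lapic = true, .guest_mode = true, .irq_ok = true };

        printf("inject while in L2? %d\n", can_do_async_pf(&v)); /* 0 */
        return 0;
    }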
@@ -417,6 +417,10 @@
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
 
+#define ICH_VMCR_ACK_CTL_SHIFT		2
+#define ICH_VMCR_ACK_CTL_MASK		(1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT		3
+#define ICH_VMCR_FIQ_EN_MASK		(1 << ICH_VMCR_FIQ_EN_SHIFT)
 #define ICH_VMCR_CBPR_SHIFT		4
 #define ICH_VMCR_CBPR_MASK		(1 << ICH_VMCR_CBPR_SHIFT)
 #define ICH_VMCR_EOIM_SHIFT		9
@@ -25,7 +25,18 @@
 #define GICC_ENABLE			0x1
 #define GICC_INT_PRI_THRESHOLD		0xf0
 
-#define GIC_CPU_CTRL_EOImodeNS		(1 << 9)
+#define GIC_CPU_CTRL_EnableGrp0_SHIFT	0
+#define GIC_CPU_CTRL_EnableGrp0		(1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
+#define GIC_CPU_CTRL_EnableGrp1_SHIFT	1
+#define GIC_CPU_CTRL_EnableGrp1		(1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
+#define GIC_CPU_CTRL_AckCtl_SHIFT	2
+#define GIC_CPU_CTRL_AckCtl		(1 << GIC_CPU_CTRL_AckCtl_SHIFT)
+#define GIC_CPU_CTRL_FIQEn_SHIFT	3
+#define GIC_CPU_CTRL_FIQEn		(1 << GIC_CPU_CTRL_FIQEn_SHIFT)
+#define GIC_CPU_CTRL_CBPR_SHIFT		4
+#define GIC_CPU_CTRL_CBPR		(1 << GIC_CPU_CTRL_CBPR_SHIFT)
+#define GIC_CPU_CTRL_EOImodeNS_SHIFT	9
+#define GIC_CPU_CTRL_EOImodeNS		(1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
 
 #define GICC_IAR_INT_ID_MASK		0x3ff
 #define GICC_INT_SPURIOUS		1023
@@ -84,8 +95,19 @@
 #define GICH_LR_EOI			(1 << 19)
 #define GICH_LR_HW			(1 << 31)
 
-#define GICH_VMCR_CTRL_SHIFT		0
-#define GICH_VMCR_CTRL_MASK		(0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_ENABLE_GRP0_SHIFT	0
+#define GICH_VMCR_ENABLE_GRP0_MASK	(1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
+#define GICH_VMCR_ENABLE_GRP1_SHIFT	1
+#define GICH_VMCR_ENABLE_GRP1_MASK	(1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
+#define GICH_VMCR_ACK_CTL_SHIFT		2
+#define GICH_VMCR_ACK_CTL_MASK		(1 << GICH_VMCR_ACK_CTL_SHIFT)
+#define GICH_VMCR_FIQ_EN_SHIFT		3
+#define GICH_VMCR_FIQ_EN_MASK		(1 << GICH_VMCR_FIQ_EN_SHIFT)
+#define GICH_VMCR_CBPR_SHIFT		4
+#define GICH_VMCR_CBPR_MASK		(1 << GICH_VMCR_CBPR_SHIFT)
+#define GICH_VMCR_EOI_MODE_SHIFT	9
+#define GICH_VMCR_EOI_MODE_MASK		(1 << GICH_VMCR_EOI_MODE_SHIFT)
+
 #define GICH_VMCR_PRIMASK_SHIFT		27
 #define GICH_VMCR_PRIMASK_MASK		(0x1f << GICH_VMCR_PRIMASK_SHIFT)
 #define GICH_VMCR_BINPOINT_SHIFT	21
@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v)		((v) & 0xf)
-#define vtr_to_nr_pre_bits(v)		(((u32)(v) >> 26) + 1)
+#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
 
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
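ICH_VTR_EL2 keeps PREbits in bits [28:26] and PRIbits in [31:29]; the old macro shifted but never masked, so the PRIbits field leaked into the result. A self-contained before/after check:

    #include <stdint.h>
    #include <stdio.h>

    #define BUGGY_PRE_BITS(v) (((uint32_t)(v) >> 26) + 1)
    #define FIXED_PRE_BITS(v) ((((uint32_t)(v) >> 26) & 7) + 1)

    int main(void)
    {
        /* PRIbits = 7 in [31:29], PREbits = 4 in [28:26]. */
        uint32_t vtr = (7u << 29) | (4u << 26);

        printf("buggy: %u\n", BUGGY_PRE_BITS(vtr)); /* 61: PRIbits leak in */
        printf("fixed: %u\n", FIXED_PRE_BITS(vtr)); /* 5 */
        return 0;
    }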
@@ -879,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
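The ageing path walks the stage-2 tables with `cache == NULL`, so `stage2_get_pud()` can legitimately return NULL and the walker must bail out instead of dereferencing it. The pattern, reduced to a minimal sketch (hypothetical two-level table, not the kernel types):

    #include <stddef.h>
    #include <stdio.h>

    struct pmd { int val; };
    struct pud { struct pmd *pmd; };

    static struct pmd *get_pmd(struct pud *pud)
    {
        if (!pud)           /* the added guard: upper level may be absent */
            return NULL;
        return pud->pmd;
    }

    int main(void)
    {
        struct pmd pmd = { 42 };
        struct pud pud = { &pmd };

        printf("%d\n", get_pmd(&pud)->val);
        printf("%p\n", (void *)get_pmd(NULL)); /* safe: null, no crash */
        return 0;
    }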
@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		val = vmcr.ctlr;
+		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		vmcr.ctlr = val;
+		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
@@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	u32 vmcr;
 
-	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+		GICH_VMCR_ENABLE_GRP0_MASK;
+	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+		GICH_VMCR_ENABLE_GRP1_MASK;
+	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+		GICH_VMCR_ACK_CTL_MASK;
+	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+		GICH_VMCR_FIQ_EN_MASK;
+	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+		GICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+		GICH_VMCR_EOI_MODE_MASK;
 	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
 		GICH_VMCR_ALIAS_BINPOINT_MASK;
 	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+		GICH_VMCR_ENABLE_GRP0_SHIFT;
+	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+		GICH_VMCR_ENABLE_GRP1_SHIFT;
+	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+		GICH_VMCR_ACK_CTL_SHIFT;
+	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+		GICH_VMCR_FIQ_EN_SHIFT;
+	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+		GICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+		GICH_VMCR_EOI_MODE_SHIFT;
+
 	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
 			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
 	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
@@ -159,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-		ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-	vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+			ICH_VMCR_ACK_CTL_MASK;
+		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+			ICH_VMCR_FIQ_EN_MASK;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcr = ICH_VMCR_FIQ_EN_MASK;
+	}
+
+	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
 	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
 	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
 	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -180,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-			ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-	vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+			ICH_VMCR_ACK_CTL_SHIFT;
+		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+			ICH_VMCR_FIQ_EN_SHIFT;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcrp->fiqen = 1;
+		vmcrp->ackctl = 0;
+	}
+
+	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
 	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
 	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
 	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
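The vgic-v3 hunks make the ICH_VMCR translation depend on the emulated GIC model: AckCtl and FIQEn are only meaningful for a GICv2 guest on GICv3 hardware, while for a GICv3 guest vFIQEn is RES1 and vAckCtl is RES0. A toy packer showing the split, with shift values taken from the arm-gic-v3.h hunk above:

    #include <stdint.h>
    #include <stdio.h>

    #define ACK_CTL_SHIFT 2
    #define FIQ_EN_SHIFT  3

    struct fields { uint32_t ackctl, fiqen; };

    static uint32_t pack(const struct fields *f, int is_v2_guest)
    {
        if (is_v2_guest)
            return (f->ackctl << ACK_CTL_SHIFT) | (f->fiqen << FIQ_EN_SHIFT);
        return 1u << FIQ_EN_SHIFT;  /* vFIQEn RES1, vAckCtl RES0 */
    }

    int main(void)
    {
        struct fields f = { .ackctl = 1, .fiqen = 0 };

        printf("v2 guest: 0x%x\n", pack(&f, 1)); /* 0x4 */
        printf("v3 guest: 0x%x\n", pack(&f, 0)); /* 0x8 */
        return 0;
    }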
@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
-	u32	ctlr;
+	u32	grpen0;
+	u32	grpen1;
+
+	u32	ackctl;
+	u32	fiqen;
+	u32	cbpr;
+	u32	eoim;
+
 	u32	abpr;
 	u32	bpr;
 	u32	pmr;  /* Priority mask field in the GICC_PMR and
 		       * ICC_PMR_EL1 priority field format */
-	/* Below member variable are valid only for GICv3 */
-	u32	grpen0;
-	u32	grpen1;
 };
 
 struct vgic_reg_attr {