KVM: arm64: Simplify vgic-v3 hypercalls
Consolidate the GICv3 VMCR accessor hypercalls into the APR save/restore hypercalls so that all of the EL2 GICv3 state is covered by a single pair of hypercalls.

Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-17-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 948e1a53c2
parent 9c30fc615d
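At its core, the change replaces the four host hypercalls __vgic_v3_read_vmcr, __vgic_v3_write_vmcr, __vgic_v3_save_aprs and __vgic_v3_restore_aprs with a single combined save/restore pair. The new hyp-side helpers below are reproduced (indentation only) from the @@ -455,16 +455,35 @@ hunk further down and show what each combined call covers:

void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	__vgic_v3_save_aprs(cpu_if);
	if (cpu_if->vgic_sre)
		cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
}

void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (cpu_if->vgic_sre)
		__vgic_v3_write_vmcr(cpu_if->vgic_vmcr);
	__vgic_v3_restore_aprs(cpu_if);
}

As a result, callers such as vgic_v3_put() and vgic_v3_load() issue exactly one kvm_call_hyp() each, and the separate kvm_vgic_vmcr_sync() path is removed, as the hunks below show.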
@@ -73,10 +73,8 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
 	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
 	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
@@ -241,8 +239,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_gic_config(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
 
 extern u64 __kvm_get_mdcr_el2(void);
@@ -80,8 +80,8 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_NVHE_HYPERVISOR__
@@ -784,9 +784,8 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 	 * doorbells to be signalled, should an interrupt become pending.
 	 */
 	preempt_disable();
-	kvm_vgic_vmcr_sync(vcpu);
 	vcpu_set_flag(vcpu, IN_WFI);
-	vgic_v4_put(vcpu);
+	kvm_vgic_put(vcpu);
 	preempt_enable();
 
 	kvm_vcpu_halt(vcpu);
@@ -794,7 +793,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 
 	preempt_disable();
 	vcpu_clear_flag(vcpu, IN_WFI);
-	vgic_v4_load(vcpu);
+	kvm_vgic_load(vcpu);
 	preempt_enable();
 }
 
@@ -175,16 +175,6 @@ static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
 }
 
-static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
-{
-	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
-}
-
-static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
-{
-	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
-}
-
 static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
 {
 	__vgic_v3_init_lrs();
@@ -195,18 +185,18 @@ static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
 }
 
-static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
 
-	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
+	__vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
 }
 
-static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
 
-	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
+	__vgic_v3_restore_vmcr_aprs(kern_hyp_va(cpu_if));
 }
 
 static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
@@ -337,10 +327,8 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
-	HANDLE_FUNC(__vgic_v3_read_vmcr),
-	HANDLE_FUNC(__vgic_v3_write_vmcr),
-	HANDLE_FUNC(__vgic_v3_save_aprs),
-	HANDLE_FUNC(__vgic_v3_restore_aprs),
+	HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
+	HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
 	HANDLE_FUNC(__pkvm_vcpu_init_traps),
 	HANDLE_FUNC(__pkvm_init_vm),
 	HANDLE_FUNC(__pkvm_init_vcpu),
@@ -330,7 +330,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 	write_gicreg(0, ICH_HCR_EL2);
 }
 
-void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
+static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 	u32 nr_pre_bits;
@@ -363,7 +363,7 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 	}
 }
 
-void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
+static void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 	u32 nr_pre_bits;
@@ -455,16 +455,35 @@ u64 __vgic_v3_get_gic_config(void)
 	return val;
 }
 
-u64 __vgic_v3_read_vmcr(void)
+static u64 __vgic_v3_read_vmcr(void)
 {
 	return read_gicreg(ICH_VMCR_EL2);
 }
 
-void __vgic_v3_write_vmcr(u32 vmcr)
+static void __vgic_v3_write_vmcr(u32 vmcr)
 {
 	write_gicreg(vmcr, ICH_VMCR_EL2);
 }
 
+void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
+{
+	__vgic_v3_save_aprs(cpu_if);
+	if (cpu_if->vgic_sre)
+		cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
+}
+
+void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
+{
+	/*
+	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
+	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
+	 * VMCR_EL2 save/restore in the world switch.
+	 */
+	if (cpu_if->vgic_sre)
+		__vgic_v3_write_vmcr(cpu_if->vgic_vmcr);
+	__vgic_v3_restore_aprs(cpu_if);
+}
+
 static int __vgic_v3_bpr_min(void)
 {
 	/* See Pseudocode for VPriorityGroup */
@@ -464,17 +464,10 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
 		       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
 
-void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-
-	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
-}
-
 void vgic_v2_put(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
-	vgic_v2_vmcr_sync(vcpu);
+	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
@@ -722,15 +722,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	/*
-	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
-	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
-	 * VMCR_EL2 save/restore in the world switch.
-	 */
-	if (likely(cpu_if->vgic_sre))
-		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
-
-	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);
+	kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_activate_traps(cpu_if);
@@ -738,24 +730,13 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 	WARN_ON(vgic_v4_load(vcpu));
 }
 
-void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
-	if (likely(cpu_if->vgic_sre))
-		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
-}
-
 void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
+	kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
 	WARN_ON(vgic_v4_put(vcpu));
 
-	vgic_v3_vmcr_sync(vcpu);
-
-	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
-
 	if (has_vhe())
 		__vgic_v3_deactivate_traps(cpu_if);
 }
@@ -939,17 +939,6 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
 		vgic_v3_put(vcpu);
 }
 
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
-{
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		return;
-
-	if (kvm_vgic_global_state.type == VGIC_V2)
-		vgic_v2_vmcr_sync(vcpu);
-	else
-		vgic_v3_vmcr_sync(vcpu);
-}
-
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -214,7 +214,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
-void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
 
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -253,7 +252,6 @@ bool vgic_v3_check_base(struct kvm *kvm);
 
 void vgic_v3_load(struct kvm_vcpu *vcpu);
 void vgic_v3_put(struct kvm_vcpu *vcpu);
-void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
 
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
@@ -388,7 +388,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)