KVM: arm64: Delay the polling of the GICR_VPENDBASER.Dirty bit
In order to reduce the impact of the VPT parsing happening on the GIC, we can split the vcpu residency in two phases:

- programming GICR_VPENDBASER: this still happens in vcpu_load()
- checking for the VPT parsing to be complete: this can happen on vcpu entry (in kvm_vgic_flush_hwstate())

This allows the GIC and the CPU to work in parallel, removing some of the entry overhead.

Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Shenming Lu <lushenming@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20201128141857.983-3-lushenming@huawei.com
commit 57e3cebd02
parent bf118a5cb7
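Before the hunks below, a minimal userspace sketch of the two-phase idea described in the commit message: phase one kicks off the slow work (modelling the GIC parsing the VPT once GICR_VPENDBASER has been programmed) and returns immediately, and only the final commit step polls for completion, so the CPU and the GIC overlap. Every name here (vpt_dirty, schedule_vpe, commit_vpe, gic_parse_vpt) is an illustrative assumption for the sketch, not kernel API; the real flow programs GICR_VPENDBASER in vcpu_load() and polls the Dirty bit via its_commit_vpe() from kvm_vgic_flush_hwstate(), as the diff shows.

/* Hypothetical model, not kernel code: a "GIC" thread clears a Dirty
 * flag asynchronously, and only the commit step polls for it.
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool vpt_dirty;			/* stands in for GICR_VPENDBASER.Dirty */

static void *gic_parse_vpt(void *unused)	/* models the GIC parsing the VPT */
{
	(void)unused;
	usleep(500);				/* parsing takes a while */
	atomic_store(&vpt_dirty, false);	/* Dirty clears once parsing is done */
	return NULL;
}

static void schedule_vpe(pthread_t *gic)	/* phase 1: think vcpu_load(), no waiting */
{
	atomic_store(&vpt_dirty, true);
	pthread_create(gic, NULL, gic_parse_vpt, NULL);
}

static void commit_vpe(void)			/* phase 2: think kvm_vgic_flush_hwstate() */
{
	while (atomic_load(&vpt_dirty))
		;				/* the real code polls the Dirty bit here */
}

int main(void)
{
	pthread_t gic;

	schedule_vpe(&gic);
	printf("other vcpu entry work runs while the GIC parses the VPT\n");
	commit_vpe();
	printf("VPT parse complete, safe to enter the guest\n");
	pthread_join(gic, NULL);
	return 0;
}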
@@ -353,6 +353,18 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	return err;
 }
 
+void vgic_v4_commit(struct kvm_vcpu *vcpu)
+{
+	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+	/*
+	 * No need to wait for the vPE to be ready across a shallow guest
+	 * exit, as only a vcpu_put will invalidate it.
+	 */
+	if (!vpe->ready)
+		its_commit_vpe(vpe);
+}
+
 static struct vgic_its *vgic_get_its(struct kvm *kvm,
 				     struct kvm_kernel_irq_routing_entry *irq_entry)
 {
@@ -915,6 +915,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
+
+	if (vgic_supports_direct_msis(vcpu->kvm))
+		vgic_v4_commit(vcpu);
 }
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu)
@@ -3842,8 +3842,6 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
 	val |= GICR_VPENDBASER_Valid;
 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
-	its_wait_vpt_parse_complete();
 }
 
 static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3891,6 +3889,10 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
 		its_vpe_deschedule(vpe);
 		return 0;
 
+	case COMMIT_VPE:
+		its_wait_vpt_parse_complete();
+		return 0;
+
 	case INVALL_VPE:
 		its_vpe_invall(vpe);
 		return 0;
@@ -4052,8 +4054,6 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
 	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
 
 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
-	its_wait_vpt_parse_complete();
 }
 
 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
@@ -4128,6 +4128,10 @@ static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
 		its_vpe_4_1_deschedule(vpe, info);
 		return 0;
 
+	case COMMIT_VPE:
+		its_wait_vpt_parse_complete();
+		return 0;
+
 	case INVALL_VPE:
 		its_vpe_4_1_invall(vpe);
 		return 0;
@@ -232,6 +232,8 @@ int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
 	if (!ret)
 		vpe->resident = false;
 
+	vpe->ready = false;
+
 	return ret;
 }
 
@@ -258,6 +260,23 @@ int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
 	return ret;
 }
 
+int its_commit_vpe(struct its_vpe *vpe)
+{
+	struct its_cmd_info info = {
+		.cmd_type = COMMIT_VPE,
+	};
+	int ret;
+
+	WARN_ON(preemptible());
+
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->ready = true;
+
+	return ret;
+}
+
+
 int its_invall_vpe(struct its_vpe *vpe)
 {
 	struct its_cmd_info info = {
@@ -402,6 +402,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
 				 struct kvm_kernel_irq_routing_entry *irq_entry);
 
 int vgic_v4_load(struct kvm_vcpu *vcpu);
+void vgic_v4_commit(struct kvm_vcpu *vcpu);
 int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
 
 #endif /* __KVM_ARM_VGIC_H */
@@ -39,6 +39,8 @@ struct its_vpe {
 	irq_hw_number_t		vpe_db_lpi;
 	/* VPE resident */
 	bool			resident;
+	/* VPT parse complete */
+	bool			ready;
 	union {
 		/* GICv4.0 implementations */
 		struct {
@@ -104,6 +106,7 @@ enum its_vcpu_info_cmd_type {
 	PROP_UPDATE_AND_INV_VLPI,
 	SCHEDULE_VPE,
 	DESCHEDULE_VPE,
+	COMMIT_VPE,
 	INVALL_VPE,
 	PROP_UPDATE_VSGI,
 };
@@ -129,6 +132,7 @@ int its_alloc_vcpu_irqs(struct its_vm *vm);
 void its_free_vcpu_irqs(struct its_vm *vm);
 int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
 int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
+int its_commit_vpe(struct its_vpe *vpe);
 int its_invall_vpe(struct its_vpe *vpe);
 int its_map_vlpi(int irq, struct its_vlpi_map *map);
 int its_get_vlpi(int irq, struct its_vlpi_map *map);