irqchip/gic-v4.1: Move doorbell management to the GICv4 abstraction layer

In order to hide some of the differences between v4.0 and v4.1, move
the doorbell management out of the KVM code, and into the GICv4-specific
layer. This allows the calling code to ask for the doorbell when blocking,
and otherwise to leave the doorbell permanently disabled.

This matches the v4.1 code perfectly, and only results in a minor
refactoring of the v4.0 code.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Link: https://lore.kernel.org/r/20200304203330.4967-14-maz@kernel.org
This commit is contained in:
Marc Zyngier 2020-03-04 20:33:20 +00:00
parent 05d32df13c
commit ae699ad348
5 changed files with 61 additions and 26 deletions

View File

@ -87,6 +87,11 @@ static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops; static const struct irq_domain_ops *vpe_domain_ops;
static const struct irq_domain_ops *sgi_domain_ops; static const struct irq_domain_ops *sgi_domain_ops;
static bool has_v4_1(void)
{
return !!sgi_domain_ops;
}
int its_alloc_vcpu_irqs(struct its_vm *vm) int its_alloc_vcpu_irqs(struct its_vm *vm)
{ {
int vpe_base_irq, i; int vpe_base_irq, i;
@ -139,18 +144,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
return irq_set_vcpu_affinity(vpe->irq, info); return irq_set_vcpu_affinity(vpe->irq, info);
} }
int its_schedule_vpe(struct its_vpe *vpe, bool on) int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
{ {
struct its_cmd_info info; struct irq_desc *desc = irq_to_desc(vpe->irq);
struct its_cmd_info info = { };
int ret; int ret;
WARN_ON(preemptible()); WARN_ON(preemptible());
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE; info.cmd_type = DESCHEDULE_VPE;
if (has_v4_1()) {
/* GICv4.1 can directly deal with doorbells */
info.req_db = db;
} else {
/* Undo the nested disable_irq() calls... */
while (db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
}
ret = its_send_vpe_cmd(vpe, &info); ret = its_send_vpe_cmd(vpe, &info);
if (!ret) if (!ret)
vpe->resident = on; vpe->resident = false;
return ret;
}
int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
struct its_cmd_info info = { };
int ret;
WARN_ON(preemptible());
info.cmd_type = SCHEDULE_VPE;
if (has_v4_1()) {
info.g0en = g0en;
info.g1en = g1en;
} else {
/* Disable the doorbell, as we're about to enter the guest */
disable_irq_nosync(vpe->irq);
}
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
vpe->resident = true;
return ret; return ret;
} }

View File

@ -70,6 +70,7 @@ struct vgic_global {
/* Hardware has GICv4? */ /* Hardware has GICv4? */
bool has_gicv4; bool has_gicv4;
bool has_gicv4_1;
/* GIC system register CPU interface */ /* GIC system register CPU interface */
struct static_key_false gicv3_cpuif; struct static_key_false gicv3_cpuif;

View File

@ -125,7 +125,8 @@ struct its_cmd_info {
int its_alloc_vcpu_irqs(struct its_vm *vm); int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm); void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on); int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
int its_invall_vpe(struct its_vpe *vpe); int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map);

View File

@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
/* GICv4 support? */ /* GICv4 support? */
if (info->has_v4) { if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable; kvm_vgic_global_state.has_gicv4 = gicv4_enable;
kvm_info("GICv4 support %sabled\n", kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
kvm_info("GICv4%s support %sabled\n",
kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
gicv4_enable ? "en" : "dis"); gicv4_enable ? "en" : "dis");
} }

View File

@ -67,10 +67,10 @@
* it. And if we've migrated our vcpu from one CPU to another, we must * it. And if we've migrated our vcpu from one CPU to another, we must
* tell the ITS (so that the messages reach the right redistributor). * tell the ITS (so that the messages reach the right redistributor).
* This is done in two steps: first issue a irq_set_affinity() on the * This is done in two steps: first issue a irq_set_affinity() on the
* irq corresponding to the vcpu, then call its_schedule_vpe(). You * irq corresponding to the vcpu, then call its_make_vpe_resident().
* must be in a non-preemptible context. On exit, another call to * You must be in a non-preemptible context. On exit, a call to
* its_schedule_vpe() tells the redistributor that we're done with the * its_make_vpe_non_resident() tells the redistributor that we're done
* vcpu. * with the vcpu.
* *
* Finally, the doorbell handling: Each vcpu is allocated an interrupt * Finally, the doorbell handling: Each vcpu is allocated an interrupt
* which will fire each time a VLPI is made pending whilst the vcpu is * which will fire each time a VLPI is made pending whilst the vcpu is
@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
struct kvm_vcpu *vcpu = info; struct kvm_vcpu *vcpu = info;
/* We got the message, no need to fire again */ /* We got the message, no need to fire again */
if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data)) if (!kvm_vgic_global_state.has_gicv4_1 &&
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq); disable_irq_nosync(irq);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true; vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db) int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{ {
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct irq_desc *desc = irq_to_desc(vpe->irq);
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident) if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0; return 0;
/* return its_make_vpe_non_resident(vpe, need_db);
* If blocking, a doorbell is required. Undo the nested
* disable_irq() calls...
*/
while (need_db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
return its_schedule_vpe(vpe, false);
} }
int vgic_v4_load(struct kvm_vcpu *vcpu) int vgic_v4_load(struct kvm_vcpu *vcpu)
@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
if (err) if (err)
return err; return err;
/* Disabled the doorbell, as we're about to enter the guest */ err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
disable_irq_nosync(vpe->irq);
err = its_schedule_vpe(vpe, true);
if (err) if (err)
return err; return err;
/* /*
* Now that the VPE is resident, let's get rid of a potential * Now that the VPE is resident, let's get rid of a potential
* doorbell interrupt that would still be pending. * doorbell interrupt that would still be pending. This is a
* GICv4.0 only "feature"...
*/ */
return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false); if (!kvm_vgic_global_state.has_gicv4_1)
err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
return err;
} }
static struct vgic_its *vgic_get_its(struct kvm *kvm, static struct vgic_its *vgic_get_its(struct kvm *kvm,