KVM: arm/arm64: Merge vgic_set_lr() and vgic_sync_lr_elrsr()

vgic_set_lr() and vgic_sync_lr_elrsr() are always used together. Merge
them into one function, saving a second vgic_ops dereference on every
call.

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Author:    Pavel Fedin <p.fedin@samsung.com>, 2015-10-27 11:37:31 +03:00
Committer: Christoffer Dall
commit 26caea7693, parent 212c76545d
4 changed files with 2 additions and 23 deletions
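For orientation, here is a sketch of what the merged GICv2 callback looks like after this patch, stitched together from the vgic-v2.c hunks below. The lr_val encoding is abbreviated, and the else branch clearing the shadow ELRSR bit is assumed from the truncated hunk context:

static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
                           struct vgic_lr lr_desc)
{
        u32 lr_val = lr_desc.irq;

        /* ... encode lr_desc.state into lr_val, abbreviated ... */
        if (lr_desc.irq < VGIC_NR_SGIS)
                lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);

        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;

        /* Former body of vgic_v2_sync_lr_elrsr(): keep the shadow
         * ELRSR in step with the LR that was just written. */
        if (!(lr_desc.state & LR_STATE_MASK))
                vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
        else    /* assumed from context */
                vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
}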

include/kvm/arm_vgic.h:

@@ -112,7 +112,6 @@ struct vgic_vmcr {
 struct vgic_ops {
         struct vgic_lr  (*get_lr)(const struct kvm_vcpu *, int);
         void    (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
-        void    (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
         u64     (*get_elrsr)(const struct kvm_vcpu *vcpu);
         u64     (*get_eisr)(const struct kvm_vcpu *vcpu);
         void    (*clear_eisr)(struct kvm_vcpu *vcpu);

virt/kvm/arm/vgic-v2.c:

@@ -79,11 +79,7 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
                 lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
 
         vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-}
-
-static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-                                  struct vgic_lr lr_desc)
-{
+
         if (!(lr_desc.state & LR_STATE_MASK))
                 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
         else
@@ -167,7 +163,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
 static const struct vgic_ops vgic_v2_ops = {
         .get_lr         = vgic_v2_get_lr,
         .set_lr         = vgic_v2_set_lr,
-        .sync_lr_elrsr  = vgic_v2_sync_lr_elrsr,
         .get_elrsr      = vgic_v2_get_elrsr,
         .get_eisr       = vgic_v2_get_eisr,
         .clear_eisr     = vgic_v2_clear_eisr,

virt/kvm/arm/vgic-v3.c:

@@ -112,11 +112,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
         }
 
         vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
-}
-
-static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-                                  struct vgic_lr lr_desc)
-{
+
         if (!(lr_desc.state & LR_STATE_MASK))
                 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
         else
@@ -212,7 +208,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
 static const struct vgic_ops vgic_v3_ops = {
         .get_lr         = vgic_v3_get_lr,
         .set_lr         = vgic_v3_set_lr,
-        .sync_lr_elrsr  = vgic_v3_sync_lr_elrsr,
         .get_elrsr      = vgic_v3_get_elrsr,
         .get_eisr       = vgic_v3_get_eisr,
         .clear_eisr     = vgic_v3_clear_eisr,

virt/kvm/arm/vgic.c:

@@ -1032,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
         vgic_ops->set_lr(vcpu, lr, vlr);
 }
 
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-                               struct vgic_lr vlr)
-{
-        vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
 {
         return vgic_ops->get_elrsr(vcpu);
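Worth noting: every vgic_ops member is reached through a pointer dereference plus an indirect call, so before this patch each LR update paid that cost twice. A sketch of the caller-side effect, reconstructed from the wrapper above and the call sites below:

        /* before: two vgic_ops dereferences and indirect calls */
        vgic_set_lr(vcpu, lr_nr, vlr);        /* vgic_ops->set_lr(...)        */
        vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); /* vgic_ops->sync_lr_elrsr(...) */

        /* after: one call; the ELRSR bookkeeping now lives inside set_lr */
        vgic_set_lr(vcpu, lr_nr, vlr);        /* vgic_ops->set_lr(...)        */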
@@ -1100,7 +1094,6 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 
         vlr.state = 0;
         vgic_set_lr(vcpu, lr_nr, vlr);
-        vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1162,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
         }
 
         vgic_set_lr(vcpu, lr_nr, vlr);
-        vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1340,8 +1332,6 @@ static int process_queued_irq(struct kvm_vcpu *vcpu,
                 vlr.hwirq = 0;
 
         vgic_set_lr(vcpu, lr, vlr);
-        vgic_sync_lr_elrsr(vcpu, lr, vlr);
-
         return pending;
 }
@@ -1442,8 +1432,6 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
         bool level_pending;
 
         level_pending = vgic_process_maintenance(vcpu);
-        elrsr = vgic_get_elrsr(vcpu);
-        elrsr_ptr = u64_to_bitmask(&elrsr);
 
         /* Deal with HW interrupts, and clear mappings for empty LRs */
         for (lr = 0; lr < vgic->nr_lr; lr++) {
@@ -1454,6 +1442,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
         }
 
         /* Check if we still have something up our sleeve... */
+        elrsr = vgic_get_elrsr(vcpu);
+        elrsr_ptr = u64_to_bitmask(&elrsr);
         pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
         if (level_pending || pending < vgic->nr_lr)
                 set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
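The last two hunks are the subtle part of the merge: once vgic_set_lr() also refreshes the shadow ELRSR, a snapshot taken before the LR-scanning loop can go stale, since processing inside the loop may rewrite LRs. A minimal sketch of the resulting ordering, under the assumption (inferred from the hunks above, not stated in the commit) that the loop body can reach vgic_set_lr():

static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        /* ... */
        level_pending = vgic_process_maintenance(vcpu);

        /* This loop may call vgic_set_lr(), which now also updates
         * the shadow ELRSR, so don't snapshot ELRSR before it. */
        for (lr = 0; lr < vgic->nr_lr; lr++)
                /* deal with HW interrupts, clear empty LRs */;

        /* Snapshot ELRSR only after all LR writes are done. */
        elrsr = vgic_get_elrsr(vcpu);
        elrsr_ptr = u64_to_bitmask(&elrsr);
        pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
        /* ... */
}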