KVM: x86: Move "apicv_active" into "struct kvm_lapic"
Move the per-vCPU apicv_active flag into KVM's local APIC instance. APICv is fully dependent on an in-kernel local APIC, but that's not at all clear when reading the current code due to the flag being stored in the generic kvm_vcpu_arch struct. No functional change intended. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20220614230548.3852141-5-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
ae801e1303
commit
ce0a58f475
@@ -663,7 +663,6 @@ struct kvm_vcpu_arch {
|
|||||||
u64 efer;
|
u64 efer;
|
||||||
u64 apic_base;
|
u64 apic_base;
|
||||||
struct kvm_lapic *apic; /* kernel irqchip context */
|
struct kvm_lapic *apic; /* kernel irqchip context */
|
||||||
bool apicv_active;
|
|
||||||
bool load_eoi_exitmap_pending;
|
bool load_eoi_exitmap_pending;
|
||||||
DECLARE_BITMAP(ioapic_handled_vectors, 256);
|
DECLARE_BITMAP(ioapic_handled_vectors, 256);
|
||||||
unsigned long apic_attention;
|
unsigned long apic_attention;
|
||||||
|
@@ -519,14 +519,11 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
|
|||||||
|
|
||||||
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
|
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu *vcpu;
|
if (unlikely(apic->apicv_active)) {
|
||||||
|
|
||||||
vcpu = apic->vcpu;
|
|
||||||
|
|
||||||
if (unlikely(vcpu->arch.apicv_active)) {
|
|
||||||
/* need to update RVI */
|
/* need to update RVI */
|
||||||
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
|
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
|
||||||
static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
|
static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
|
||||||
|
apic_find_highest_irr(apic));
|
||||||
} else {
|
} else {
|
||||||
apic->irr_pending = false;
|
apic->irr_pending = false;
|
||||||
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
|
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
|
||||||
@@ -543,19 +540,15 @@ EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
|
|||||||
|
|
||||||
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
|
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu *vcpu;
|
|
||||||
|
|
||||||
if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
|
if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
vcpu = apic->vcpu;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* With APIC virtualization enabled, all caching is disabled
|
* With APIC virtualization enabled, all caching is disabled
|
||||||
* because the processor can modify ISR under the hood. Instead
|
* because the processor can modify ISR under the hood. Instead
|
||||||
* just set SVI.
|
* just set SVI.
|
||||||
*/
|
*/
|
||||||
if (unlikely(vcpu->arch.apicv_active))
|
if (unlikely(apic->apicv_active))
|
||||||
static_call_cond(kvm_x86_hwapic_isr_update)(vec);
|
static_call_cond(kvm_x86_hwapic_isr_update)(vec);
|
||||||
else {
|
else {
|
||||||
++apic->isr_count;
|
++apic->isr_count;
|
||||||
@@ -590,12 +583,9 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
|
|||||||
|
|
||||||
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
|
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu *vcpu;
|
|
||||||
if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
|
if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
vcpu = apic->vcpu;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We do get here for APIC virtualization enabled if the guest
|
* We do get here for APIC virtualization enabled if the guest
|
||||||
* uses the Hyper-V APIC enlightenment. In this case we may need
|
* uses the Hyper-V APIC enlightenment. In this case we may need
|
||||||
@@ -603,7 +593,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
|
|||||||
* on the other hand isr_count and highest_isr_cache are unused
|
* on the other hand isr_count and highest_isr_cache are unused
|
||||||
* and must be left alone.
|
* and must be left alone.
|
||||||
*/
|
*/
|
||||||
if (unlikely(vcpu->arch.apicv_active))
|
if (unlikely(apic->apicv_active))
|
||||||
static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
|
static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
|
||||||
else {
|
else {
|
||||||
--apic->isr_count;
|
--apic->isr_count;
|
||||||
@@ -1584,7 +1574,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
|
|||||||
int vec = reg & APIC_VECTOR_MASK;
|
int vec = reg & APIC_VECTOR_MASK;
|
||||||
void *bitmap = apic->regs + APIC_ISR;
|
void *bitmap = apic->regs + APIC_ISR;
|
||||||
|
|
||||||
if (vcpu->arch.apicv_active)
|
if (apic->apicv_active)
|
||||||
bitmap = apic->regs + APIC_IRR;
|
bitmap = apic->regs + APIC_IRR;
|
||||||
|
|
||||||
if (apic_test_vector(vec, bitmap))
|
if (apic_test_vector(vec, bitmap))
|
||||||
@@ -1701,7 +1691,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
|
|||||||
if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
|
if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
|
||||||
ktimer->expired_tscdeadline = ktimer->tscdeadline;
|
ktimer->expired_tscdeadline = ktimer->tscdeadline;
|
||||||
|
|
||||||
if (!from_timer_fn && vcpu->arch.apicv_active) {
|
if (!from_timer_fn && apic->apicv_active) {
|
||||||
WARN_ON(kvm_get_running_vcpu() != vcpu);
|
WARN_ON(kvm_get_running_vcpu() != vcpu);
|
||||||
kvm_apic_inject_pending_timer_irqs(apic);
|
kvm_apic_inject_pending_timer_irqs(apic);
|
||||||
return;
|
return;
|
||||||
@@ -2379,7 +2369,7 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
|
|||||||
{
|
{
|
||||||
struct kvm_lapic *apic = vcpu->arch.apic;
|
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||||
|
|
||||||
if (vcpu->arch.apicv_active) {
|
if (apic->apicv_active) {
|
||||||
/* irr_pending is always true when apicv is activated. */
|
/* irr_pending is always true when apicv is activated. */
|
||||||
apic->irr_pending = true;
|
apic->irr_pending = true;
|
||||||
apic->isr_count = 1;
|
apic->isr_count = 1;
|
||||||
@@ -2454,7 +2444,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
|
|||||||
|
|
||||||
vcpu->arch.pv_eoi.msr_val = 0;
|
vcpu->arch.pv_eoi.msr_val = 0;
|
||||||
apic_update_ppr(apic);
|
apic_update_ppr(apic);
|
||||||
if (vcpu->arch.apicv_active) {
|
if (apic->apicv_active) {
|
||||||
static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
|
static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
|
||||||
static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
|
static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
|
||||||
static_call_cond(kvm_x86_hwapic_isr_update)(-1);
|
static_call_cond(kvm_x86_hwapic_isr_update)(-1);
|
||||||
@@ -2734,7 +2724,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
|
|||||||
kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
|
kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
|
||||||
kvm_apic_update_apicv(vcpu);
|
kvm_apic_update_apicv(vcpu);
|
||||||
apic->highest_isr_cache = -1;
|
apic->highest_isr_cache = -1;
|
||||||
if (vcpu->arch.apicv_active) {
|
if (apic->apicv_active) {
|
||||||
static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
|
static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
|
||||||
static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
|
static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
|
||||||
static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
|
static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
|
||||||
|
@@ -48,6 +48,7 @@ struct kvm_lapic {
|
|||||||
struct kvm_timer lapic_timer;
|
struct kvm_timer lapic_timer;
|
||||||
u32 divide_count;
|
u32 divide_count;
|
||||||
struct kvm_vcpu *vcpu;
|
struct kvm_vcpu *vcpu;
|
||||||
|
bool apicv_active;
|
||||||
bool sw_enabled;
|
bool sw_enabled;
|
||||||
bool irr_pending;
|
bool irr_pending;
|
||||||
bool lvt0_in_nmi_mode;
|
bool lvt0_in_nmi_mode;
|
||||||
@@ -204,7 +205,7 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
|
|||||||
|
|
||||||
static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
|
static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
return vcpu->arch.apic && vcpu->arch.apicv_active;
|
return vcpu->arch.apic && vcpu->arch.apic->apicv_active;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
|
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
|
||||||
|
@@ -3465,12 +3465,13 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
|
|||||||
int trig_mode, int vector)
|
int trig_mode, int vector)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* vcpu->arch.apicv_active must be read after vcpu->mode.
|
* apic->apicv_active must be read after vcpu->mode.
|
||||||
* Pairs with smp_store_release in vcpu_enter_guest.
|
* Pairs with smp_store_release in vcpu_enter_guest.
|
||||||
*/
|
*/
|
||||||
bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
|
bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
|
||||||
|
|
||||||
if (!READ_ONCE(vcpu->arch.apicv_active)) {
|
/* Note, this is called iff the local APIC is in-kernel. */
|
||||||
|
if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
|
||||||
/* Process the interrupt via inject_pending_event */
|
/* Process the interrupt via inject_pending_event */
|
||||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||||
kvm_vcpu_kick(vcpu);
|
kvm_vcpu_kick(vcpu);
|
||||||
|
@@ -4099,7 +4099,8 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
|
|||||||
if (!r)
|
if (!r)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (!vcpu->arch.apicv_active)
|
/* Note, this is called iff the local APIC is in-kernel. */
|
||||||
|
if (!vcpu->arch.apic->apicv_active)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
if (pi_test_and_set_pir(vector, &vmx->pi_desc))
|
if (pi_test_and_set_pir(vector, &vmx->pi_desc))
|
||||||
|
@@ -9460,7 +9460,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
|
|||||||
if (!lapic_in_kernel(vcpu))
|
if (!lapic_in_kernel(vcpu))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (vcpu->arch.apicv_active)
|
if (vcpu->arch.apic->apicv_active)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (!vcpu->arch.apic->vapic_addr)
|
if (!vcpu->arch.apic->vapic_addr)
|
||||||
@@ -9913,6 +9913,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
|
|||||||
|
|
||||||
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
|
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
|
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||||
bool activate;
|
bool activate;
|
||||||
|
|
||||||
if (!lapic_in_kernel(vcpu))
|
if (!lapic_in_kernel(vcpu))
|
||||||
@@ -9923,10 +9924,10 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
|
|||||||
|
|
||||||
activate = kvm_vcpu_apicv_activated(vcpu);
|
activate = kvm_vcpu_apicv_activated(vcpu);
|
||||||
|
|
||||||
if (vcpu->arch.apicv_active == activate)
|
if (apic->apicv_active == activate)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
vcpu->arch.apicv_active = activate;
|
apic->apicv_active = activate;
|
||||||
kvm_apic_update_apicv(vcpu);
|
kvm_apic_update_apicv(vcpu);
|
||||||
static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
|
static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
|
||||||
|
|
||||||
@@ -9936,7 +9937,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
|
|||||||
* still active when the interrupt got accepted. Make sure
|
* still active when the interrupt got accepted. Make sure
|
||||||
* inject_pending_event() is called to check for that.
|
* inject_pending_event() is called to check for that.
|
||||||
*/
|
*/
|
||||||
if (!vcpu->arch.apicv_active)
|
if (!apic->apicv_active)
|
||||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
@@ -11379,7 +11380,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
|||||||
* will ensure the vCPU gets the correct state before VM-Entry.
|
* will ensure the vCPU gets the correct state before VM-Entry.
|
||||||
*/
|
*/
|
||||||
if (enable_apicv) {
|
if (enable_apicv) {
|
||||||
vcpu->arch.apicv_active = true;
|
vcpu->arch.apic->apicv_active = true;
|
||||||
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
|
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
|
||||||
}
|
}
|
||||||
} else
|
} else
|
||||||
|
Loading…
Reference in New Issue
Block a user