KVM: lapic: reorganize restart_apic_timer
Move the code that cancels the hv timer into the caller, just before it starts the hrtimer. Check the availability of the hv timer in start_hv_timer.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
		
							parent
							
								
									35ee9e48b9
								
							
						
					
					
						commit
						a749e247f7
					
| @@ -1495,17 +1495,21 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use); | ||||
| 
 | ||||
| static void cancel_hv_timer(struct kvm_lapic *apic) | ||||
| { | ||||
| 	WARN_ON(!apic->lapic_timer.hv_timer_in_use); | ||||
| 	preempt_disable(); | ||||
| 	kvm_x86_ops->cancel_hv_timer(apic->vcpu); | ||||
| 	apic->lapic_timer.hv_timer_in_use = false; | ||||
| 	preempt_enable(); | ||||
| } | ||||
| 
 | ||||
| static bool __start_hv_timer(struct kvm_lapic *apic) | ||||
| static bool start_hv_timer(struct kvm_lapic *apic) | ||||
| { | ||||
| 	struct kvm_timer *ktimer = &apic->lapic_timer; | ||||
| 	int r; | ||||
| 
 | ||||
| 	if (!kvm_x86_ops->set_hv_timer) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) | ||||
| 		return false; | ||||
| 
 | ||||
| @@ -1523,19 +1527,30 @@ static bool __start_hv_timer(struct kvm_lapic *apic) | ||||
| 	 */ | ||||
| 	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) | ||||
| 		return false; | ||||
| 
 | ||||
| 	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true); | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| static bool start_hv_timer(struct kvm_lapic *apic) | ||||
| static void start_sw_timer(struct kvm_lapic *apic) | ||||
| { | ||||
| 	if (!__start_hv_timer(apic)) { | ||||
| 		if (apic->lapic_timer.hv_timer_in_use) | ||||
| 			cancel_hv_timer(apic); | ||||
| 	} | ||||
| 	struct kvm_timer *ktimer = &apic->lapic_timer; | ||||
| 	if (apic->lapic_timer.hv_timer_in_use) | ||||
| 		cancel_hv_timer(apic); | ||||
| 	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) | ||||
| 		return; | ||||
| 
 | ||||
| 	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, | ||||
| 			apic->lapic_timer.hv_timer_in_use); | ||||
| 	return apic->lapic_timer.hv_timer_in_use; | ||||
| 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) | ||||
| 		start_sw_period(apic); | ||||
| 	else if (apic_lvtt_tscdeadline(apic)) | ||||
| 		start_sw_tscdeadline(apic); | ||||
| 	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false); | ||||
| } | ||||
| 
 | ||||
| static void restart_apic_timer(struct kvm_lapic *apic) | ||||
| { | ||||
| 	if (!start_hv_timer(apic)) | ||||
| 		start_sw_timer(apic); | ||||
| } | ||||
| 
 | ||||
| void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) | ||||
| @@ -1549,19 +1564,14 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) | ||||
| 
 | ||||
| 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) { | ||||
| 		advance_periodic_target_expiration(apic); | ||||
| 		if (!start_hv_timer(apic)) | ||||
| 			start_sw_period(apic); | ||||
| 		restart_apic_timer(apic); | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer); | ||||
| 
 | ||||
| void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu) | ||||
| { | ||||
| 	struct kvm_lapic *apic = vcpu->arch.apic; | ||||
| 
 | ||||
| 	WARN_ON(apic->lapic_timer.hv_timer_in_use); | ||||
| 
 | ||||
| 	start_hv_timer(apic); | ||||
| 	restart_apic_timer(vcpu->arch.apic); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer); | ||||
| 
 | ||||
| @@ -1570,33 +1580,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu) | ||||
| 	struct kvm_lapic *apic = vcpu->arch.apic; | ||||
| 
 | ||||
| 	/* Possibly the TSC deadline timer is not enabled yet */ | ||||
| 	if (!apic->lapic_timer.hv_timer_in_use) | ||||
| 		return; | ||||
| 
 | ||||
| 	cancel_hv_timer(apic); | ||||
| 
 | ||||
| 	if (atomic_read(&apic->lapic_timer.pending)) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) | ||||
| 		start_sw_period(apic); | ||||
| 	else if (apic_lvtt_tscdeadline(apic)) | ||||
| 		start_sw_tscdeadline(apic); | ||||
| 	if (apic->lapic_timer.hv_timer_in_use) | ||||
| 		start_sw_timer(apic); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer); | ||||
| 
 | ||||
| void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu) | ||||
| { | ||||
| 	struct kvm_lapic *apic = vcpu->arch.apic; | ||||
| 
 | ||||
| 	WARN_ON(!apic->lapic_timer.hv_timer_in_use); | ||||
| 	restart_apic_timer(apic); | ||||
| } | ||||
| 
 | ||||
| static void start_apic_timer(struct kvm_lapic *apic) | ||||
| { | ||||
| 	atomic_set(&apic->lapic_timer.pending, 0); | ||||
| 
 | ||||
| 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { | ||||
| 		if (set_target_expiration(apic) && | ||||
| 			!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic))) | ||||
| 			start_sw_period(apic); | ||||
| 	} else if (apic_lvtt_tscdeadline(apic)) { | ||||
| 		if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic))) | ||||
| 			start_sw_tscdeadline(apic); | ||||
| 	} | ||||
| 	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) | ||||
| 	    && !set_target_expiration(apic)) | ||||
| 		return; | ||||
| 
 | ||||
| 	restart_apic_timer(apic); | ||||
| } | ||||
| 
 | ||||
| static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) | ||||
| @@ -1827,16 +1832,6 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu) | ||||
|  * LAPIC interface | ||||
|  *---------------------------------------------------------------------- | ||||
|  */ | ||||
| u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu) | ||||
| { | ||||
| 	struct kvm_lapic *apic = vcpu->arch.apic; | ||||
| 
 | ||||
| 	if (!lapic_in_kernel(vcpu)) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	return apic->lapic_timer.tscdeadline; | ||||
| } | ||||
| 
 | ||||
| u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) | ||||
| { | ||||
| 	struct kvm_lapic *apic = vcpu->arch.apic; | ||||
|  | ||||
| @@ -87,7 +87,6 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); | ||||
| int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); | ||||
| int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); | ||||
| 
 | ||||
| u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu); | ||||
| u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); | ||||
| void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); | ||||
| 
 | ||||
| @@ -216,4 +215,5 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu); | ||||
| void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu); | ||||
| void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu); | ||||
| bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu); | ||||
| void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu); | ||||
| #endif | ||||
|  | ||||
| @@ -2841,10 +2841,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||||
| 			kvm_vcpu_write_tsc_offset(vcpu, offset); | ||||
| 			vcpu->arch.tsc_catchup = 1; | ||||
| 		} | ||||
| 		if (kvm_lapic_hv_timer_in_use(vcpu) && | ||||
| 				kvm_x86_ops->set_hv_timer(vcpu, | ||||
| 					kvm_get_lapic_target_expiration_tsc(vcpu))) | ||||
| 			kvm_lapic_switch_to_sw_timer(vcpu); | ||||
| 
 | ||||
| 		if (kvm_lapic_hv_timer_in_use(vcpu)) | ||||
| 			kvm_lapic_restart_hv_timer(vcpu); | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * On a host with synchronized TSC, there is no need to update | ||||
| 		 * kvmclock on vcpu->cpu migration | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user