KVM: remove kvm_guest_enter/exit wrappers
Use the functions from context_tracking.h directly.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 6edaa5307f
parent ebaac17362
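The conversion is mechanical: each of the four removed wrappers was a one-line alias for a context_tracking.h helper, so every call site switches to the helper it already resolved to. As a reference, here is a sketch of the mapping; the wrapper bodies are the ones deleted from include/linux/kvm_host.h in the last hunk of this diff, with only the include line added here for self-containment:

    #include <linux/context_tracking.h>

    /* must be called with irqs disabled */
    static inline void __kvm_guest_enter(void)   /* -> guest_enter_irqoff() */
    {
            guest_enter_irqoff();
    }

    /* must be called with irqs disabled */
    static inline void __kvm_guest_exit(void)    /* -> guest_exit_irqoff() */
    {
            guest_exit_irqoff();
    }

    static inline void kvm_guest_enter(void)     /* -> guest_enter() */
    {
            guest_enter();
    }

    static inline void kvm_guest_exit(void)      /* -> guest_exit() */
    {
            guest_exit();
    }

The one behavioural contract worth noting is the irq discipline: the double-underscore wrappers required interrupts to be disabled, and their guest_enter_irqoff()/guest_exit_irqoff() replacements keep that requirement, while plain guest_enter()/guest_exit() carry no such restriction.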
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
@@ -615,7 +615,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * Enter the guest
 		 */
 		trace_kvm_entry(*vcpu_pc(vcpu));
-		__kvm_guest_enter();
+		guest_enter_irqoff();
 		vcpu->mode = IN_GUEST_MODE;

 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
@@ -641,14 +641,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		local_irq_enable();

 		/*
-		 * We do local_irq_enable() before calling kvm_guest_exit() so
+		 * We do local_irq_enable() before calling guest_exit() so
 		 * that if a timer interrupt hits while running the guest we
 		 * account that tick as being spent in the guest. We enable
-		 * preemption after calling kvm_guest_exit() so that if we get
+		 * preemption after calling guest_exit() so that if we get
 		 * preempted we make sure ticks after that is not counted as
 		 * guest time.
 		 */
-		kvm_guest_exit();
+		guest_exit();
 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

 		/*
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
@@ -406,7 +406,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	kvm_mips_deliver_interrupts(vcpu,
 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

-	__kvm_guest_enter();
+	guest_enter_irqoff();

 	/* Disable hardware page table walking while in guest */
 	htw_stop();
@@ -418,7 +418,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	/* Re-enable HTW before enabling interrupts */
 	htw_start();

-	__kvm_guest_exit();
+	guest_exit_irqoff();
 	local_irq_enable();

 	if (vcpu->sigset_active)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
@@ -2522,7 +2522,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
 			spin_unlock(&pvc->lock);

-	kvm_guest_enter();
+	guest_enter();

 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);

@@ -2570,7 +2570,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)

 	/* make sure updates to secondary vcpu structs are visible now */
 	smp_mb();
-	kvm_guest_exit();
+	guest_exit();

 	for (sub = 0; sub < core_info.n_subcores; ++sub)
 		list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
@@ -914,7 +914,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* We get here with MSR.EE=1 */

 	trace_kvm_exit(exit_nr, vcpu);
-	kvm_guest_exit();
+	guest_exit();

 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -1531,7 +1531,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)

 	kvmppc_clear_debug(vcpu);

-	/* No need for kvm_guest_exit. It's done in handle_exit.
+	/* No need for guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */

 	/* Make sure we save the guest FPU/Altivec/VSX state */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
@@ -776,7 +776,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)

 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

-	/* No need for kvm_guest_exit. It's done in handle_exit.
+	/* No need for guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */

 	/* Switch back to user space debug context */
@@ -1012,7 +1012,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}

 	trace_kvm_exit(exit_nr, vcpu);
-	__kvm_guest_exit();
+	guest_exit_irqoff();

 	local_irq_enable();

diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
@@ -119,7 +119,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			continue;
 		}

-		__kvm_guest_enter();
+		guest_enter_irqoff();
 		return 1;
 	}

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
@@ -2623,14 +2623,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	 * guest_enter and guest_exit should be no uaccess.
 	 */
 	local_irq_disable();
-	__kvm_guest_enter();
+	guest_enter_irqoff();
 	__disable_cpu_timer_accounting(vcpu);
 	local_irq_enable();
 	exit_reason = sie64a(vcpu->arch.sie_block,
 			     vcpu->run->s.regs.gprs);
 	local_irq_disable();
 	__enable_cpu_timer_accounting(vcpu);
-	__kvm_guest_exit();
+	guest_exit_irqoff();
 	local_irq_enable();
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
@@ -765,13 +765,13 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)

 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	local_irq_disable();
-	kvm_guest_enter();
+	guest_enter_irqoff();
 	local_irq_enable();

 	rc = sie64a(scb_s, vcpu->run->s.regs.gprs);

 	local_irq_disable();
-	kvm_guest_exit();
+	guest_exit_irqoff();
 	local_irq_enable();
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -6658,7 +6658,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)

 	trace_kvm_entry(vcpu->vcpu_id);
 	wait_lapic_expire(vcpu);
-	__kvm_guest_enter();
+	guest_enter_irqoff();

 	if (unlikely(vcpu->arch.switch_db_regs)) {
 		set_debugreg(0, 7);
@@ -6717,7 +6717,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 */
 	barrier();

-	kvm_guest_exit();
+	guest_exit();

 	preempt_enable();

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -875,28 +875,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
 }
 #endif

-/* must be called with irqs disabled */
-static inline void __kvm_guest_enter(void)
-{
-	guest_enter_irqoff();
-}
-
-/* must be called with irqs disabled */
-static inline void __kvm_guest_exit(void)
-{
-	guest_exit_irqoff();
-}
-
-static inline void kvm_guest_enter(void)
-{
-	guest_enter();
-}
-
-static inline void kvm_guest_exit(void)
-{
-	guest_exit();
-}
-
 /*
  * search_memslots() and __gfn_to_memslot() are here because they are
  * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
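For call sites that sandwich guest entry between explicit irq toggles (the s390 __vcpu_run and do_vsie_run hunks above), the converted shape looks like the following skeleton. This is an illustrative sketch, not code from the patch: vcpu_run_skeleton() and enter_guest_hw() are hypothetical names, with enter_guest_hw() standing in for the arch world switch such as sie64a().

    #include <linux/kvm_host.h>
    #include <linux/context_tracking.h>

    static int enter_guest_hw(struct kvm_vcpu *vcpu);  /* hypothetical world switch */

    /* Hypothetical run-loop skeleton showing the post-patch irq discipline. */
    static int vcpu_run_skeleton(struct kvm_vcpu *vcpu)
    {
            int exit_reason;

            local_irq_disable();
            guest_enter_irqoff();   /* was __kvm_guest_enter() / kvm_guest_enter() */
            local_irq_enable();

            exit_reason = enter_guest_hw(vcpu);

            local_irq_disable();
            guest_exit_irqoff();    /* was __kvm_guest_exit() / kvm_guest_exit() */
            local_irq_enable();

            return exit_reason;
    }

Because the _irqoff variants inherit the old wrappers' "must be called with irqs disabled" contract, the surrounding local_irq_disable()/local_irq_enable() pairs in those hunks stay exactly where they were.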