KVM: MIPS/T&E: Add lockless GVA access helpers
Add helpers to allow for lockless direct access to the GVA space, by
changing the VCPU mode to READING_SHADOW_PAGE_TABLES for the duration of
the access. This allows asynchronous TLB flush requests in future patches
to safely trigger either a TLB flush before the direct GVA space access,
or a delay until the in-progress lockless direct access is complete.

The kvm_trap_emul_gva_lockless_begin() and kvm_trap_emul_gva_lockless_end()
helpers take care of guarding the direct GVA accesses, and
kvm_trap_emul_gva_fault() tries to handle a uaccess fault resulting from a
flush having taken place.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
commit 1880afd605
parent 91737ea205
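To make the intended calling pattern concrete, here is a minimal sketch of a
lockless GVA read built from these helpers. The wrapper function is
hypothetical and for illustration only; the retry-on-fault flow is the one
described above, where a uaccess fault caused by a racing flush is repaired
by kvm_trap_emul_gva_fault() and the access retried.

/*
 * Illustrative sketch only: read one word from the GVA space using the
 * new helpers. example_read_gva_word() is not part of the patch.
 */
static int example_read_gva_word(struct kvm_vcpu *vcpu, u32 *opc, u32 *out)
{
	int err;

retry:
	/* Mode change + disabled IRQs hold off asynchronous TLB flushes */
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, opc);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * The access faulted, possibly because a flush ran just
		 * before the lockless section. Try to remap and retry.
		 */
		if (kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
					    false) != KVM_MIPS_MAPPED)
			return -EFAULT;
		goto retry;
	}
	return 0;
}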
@@ -243,6 +243,7 @@ enum emulation_result {
 #define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
 #define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
 #define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
+#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
 #define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
 				 ((y) & VPN2_MASK & ~(x).tlb_mask))
 #define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
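As a quick aside on how the new macro composes with TLB_LO_IDX(): bit 0 of
the virtual page number selects the even or odd EntryLo, and ENTRYLO_D marks
the page writable. A hypothetical entry, purely for illustration:

/* Hypothetical guest TLB entry: even page valid+dirty, odd page valid only */
struct kvm_mips_tlb tlb = {
	.tlb_lo = { ENTRYLO_V | ENTRYLO_D, ENTRYLO_V },
};

/* TLB_LO_IDX() picks tlb_lo[0] for even pages, tlb_lo[1] for odd pages */
bool even_writable = TLB_IS_DIRTY(tlb, 0);		/* nonzero: dirty */
bool odd_writable  = TLB_IS_DIRTY(tlb, PAGE_SIZE);	/* zero: read only */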
@@ -640,6 +641,20 @@ pgd_t *kvm_pgd_alloc(void);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 				  bool user);
+void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
+void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);
+
+enum kvm_mips_fault_result {
+	KVM_MIPS_MAPPED = 0,
+	KVM_MIPS_GVA,
+	KVM_MIPS_GPA,
+	KVM_MIPS_TLB,
+	KVM_MIPS_TLBINV,
+	KVM_MIPS_TLBMOD,
+};
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+						   unsigned long gva,
+						   bool write);
+
 /* Emulation */
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
@@ -732,6 +732,57 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	local_irq_restore(flags);
 }
 
+/**
+ * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
+ * @vcpu:	Virtual CPU.
+ * @gva:	Guest virtual address to be accessed.
+ * @write:	True if write attempted (must be dirtied and made writable).
+ *
+ * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
+ * dirtying the page if @write so that guest instructions can be modified.
+ *
+ * Returns:	KVM_MIPS_MAPPED on success.
+ *		KVM_MIPS_GVA if bad guest virtual address.
+ *		KVM_MIPS_GPA if bad guest physical address.
+ *		KVM_MIPS_TLB if guest TLB not present.
+ *		KVM_MIPS_TLBINV if guest TLB present but not valid.
+ *		KVM_MIPS_TLBMOD if guest TLB read only.
+ */
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+						   unsigned long gva,
+						   bool write)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_mips_tlb *tlb;
+	int index;
+
+	if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
+		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu) < 0)
+			return KVM_MIPS_GPA;
+	} else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
+		   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
+		/* Address should be in the guest TLB */
+		index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
+			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
+		if (index < 0)
+			return KVM_MIPS_TLB;
+		tlb = &vcpu->arch.guest_tlb[index];
+
+		/* Entry should be valid, and dirty for writes */
+		if (!TLB_IS_VALID(*tlb, gva))
+			return KVM_MIPS_TLBINV;
+		if (write && !TLB_IS_DIRTY(*tlb, gva))
+			return KVM_MIPS_TLBMOD;
+
+		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva))
+			return KVM_MIPS_GPA;
+	} else {
+		return KVM_MIPS_GVA;
+	}
+
+	return KVM_MIPS_MAPPED;
+}
+
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
 {
 	int err;
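The enum result lets callers choose between retrying the access and
delivering the corresponding MIPS TLB exception to the guest. A purely
illustrative dispatcher (example_fixup_gva_access() is not part of the
patch, and the exception delivery is left as comments since that wiring
belongs to other code):

/* Illustrative only: act on each fault result; returns true to retry */
static bool example_fixup_gva_access(struct kvm_vcpu *vcpu, unsigned long gva,
				     bool write)
{
	switch (kvm_trap_emul_gva_fault(vcpu, gva, write)) {
	case KVM_MIPS_MAPPED:
		return true;	/* mapped (and dirtied if write): retry */
	case KVM_MIPS_TLB:
		/* no matching guest TLB entry: deliver TLB refill */
		return false;
	case KVM_MIPS_TLBINV:
		/* entry matched but V bit clear: deliver TLB invalid */
		return false;
	case KVM_MIPS_TLBMOD:
		/* write to a clean (D=0) page: deliver TLB modified */
		return false;
	default:
		/* KVM_MIPS_GVA / KVM_MIPS_GPA: bad address, internal error */
		return false;
	}
}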
@@ -786,6 +786,71 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
 	}
 }
 
+/**
+ * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
+ * @vcpu:	VCPU pointer.
+ *
+ * Call before a GVA space access outside of guest mode, to ensure that
+ * asynchronous TLB flush requests are handled or delayed until completion of
+ * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
+ *
+ * Should be called with IRQs already enabled.
+ */
+void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
+{
+	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
+	WARN_ON_ONCE(irqs_disabled());
+
+	/*
+	 * The caller is about to access the GVA space, so we set the mode to
+	 * force TLB flush requests to send an IPI, and also disable IRQs to
+	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
+	 */
+	local_irq_disable();
+
+	/*
+	 * Make sure the read of VCPU requests is not reordered ahead of the
+	 * write to vcpu->mode, or we could miss a TLB flush request while
+	 * the requester sees the VCPU as outside of guest mode and not needing
+	 * an IPI.
+	 */
+	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+
+	/*
+	 * If a TLB flush has been requested (potentially while
+	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
+	 * before accessing the GVA space, and be sure to reload the ASID if
+	 * necessary as it'll be immediately used.
+	 *
+	 * TLB flush requests after this check will trigger an IPI due to the
+	 * mode change above, which will be delayed due to IRQs disabled.
+	 */
+	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
+}
+
+/**
+ * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
+ * @vcpu:	VCPU pointer.
+ *
+ * Called after a GVA space access outside of guest mode. Should have a matching
+ * call to kvm_trap_emul_gva_lockless_begin().
+ */
+void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Make sure the write to vcpu->mode is not reordered in front of GVA
+	 * accesses, or a TLB flush requester may not think it necessary to
+	 * send an IPI.
+	 */
+	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+
+	/*
+	 * Now that the access to GVA space is complete, it's safe for pending
+	 * TLB flush request IPIs to be handled (which indicates completion).
+	 */
+	local_irq_enable();
+}
+
 static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
 				       struct kvm_vcpu *vcpu)
 {
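To see what the smp_store_mb()/smp_store_release() pairing buys, consider the
other side of the race. The sketch below is hypothetical (the real request
path is added by later patches and uses the generic KVM request machinery):
either the requester observes READING_SHADOW_PAGE_TABLES and sends an IPI,
whose handling is deferred until kvm_trap_emul_gva_lockless_end() re-enables
IRQs, or the VCPU observes the pending request in
kvm_trap_emul_check_requests() before touching the GVA space.

/* Hypothetical requester-side sketch; not part of this patch */
static void example_request_gva_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	/* Order the (atomic) request write before the vcpu->mode read */
	smp_mb__after_atomic();

	/* Kick only if the VCPU may be using GVA mappings right now */
	if (READ_ONCE(vcpu->mode) != OUTSIDE_GUEST_MODE)
		smp_send_reschedule(vcpu->cpu);	/* IPI completes the handoff */
}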