KVM: PPC: Use get/set for to_svcpu to help preemption
When running the 64-bit Book3s PR code without CONFIG_PREEMPT_NONE, we were
doing a few things wrong, most notably access to PACA fields without making
sure that the pointers stay stable across the access (preempt_disable()).

This patch moves to_svcpu towards a get/put model which allows us to disable
preemption while accessing the shadow vcpu fields in the PACA. That way we
can run preemptible and everyone's happy!

Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
d33ad328c0
commit
468a12c2b5
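The pattern the diff below applies everywhere is the same: bracket each access
to the shadow vcpu in a get/put pair. A minimal sketch of that pattern
(svcpu_get/svcpu_put are the helpers this patch introduces; the wrapper
function shown here is illustrative only, not part of the patch):

	/* On 64-bit Book3s PR the shadow vcpu lives in the per-CPU PACA,
	 * so the pointer is only stable while preemption is disabled. */
	static inline ulong example_get_fault_dar(struct kvm_vcpu *vcpu)
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); /* preempt_disable() on 64-bit */
		ulong r = svcpu->fault_dar;	/* PACA pointer is stable here */
		svcpu_put(svcpu);		/* preempt_enable() on 64-bit */
		return r;
	}

On 32-bit Book3s the shadow vcpu is reached through the vcpu itself rather
than the PACA, so there svcpu_put() compiles to nothing.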
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -183,7 +183,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	if ( num < 14 ) {
-		to_svcpu(vcpu)->gpr[num] = val;
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		svcpu->gpr[num] = val;
+		svcpu_put(svcpu);
 		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
 	} else
 		vcpu->arch.gpr[num] = val;
@@ -191,80 +193,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	if ( num < 14 )
-		return to_svcpu(vcpu)->gpr[num];
-	else
+	if ( num < 14 ) {
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong r = svcpu->gpr[num];
+		svcpu_put(svcpu);
+		return r;
+	} else
 		return vcpu->arch.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	to_svcpu(vcpu)->cr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->cr = val;
+	svcpu_put(svcpu);
 	to_book3s(vcpu)->shadow_vcpu->cr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->cr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->cr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 {
-	to_svcpu(vcpu)->xer = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->xer = val;
 	to_book3s(vcpu)->shadow_vcpu->xer = val;
+	svcpu_put(svcpu);
 }
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->xer;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->xer;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->ctr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->ctr = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->ctr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->ctr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->lr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->lr = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->lr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->lr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->pc = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->pc = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->pc;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->pc;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
 	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
 
 	/* Load the instruction manually if it failed to do so in the
 	 * exit path */
 	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
 		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
 
-	return svcpu->last_inst;
+	r = svcpu->last_inst;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->fault_dar;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->fault_dar;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -20,11 +20,15 @@
 #ifndef __ASM_KVM_BOOK3S_32_H__
 #define __ASM_KVM_BOOK3S_32_H__
 
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	return to_book3s(vcpu)->shadow_vcpu;
 }
 
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+}
+
 #define PTE_SIZE	12
 #define VSID_ALL	0
 #define SR_INVALID	0x00000001	/* VSID 1 should always be unused */
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -21,10 +21,16 @@
 #define __ASM_KVM_BOOK3S_64_H__
 
 #ifdef CONFIG_KVM_BOOK3S_PR
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
+	preempt_disable();
 	return &get_paca()->shadow_vcpu;
 }
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+	preempt_enable();
+}
 #endif
 
 #define SPAPR_TCE_SHIFT		12
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -151,13 +151,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	bool primary = false;
 	bool evict = false;
 	struct hpte_cache *pte;
+	int r = 0;
 
 	/* Get host physical address for gpa */
 	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (is_error_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 				 orig_pte->eaddr);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
 	}
 	hpaddr <<= PAGE_SHIFT;
 
@@ -249,7 +251,8 @@ next_pteg:
 
 	kvmppc_mmu_hpte_cache_map(vcpu, pte);
 
-	return 0;
+out:
+	return r;
 }
 
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -297,12 +300,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 	u64 gvsid;
 	u32 sr;
 	struct kvmppc_sid_map *map;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	int r = 0;
 
 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
 		/* Invalidate an entry */
 		svcpu->sr[esid] = SR_INVALID;
-		return -ENOENT;
+		r = -ENOENT;
+		goto out;
 	}
 
 	map = find_sid_vsid(vcpu, gvsid);
@@ -315,17 +320,21 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 
 	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
 
-	return 0;
+out:
+	svcpu_put(svcpu);
+	return r;
 }
 
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
 	int i;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
 	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
 	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
 		svcpu->sr[i] = SR_INVALID;
+
+	svcpu_put(svcpu);
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -88,12 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	int vflags = 0;
 	int attempt = 0;
 	struct kvmppc_sid_map *map;
+	int r = 0;
 
 	/* Get host physical address for gpa */
 	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (is_error_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
 	}
 	hpaddr <<= PAGE_SHIFT;
 	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
@@ -110,7 +112,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
 				vsid, orig_pte->eaddr);
 		WARN_ON(true);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
 	}
 
 	vsid = map->host_vsid;
@@ -131,8 +134,10 @@ map_again:
 
 	/* In case we tried normal mapping already, let's nuke old entries */
 	if (attempt > 1)
-		if (ppc_md.hpte_remove(hpteg) < 0)
-			return -1;
+		if (ppc_md.hpte_remove(hpteg) < 0) {
+			r = -1;
+			goto out;
+		}
 
 	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
 
@@ -162,7 +167,8 @@ map_again:
 		kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}
 
-	return 0;
+out:
+	return r;
 }
 
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -207,25 +213,30 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 
 static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 {
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	int i;
 	int max_slb_size = 64;
 	int found_inval = -1;
 	int r;
 
-	if (!to_svcpu(vcpu)->slb_max)
-		to_svcpu(vcpu)->slb_max = 1;
+	if (!svcpu->slb_max)
+		svcpu->slb_max = 1;
 
 	/* Are we overwriting? */
-	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
-		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
+	for (i = 1; i < svcpu->slb_max; i++) {
+		if (!(svcpu->slb[i].esid & SLB_ESID_V))
 			found_inval = i;
-		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
-			return i;
+		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
+			r = i;
+			goto out;
+		}
 	}
 
 	/* Found a spare entry that was invalidated before */
-	if (found_inval > 0)
-		return found_inval;
+	if (found_inval > 0) {
+		r = found_inval;
+		goto out;
+	}
 
 	/* No spare invalid entry, so create one */
 
@@ -233,30 +244,35 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 		max_slb_size = mmu_slb_size;
 
 	/* Overflowing -> purge */
-	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
+	if ((svcpu->slb_max) == max_slb_size)
 		kvmppc_mmu_flush_segments(vcpu);
 
-	r = to_svcpu(vcpu)->slb_max;
-	to_svcpu(vcpu)->slb_max++;
+	r = svcpu->slb_max;
+	svcpu->slb_max++;
 
+out:
+	svcpu_put(svcpu);
 	return r;
 }
 
 int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 {
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	u64 esid = eaddr >> SID_SHIFT;
 	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
 	u64 slb_vsid = SLB_VSID_USER;
 	u64 gvsid;
 	int slb_index;
 	struct kvmppc_sid_map *map;
+	int r = 0;
 
 	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
 
 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
 		/* Invalidate an entry */
-		to_svcpu(vcpu)->slb[slb_index].esid = 0;
-		return -ENOENT;
+		svcpu->slb[slb_index].esid = 0;
+		r = -ENOENT;
+		goto out;
 	}
 
 	map = find_sid_vsid(vcpu, gvsid);
@@ -269,18 +285,22 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 	slb_vsid &= ~SLB_VSID_KP;
 	slb_esid |= slb_index;
 
-	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
-	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
+	svcpu->slb[slb_index].esid = slb_esid;
+	svcpu->slb[slb_index].vsid = slb_vsid;
 
 	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
 
-	return 0;
+out:
+	svcpu_put(svcpu);
+	return r;
 }
 
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
-	to_svcpu(vcpu)->slb_max = 1;
-	to_svcpu(vcpu)->slb[0].esid = 0;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->slb_max = 1;
+	svcpu->slb[0].esid = 0;
+	svcpu_put(svcpu);
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -230,9 +230,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		r = kvmppc_st(vcpu, &addr, 32, zeros, true);
 		if ((r == -ENOENT) || (r == -EPERM)) {
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+
+			svcpu = svcpu_get(vcpu);
 			*advance = 0;
 			vcpu->arch.shared->dar = vaddr;
-			to_svcpu(vcpu)->fault_dar = vaddr;
+			svcpu->fault_dar = vaddr;
 
 			dsisr = DSISR_ISSTORE;
 			if (r == -ENOENT)
@@ -241,7 +244,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				dsisr |= DSISR_PROTFAULT;
 
 			vcpu->arch.shared->dsisr = dsisr;
-			to_svcpu(vcpu)->fault_dsisr = dsisr;
+			svcpu->fault_dsisr = dsisr;
+			svcpu_put(svcpu);
 
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_DATA_STORAGE);
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -56,10 +56,12 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu_put(svcpu);
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -70,10 +72,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
+	svcpu_put(svcpu);
 #endif
 
 	kvmppc_giveup_ext(vcpu, MSR_FP);
@@ -308,19 +312,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr =
-			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
 		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -521,21 +528,25 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
 		vcpu->stat.pf_instruc++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
-		    == SR_INVALID) {
+		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+		if (shadow_srr1 & 0x40000000) {
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -548,33 +559,37 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.shared->msr |=
-				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		u32 fault_dsisr = svcpu->fault_dsisr;
 		vcpu->stat.pf_storage++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, dar);
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* The only case we need to handle is missing shadow PTEs */
-		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+		if (fault_dsisr & DSISR_NOHPTE) {
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 		} else {
 			vcpu->arch.shared->dar = dar;
-			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+			vcpu->arch.shared->dsisr = fault_dsisr;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -610,10 +625,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		enum emulation_result er;
+		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		ulong flags;
 
 program_interrupt:
-		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+		svcpu = svcpu_get(vcpu);
+		flags = svcpu->shadow_srr1 & 0x1f0000ull;
+		svcpu_put(svcpu);
 
 		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -741,14 +759,18 @@ program_interrupt:
 		r = RESUME_GUEST;
 		break;
 	default:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
+		svcpu_put(svcpu);
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
 	}
+	}
 
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -118,11 +118,14 @@ TRACE_EVENT(kvm_book3s_exit,
 	),
 
 	TP_fast_assign(
+		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		__entry->exit_nr	= exit_nr;
 		__entry->pc		= kvmppc_get_pc(vcpu);
 		__entry->dar		= kvmppc_get_fault_dar(vcpu);
 		__entry->msr		= vcpu->arch.shared->msr;
-		__entry->srr1		= to_svcpu(vcpu)->shadow_srr1;
+		svcpu = svcpu_get(vcpu);
+		__entry->srr1		= svcpu->shadow_srr1;
+		svcpu_put(svcpu);
 	),
 
 	TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx",