KVM: PPC: Book3S HV: Handle guest-caused machine checks on POWER7 without panicking
Currently, if a machine check interrupt happens while we are in the guest, we exit the guest and call the host's machine check handler, which tends to cause the host to panic. Some machine checks can be triggered by the guest; for example, if the guest creates two entries in the SLB that map the same effective address, and then accesses that effective address, the CPU will take a machine check interrupt.

To handle this better, when a machine check happens inside the guest, we call a new function, kvmppc_realmode_machine_check(), while still in real mode before exiting the guest. On POWER7, it handles the cases that the guest can trigger, either by flushing and reloading the SLB, or by flushing the TLB, and then it delivers the machine check interrupt directly to the guest without going back to the host. On POWER7, the OPAL firmware patches the machine check interrupt vector so that it gets control first, and it leaves behind its analysis of the situation in a structure pointed to by the opal_mc_evt field of the paca. The kvmppc_realmode_machine_check() function looks at this, and if OPAL reports that there was no error, or that it has handled the error, we also go straight back to the guest with a machine check. We have to deliver a machine check to the guest since the machine check interrupt might have trashed valid values in SRR0/1.

If the machine check is one we can't handle in real mode, and one that OPAL hasn't already handled, or on PPC970, we exit the guest and call the host's machine check handler. We do this by jumping to the machine_check_fwnmi label, rather than absolute address 0x200, because we don't want to re-execute OPAL's handler on POWER7. On PPC970, the two are equivalent because address 0x200 just contains a branch.

Then, if the host machine check handler decides that the system can continue executing, kvmppc_handle_exit() delivers a machine check interrupt to the guest -- once again to let the guest know that SRR0/1 have been modified.

Signed-off-by: Paul Mackerras <paulus@samba.org>
[agraf: fix checkpatch warnings]
Signed-off-by: Alexander Graf <agraf@suse.de>
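As a quick, self-contained illustration of the classification described above (editorial sketch only, not part of the patch): the snippet below decides whether a POWER7 machine check taken in the guest is one the real-mode handler can fix by reloading the SLB or flushing the TLB. The SRR1/DSISR bit definitions are copied from the new book3s_hv_ras.c in the diff below; guest_mc_recoverable() and the main() driver are purely illustrative, and OPAL's verdict (opal_mc_evt) is ignored here for brevity.

/*
 * Illustrative only -- classifies a guest machine check the way the new
 * real-mode handler does (1 = fixable by SLB reload / TLB flush,
 * 0 = punt to the host).  Bit definitions are taken from book3s_hv_ras.c;
 * guest_mc_recoverable() itself is not part of the patch.
 */
#include <stdio.h>

#define SRR1_MC_LDSTERR		(1ul << (63 - 42))
#define SRR1_MC_IFETCH_SH	(63 - 45)
#define SRR1_MC_IFETCH_MASK	0x7ul
#define SRR1_MC_IFETCH_SLBPAR		2	/* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI		3	/* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI	4	/* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI		5	/* I-TLB multi-hit */

#define DSISR_MC_DERAT_MULTI	0x800	/* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI	0x400	/* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY	0x100	/* SLB parity error */
#define DSISR_MC_SLB_MULTI	0x080	/* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI	0x040	/* SLB parity + multi-hit */

static int guest_mc_recoverable(unsigned long srr1, unsigned long dsisr)
{
	int handled = 1;

	if (srr1 & SRR1_MC_LDSTERR) {
		/* load/store side: SLB and D-side TLB/ERAT errors are fixable */
		dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
			   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI |
			   DSISR_MC_TLB_MULTI);
		if (dsisr & 0xffffffffUL)	/* anything left is unknown */
			handled = 0;
	}

	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
	case 0:				/* no instruction-fetch error */
	case SRR1_MC_IFETCH_SLBPAR:	/* SLB errors: flush and reload the SLB */
	case SRR1_MC_IFETCH_SLBMULTI:
	case SRR1_MC_IFETCH_SLBPARMULTI:
	case SRR1_MC_IFETCH_TLBMULTI:	/* I-TLB multi-hit: flush the TLB */
		break;
	default:
		handled = 0;
	}
	return handled;
}

int main(void)
{
	/* e.g. a guest-caused SLB multi-hit on a data access: recoverable */
	printf("%d\n", guest_mc_recoverable(SRR1_MC_LDSTERR, DSISR_MC_SLB_MULTI));
	return 0;
}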
commit b4072df407 (parent 1b400ba0cd)
arch/powerpc/include/asm/mmu-hash64.h
@@ -121,6 +121,16 @@ extern char initial_stab[];
 #define PP_RXRX	3	/* Supervisor read, User read */
 #define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */
 
+/* Fields for tlbiel instruction in architecture 2.06 */
+#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
+#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
+#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
+#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
+#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
+#define TLBIEL_INVAL_SET_SHIFT	12
+
+#define POWER7_TLB_SETS	128	/* # sets in POWER7 TLB */
+
 #ifndef __ASSEMBLY__
 
 struct hash_pte {
arch/powerpc/kvm/Makefile
@@ -73,6 +73,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
 	book3s_64_vio_hv.o \
+	book3s_hv_ras.o \
 	book3s_hv_builtin.o
 
 kvm-book3s_64-module-objs := \
arch/powerpc/kvm/book3s_hv.c
@@ -545,6 +545,17 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_PERFMON:
 		r = RESUME_GUEST;
 		break;
+	case BOOK3S_INTERRUPT_MACHINE_CHECK:
+		/*
+		 * Deliver a machine check interrupt to the guest.
+		 * We have to do this, even if the host has handled the
+		 * machine check, because machine checks use SRR0/1 and
+		 * the interrupt might have trashed guest state in them.
+		 */
+		kvmppc_book3s_queue_irqprio(vcpu,
+					    BOOK3S_INTERRUPT_MACHINE_CHECK);
+		r = RESUME_GUEST;
+		break;
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		ulong flags;
arch/powerpc/kvm/book3s_hv_ras.c (new file, 144 lines)
@@ -0,0 +1,144 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/opal.h>

/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR		(1ul << (63-42))
#define SRR1_MC_IFETCH_SH	(63-45)
#define SRR1_MC_IFETCH_MASK	0x7
#define SRR1_MC_IFETCH_SLBPAR		2	/* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI		3	/* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI	4	/* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI		5	/* I-TLB multi-hit */

/* DSISR bits for machine check on POWER7 */
#define DSISR_MC_DERAT_MULTI	0x800	/* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI	0x400	/* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY	0x100	/* SLB parity error */
#define DSISR_MC_SLB_MULTI	0x080	/* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI	0x040	/* SLB parity + multi-hit */

/* POWER7 SLB flush and reload */
static void reload_slb(struct kvm_vcpu *vcpu)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* First clear out SLB */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

	/* Do they have an SLB shadow buffer registered? */
	slb = vcpu->arch.slb_shadow.pinned_addr;
	if (!slb)
		return;

	/* Sanity check */
	n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
		return;

	/* Load up the SLB from that */
	for (i = 0; i < n; ++i) {
		unsigned long rb = slb->save_area[i].esid;
		unsigned long rs = slb->save_area[i].vsid;

		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}

/* POWER7 TLB flush */
static void flush_tlb_power7(struct kvm_vcpu *vcpu)
{
	unsigned long i, rb;

	rb = TLBIEL_INVAL_SET_LPID;
	for (i = 0; i < POWER7_TLB_SETS; ++i) {
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
}

/*
 * On POWER7, see if we can handle a machine check that occurred inside
 * the guest in real mode, without switching to the host partition.
 *
 * Returns: 0 => exit guest, 1 => deliver machine check to guest
 */
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
	unsigned long srr1 = vcpu->arch.shregs.msr;
	struct opal_machine_check_event *opal_evt;
	long handled = 1;

	if (srr1 & SRR1_MC_LDSTERR) {
		/* error on load/store */
		unsigned long dsisr = vcpu->arch.shregs.dsisr;

		if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
			     DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
			/* flush and reload SLB; flushes D-ERAT too */
			reload_slb(vcpu);
			dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
				   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
		}
		if (dsisr & DSISR_MC_TLB_MULTI) {
			flush_tlb_power7(vcpu);
			dsisr &= ~DSISR_MC_TLB_MULTI;
		}
		/* Any other errors we don't understand? */
		if (dsisr & 0xffffffffUL)
			handled = 0;
	}

	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
	case 0:
		break;
	case SRR1_MC_IFETCH_SLBPAR:
	case SRR1_MC_IFETCH_SLBMULTI:
	case SRR1_MC_IFETCH_SLBPARMULTI:
		reload_slb(vcpu);
		break;
	case SRR1_MC_IFETCH_TLBMULTI:
		flush_tlb_power7(vcpu);
		break;
	default:
		handled = 0;
	}

	/*
	 * See if OPAL has already handled the condition.
	 * We assume that if the condition is recovered then OPAL
	 * will have generated an error log event that we will pick
	 * up and log later.
	 */
	opal_evt = local_paca->opal_mc_evt;
	if (opal_evt->version == OpalMCE_V1 &&
	    (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
	     opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
		handled = 1;

	if (handled)
		opal_evt->in_use = 0;

	return handled;
}

long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
	if (cpu_has_feature(CPU_FTR_ARCH_206))
		return kvmppc_realmode_mc_power7(vcpu);

	return 0;
}
arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -27,6 +27,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/exception-64s.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/mmu-hash64.h>
 
 /*****************************************************************************
  *                                                                           *
@@ -678,8 +679,7 @@ BEGIN_FTR_SECTION
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
-nohpte_cont:
-hcall_real_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
+guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
 	/* Save DEC */
 	mfspr	r5,SPRN_DEC
 	mftb	r6
@@ -700,6 +700,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r6, VCPU_FAULT_DAR(r9)
 	stw	r7, VCPU_FAULT_DSISR(r9)
 
+	/* See if it is a machine check */
+	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+	beq	machine_check_realmode
+mc_cont:
+
 	/* Save guest CTRL register, set runlatch to 1 */
 6:	mfspr	r6,SPRN_CTRLF
 	stw	r6,VCPU_CTRL(r9)
@@ -1112,38 +1117,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/*
 	 * For external and machine check interrupts, we need
 	 * to call the Linux handler to process the interrupt.
-	 * We do that by jumping to the interrupt vector address
-	 * which we have in r12.  The [h]rfid at the end of the
+	 * We do that by jumping to absolute address 0x500 for
+	 * external interrupts, or the machine_check_fwnmi label
+	 * for machine checks (since firmware might have patched
+	 * the vector area at 0x200).  The [h]rfid at the end of the
 	 * handler will return to the book3s_hv_interrupts.S code.
 	 * For other interrupts we do the rfid to get back
-	 * to the book3s_interrupts.S code here.
+	 * to the book3s_hv_interrupts.S code here.
 	 */
 	ld	r8, HSTATE_VMHANDLER(r13)
 	ld	r7, HSTATE_HOST_MSR(r13)
 
+	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
 	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+BEGIN_FTR_SECTION
 	beq	11f
-	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* RFI into the highmem handler, or branch to interrupt handler */
-12:	mfmsr	r6
-	mtctr	r12
+	mfmsr	r6
 	li	r0, MSR_RI
 	andc	r6, r6, r0
 	mtmsrd	r6, 1			/* Clear RI in MSR */
 	mtsrr0	r8
 	mtsrr1	r7
-	beqctr
+	beqa	0x500			/* external interrupt (PPC970) */
+	beq	cr1, 13f		/* machine check */
 	RFI
 
-11:
-BEGIN_FTR_SECTION
-	b	12b
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	mtspr	SPRN_HSRR0, r8
+	/* On POWER7, we have external interrupts set to use HSRR0/1 */
+11:	mtspr	SPRN_HSRR0, r8
 	mtspr	SPRN_HSRR1, r7
 	ba	0x500
 
+13:	b	machine_check_fwnmi
+
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
  * If it is an HPTE not found fault that is due to the guest accessing
@@ -1176,7 +1184,7 @@ kvmppc_hdsi:
 	cmpdi	r3, 0			/* retry the instruction */
 	beq	6f
 	cmpdi	r3, -1			/* handle in kernel mode */
-	beq	nohpte_cont
+	beq	guest_exit_cont
 	cmpdi	r3, -2			/* MMIO emulation; need instr word */
 	beq	2f
 
@@ -1190,6 +1198,7 @@ kvmppc_hdsi:
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
 	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
 	rotldi	r11, r11, 63
+fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
 	mtctr	r7
@@ -1222,7 +1231,7 @@ kvmppc_hdsi:
 	/* Unset guest mode. */
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
-	b	nohpte_cont
+	b	guest_exit_cont
 
 /*
  * Similarly for an HISI, reflect it to the guest as an ISI unless
@@ -1248,9 +1257,9 @@ kvmppc_hisi:
 	ld	r11, VCPU_MSR(r9)
 	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
 	cmpdi	r3, 0			/* retry the instruction */
-	beq	6f
+	beq	fast_interrupt_c_return
 	cmpdi	r3, -1			/* handle in kernel mode */
-	beq	nohpte_cont
+	beq	guest_exit_cont
 
 	/* Synthesize an ISI for the guest */
 	mr	r11, r3
@@ -1259,12 +1268,7 @@ kvmppc_hisi:
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
 	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
 	rotldi	r11, r11, 63
-6:	ld	r7, VCPU_CTR(r9)
-	lwz	r8, VCPU_XER(r9)
-	mtctr	r7
-	mtxer	r8
-	mr	r4, r9
-	b	fast_guest_return
+	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
 	ld	r5, KVM_VRMA_SLB_V(r6)
@@ -1280,14 +1284,14 @@ kvmppc_hisi:
 hcall_try_real_mode:
 	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
-	bne	hcall_real_cont
+	bne	guest_exit_cont
 	clrrdi	r3,r3,2
 	cmpldi	r3,hcall_real_table_end - hcall_real_table
-	bge	hcall_real_cont
+	bge	guest_exit_cont
 	LOAD_REG_ADDR(r4, hcall_real_table)
 	lwzx	r3,r3,r4
 	cmpwi	r3,0
-	beq	hcall_real_cont
+	beq	guest_exit_cont
 	add	r3,r3,r4
 	mtctr	r3
 	mr	r3,r9		/* get vcpu pointer */
@@ -1308,7 +1312,7 @@ hcall_real_fallback:
 	li	r12,BOOK3S_INTERRUPT_SYSCALL
 	ld	r9, HSTATE_KVM_VCPU(r13)
 
-	b	hcall_real_cont
+	b	guest_exit_cont
 
 	.globl	hcall_real_table
 hcall_real_table:
@@ -1567,6 +1571,21 @@ kvm_cede_exit:
 	li	r3,H_TOO_HARD
 	blr
 
+	/* Try to handle a machine check in real mode */
+machine_check_realmode:
+	mr	r3, r9		/* get vcpu pointer */
+	bl	.kvmppc_realmode_machine_check
+	nop
+	cmpdi	r3, 0		/* continue exiting from guest? */
+	ld	r9, HSTATE_KVM_VCPU(r13)
+	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+	beq	mc_cont
+	/* If not, deliver a machine check.  SRR0/1 are already set */
+	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
+	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
+	rotldi	r11, r11, 63
+	b	fast_interrupt_c_return
+
 secondary_too_late:
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	HMT_LOW