KVM: PPC: PV mtmsrd L=1

The PowerPC ISA has a special instruction for mtmsr that only changes the EE
and RI bits, namely the L=1 form.

Since that one occurs reasonably often and is simple to implement, let's go
with it first. Writing EE=0 is always just a store. Writing EE=1 also requires
us to check for pending interrupts and, if necessary, exit back to the
hypervisor.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
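In C terms, the logic the patched sequence has to implement looks roughly like the sketch below. The names magic_page, int_pending and hypervisor_exit() are illustrative only; the real hook is the kvm_emulate_mtmsrd assembly template added by this patch, and the MSR copy lives in the magic page.

/* Illustrative sketch only (hypothetical names); the real hook is the
 * kvm_emulate_mtmsrd assembly template below. */
#define MSR_EE	0x8000UL	/* external interrupt enable */
#define MSR_RI	0x0002UL	/* recoverable interrupt */

struct magic_page { unsigned long msr; unsigned long int_pending; };

extern void hypervisor_exit(void);	/* hypothetical: trap back to the host */

static void pv_mtmsrd_l1(struct magic_page *shared, unsigned long rs)
{
	unsigned long msr = shared->msr & ~(MSR_EE | MSR_RI);

	msr |= rs & (MSR_EE | MSR_RI);
	shared->msr = msr;		/* EE=0 is always just this store */

	/* EE=1 may have to let the host deliver a pending interrupt */
	if ((msr & MSR_EE) && shared->int_pending)
		hypervisor_exit();
}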
Author:    Alexander Graf <agraf@suse.de>
Date:      2010-07-29 14:48:04 +02:00
Committer: Avi Kivity
commit 819a63dc79 (parent 92234722ed)
2 changed files with 101 additions and 0 deletions

arch/powerpc/kernel/kvm.c

@@ -63,6 +63,7 @@
#define KVM_INST_MTSPR_DSISR	0x7c1203a6
#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L1	0x7c010164

static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
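For reference, 0x7c010164 is the instruction image of mtmsrd with L=1 and the rS field cleared: primary opcode 31, extended opcode 178, L bit set. kvm_check_ins() compares against it with the rS bits masked out of the guest instruction, which is why the template leaves them zero. A small hedged sketch of that field breakdown (helper name made up, not part of the patch):

/* Sketch: field breakdown of the mtmsrd L=1 template (not part of the patch). */
static int is_mtmsrd_l1(unsigned int inst)
{
	unsigned int po = inst >> 26;		/* primary opcode, bits 0-5    */
	unsigned int xo = (inst >> 1) & 0x3ff;	/* extended opcode, bits 21-30 */
	unsigned int l  = (inst >> 16) & 1;	/* L bit (bit 15)              */

	return po == 31 && xo == 178 && l == 1;	/* 0x7c010164 matches */
}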
@@ -138,6 +139,43 @@ static u32 *kvm_alloc(int len)
	return p;
}

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtmsrd_reg_offs] |= rt;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

static void kvm_map_magic_page(void *data)
{
	kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
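The branch fixups in kvm_patch_ins_mtmsrd() above rely on the PowerPC I-form branch encoding: opcode 0x48000000, with the signed word displacement held in bits 6-29 (byte mask 0x03fffffc), hence the KVM_INST_B_MAX range check and the KVM_INST_B_MASK masking. A hedged sketch of forming such a branch word, assuming kvm_patch_ins_b() (not shown in this diff) does something equivalent before storing the word and flushing the icache:

/* Sketch of forming a relative "b" instruction; kvm_patch_ins_b() is assumed
 * to do something along these lines. */
#define INST_B		0x48000000	/* I-form branch opcode          */
#define INST_B_FIELD	0x03fffffc	/* signed displacement bit field */

static unsigned int make_rel_branch(long distance)
{
	/* distance is byte-relative to the branch itself and must fit the field */
	return INST_B | ((unsigned int)distance & INST_B_FIELD);
}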
@@ -211,6 +249,13 @@ static void kvm_check_ins(u32 *inst)
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		/* We use r30 and r31 during the hook */
		if (get_rt(inst_rt) < 30)
			kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	}

	switch (_inst) {
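The rt check above refuses r30 and r31 because the trampoline added below clobbers them as scratch registers. The rS/rT field sits in bits 6-10 of the instruction word, so extracting the register number is a shift and mask; a sketch of what get_rt() is assumed to boil down to (the real helper lives elsewhere in the kernel):

/* Sketch only; assumed equivalent of the in-kernel get_rt() helper.
 * inst_rt already has everything except the rS/rT bits masked off. */
static unsigned int sketch_get_rt(unsigned int inst_rt)
{
	return (inst_rt >> 21) & 0x1f;	/* register number 0..31 */
}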

arch/powerpc/kernel/kvm_emul.S

@@ -64,3 +64,59 @@ kvm_hypercall_start:
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	andi.	r30, r0, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
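
The three exported .long values are word indices into the template (the division by 4 turns byte offsets into u32 slots), which is why the C side can copy the template with memcpy() and then fix up individual instructions as p[offs]. A small sketch of that fixup step, with illustrative parameter names, mirroring what kvm_patch_ins_mtmsrd() does above:

/* Sketch with illustrative names: the .long exports above are word (u32)
 * indices into the copied template, so the C side can patch single
 * instructions in place, exactly as kvm_patch_ins_mtmsrd() does. */
typedef unsigned int u32;

static void fixup_template(u32 *p, u32 branch_offs, u32 reg_offs,
			   u32 branch_bits, u32 rt_field)
{
	p[branch_offs] |= branch_bits;	/* displacement into the "b ." slot  */
	p[reg_offs]    |= rt_field;	/* guest rS bits into the andi. slot */
}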