KVM: x86 emulator: add (set|get)_msr callbacks to x86_emulate_ops
Add (set|get)_msr callbacks to x86_emulate_ops so the emulator reads and writes MSRs through the ops table instead of calling kvm_get_msr()/kvm_set_msr() and kvm_x86_ops directly.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 3fb1b5dbd3
parent 35aa5375d4
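The point of routing MSR access through the ops table is that the emulator no longer cares which backend actually owns the MSRs. As a minimal, purely illustrative sketch (not part of this commit; fake_get_msr/fake_set_msr, fake_emulate_ops and the fake_star/fake_lstar variables are hypothetical), an alternative implementation of the two new callbacks could look like this:

	/* Illustrative only: a caller-supplied MSR backend for the emulator.
	 * With ->get_msr/->set_msr in x86_emulate_ops, emulate.c does not need
	 * to reach for kvm_x86_ops or kvm_(get|set)_msr() itself for MSR access. */
	static u64 fake_star, fake_lstar;

	static int fake_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
	{
		switch (msr_index) {
		case MSR_STAR:
			*pdata = fake_star;
			return 0;
		case MSR_LSTAR:
			*pdata = fake_lstar;
			return 0;
		default:
			return 1;	/* nonzero == failure; the caller injects #GP */
		}
	}

	static int fake_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
	{
		switch (msr_index) {
		case MSR_STAR:
			fake_star = data;
			return 0;
		case MSR_LSTAR:
			fake_lstar = data;
			return 0;
		default:
			return 1;
		}
	}

	static struct x86_emulate_ops fake_emulate_ops = {
		/* ... other callbacks elided ... */
		.get_msr = fake_get_msr,
		.set_msr = fake_set_msr,
	};

The nonzero-on-failure convention matches how the wrmsr/rdmsr cases below use the callbacks: a failed ops->set_msr()/ops->get_msr() leads to kvm_inject_gp().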
@@ -139,6 +139,8 @@ struct x86_emulate_ops {
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
 	int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
+	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 };
 
 /* Type, address-of, and value of an instruction's operand. */
@@ -1875,7 +1875,7 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 }
 
 static int
-emulate_syscall(struct x86_emulate_ctxt *ctxt)
+emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
 	struct kvm_segment cs, ss;
@@ -1890,7 +1890,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 
 	setup_syscalls_segments(ctxt, &cs, &ss);
 
-	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
 	msr_data >>= 32;
 	cs.selector = (u16)(msr_data & 0xfffc);
 	ss.selector = (u16)(msr_data + 8);
@@ -1907,17 +1907,17 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 #ifdef CONFIG_X86_64
 		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
 
-		kvm_x86_ops->get_msr(ctxt->vcpu,
+		ops->get_msr(ctxt->vcpu,
 			ctxt->mode == X86EMUL_MODE_PROT64 ?
 			MSR_LSTAR : MSR_CSTAR, &msr_data);
 		c->eip = msr_data;
 
-		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
+		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
 		ctxt->eflags &= ~(msr_data | EFLG_RF);
 #endif
 	} else {
 		/* legacy mode */
-		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
 		c->eip = (u32)msr_data;
 
 		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
@@ -1927,7 +1927,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 }
 
 static int
-emulate_sysenter(struct x86_emulate_ctxt *ctxt)
+emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
 	struct kvm_segment cs, ss;
@@ -1949,7 +1949,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
 
 	setup_syscalls_segments(ctxt, &cs, &ss);
 
-	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
 		if ((msr_data & 0xfffc) == 0x0) {
@@ -1979,17 +1979,17 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
 	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
 	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
 
-	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
+	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
 	c->eip = msr_data;
 
-	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
+	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
 	c->regs[VCPU_REGS_RSP] = msr_data;
 
 	return X86EMUL_CONTINUE;
 }
 
 static int
-emulate_sysexit(struct x86_emulate_ctxt *ctxt)
+emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
 	struct kvm_segment cs, ss;
@@ -2012,7 +2012,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 
 	cs.dpl = 3;
 	ss.dpl = 3;
-	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
 		cs.selector = (u16)(msr_data + 16);
@@ -3099,7 +3099,7 @@ twobyte_insn:
 		}
 		break;
 	case 0x05:	/* syscall */
-		rc = emulate_syscall(ctxt);
+		rc = emulate_syscall(ctxt, ops);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 		else
@@ -3155,7 +3155,7 @@ twobyte_insn:
 		/* wrmsr */
 		msr_data = (u32)c->regs[VCPU_REGS_RAX]
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
-		if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
+		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
 			kvm_inject_gp(ctxt->vcpu, 0);
 			goto done;
 		}
@@ -3164,7 +3164,7 @@ twobyte_insn:
 		break;
 	case 0x32:
 		/* rdmsr */
-		if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
+		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
 			kvm_inject_gp(ctxt->vcpu, 0);
 			goto done;
 		} else {
@@ -3175,14 +3175,14 @@ twobyte_insn:
 		c->dst.type = OP_NONE;
 		break;
 	case 0x34:	/* sysenter */
-		rc = emulate_sysenter(ctxt);
+		rc = emulate_sysenter(ctxt, ops);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 		else
 			goto writeback;
 		break;
 	case 0x35:	/* sysexit */
-		rc = emulate_sysexit(ctxt);
+		rc = emulate_sysexit(ctxt, ops);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 		else
@@ -3811,6 +3811,8 @@ static struct x86_emulate_ops emulate_ops = {
 	.set_rflags = emulator_set_rflags,
 	.get_dr = emulator_get_dr,
 	.set_dr = emulator_set_dr,
+	.set_msr = kvm_set_msr,
+	.get_msr = kvm_get_msr,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)