KVM: x86: allow kvm_x86_ops.set_efer to return an error value
This will be used to signal an error to userspace in case the vendor code fails while handling this MSR (e.g. -ENOMEM).

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20201001112954.6258-4-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 72f211ecaa (parent 7dffecaf4e), committed by Paolo Bonzini
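The shape of the change, as a minimal user-space C sketch (illustrative only: `vendor_set_efer` and the simplified structs are stand-ins for `kvm_x86_ops.set_efer` and the real vCPU state, not kernel code): the vendor callback may now fail with a negative errno, and the common `set_efer()` propagates that failure instead of assuming success.

/* Minimal sketch (not kernel code) of the new set_efer contract. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct vcpu { u64 efer; };

/* Stand-in for kvm_x86_ops.set_efer: returns 0 or a negative errno. */
static int vendor_set_efer(struct vcpu *vcpu, u64 efer)
{
	if (0)			/* e.g. an allocation failure path */
		return -ENOMEM;
	vcpu->efer = efer;
	return 0;
}

/* Stand-in for the common set_efer() after this patch. */
static int set_efer(struct vcpu *vcpu, u64 efer)
{
	int r = vendor_set_efer(vcpu, efer);

	if (r)
		return r;	/* the error reaches the MSR write path */
	return 0;
}

int main(void)
{
	struct vcpu v = { 0 };

	printf("set_efer -> %d\n", set_efer(&v, 0x500));
	return 0;
}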
arch/x86/include/asm/kvm_host.h
@@ -1101,7 +1101,7 @@ struct kvm_x86_ops {
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
-	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
arch/x86/kvm/svm/svm.c
@@ -263,7 +263,7 @@ static int get_max_npt_level(void)
 #endif
 }
 
-void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	vcpu->arch.efer = efer;
@@ -283,6 +283,7 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	svm->vmcb->save.efer = efer | EFER_SVME;
 	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
+	return 0;
 }
 
 static int is_external_interrupt(u32 info)
arch/x86/kvm/svm/svm.h
@@ -350,7 +350,7 @@ static inline bool gif_set(struct vcpu_svm *svm)
 #define MSR_INVALID			0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
-void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void svm_flush_tlb(struct kvm_vcpu *vcpu);
arch/x86/kvm/vmx/vmx.c
@@ -2815,13 +2815,14 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
 
+	/* Nothing to do if hardware doesn't support EFER. */
 	if (!msr)
-		return;
+		return 0;
 
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
@@ -2833,6 +2834,7 @@ void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		msr->data = efer & ~EFER_LME;
 	}
 	setup_msrs(vmx);
+	return 0;
 }
 
 #ifdef CONFIG_X86_64
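Note the early `return 0` when `vmx_find_uret_msr()` finds no EFER slot: absence of hardware EFER support is treated as success, not as an error, so callers see no spurious failures on such hosts. A tiny sketch of that pattern (hypothetical names, same idea):

#include <stdint.h>

/* Sketch: absence of the feature is success, not failure. */
struct slot { uint64_t data; };

static int set_slot_or_skip(struct slot *s, uint64_t val)
{
	if (!s)
		return 0;	/* nothing to configure: report success */
	s->data = val;
	return 0;
}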
arch/x86/kvm/vmx/vmx.h
@@ -319,7 +319,7 @@ unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
-void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
arch/x86/kvm/x86.c
@@ -1457,6 +1457,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 old_efer = vcpu->arch.efer;
 	u64 efer = msr_info->data;
+	int r;
 
 	if (efer & efer_reserved_bits)
 		return 1;
@@ -1473,7 +1474,11 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
 
-	kvm_x86_ops.set_efer(vcpu, efer);
+	r = kvm_x86_ops.set_efer(vcpu, efer);
+	if (r) {
+		WARN_ON(r > 0);
+		return r;
+	}
 
 	/* Update reserved bits */
 	if ((efer ^ old_efer) & EFER_NX)
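The `WARN_ON(r > 0)` guards the MSR return-code convention (my reading of the surrounding code, not a documented API guarantee): in the common MSR handlers, 0 means success, a positive value means the guest should receive a #GP, and a negative value is a host-side error that propagates toward userspace. A vendor `set_efer` implementation must therefore return only 0 or a negative errno; a sketch of that rule:

#include <assert.h>

/* Sketch of the convention enforced above: vendor callbacks may
 * return 0 (ok) or a negative errno (host error); positive values
 * are reserved for the common code's "inject #GP" decision. */
static int check_vendor_ret(int r)
{
	assert(r <= 0);		/* a vendor callback must not return > 0 */
	return r;
}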