KVM: VMX: Add proper cache tracking for CR4
Move CR4 caching into the standard register caching mechanism in order
to take advantage of the availability checks provided by regs_avail.
This avoids multiple VMREADs and retpolines (when configured) during
nested VMX transitions as kvm_read_cr4_bits() is invoked multiple times
on each transition, e.g. when stuffing CR0 and CR3.

As an added bonus, this eliminates a kvm_x86_ops hook, saves a retpoline
on SVM when reading CR4, and squashes the confusing naming discrepancy
of "cache_reg" vs. "decache_cr4_guest_bits".

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200502043234.12481-7-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit f98c1e7712
parent 0cc69204e7
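As context for the diff below: regs_avail is a per-vCPU bitmap in which a
register's bit is set once its value has been pulled out of (or written to)
the VMCS, and cleared on VM-Exit so the next read refetches it. The
stand-alone C sketch below illustrates that pattern. The helper names mirror
the kernel's kvm_register_is_available()/kvm_register_mark_available(), but
the struct layout and the vmcs_read_guest_cr4() stand-in are simplified
assumptions for illustration, not the kernel's actual code.

#include <stdbool.h>
#include <stdio.h>

enum kvm_reg_sketch { VCPU_EXREG_CR4 = 0 };

struct vcpu {
	unsigned long regs_avail;           /* bitmap of cached registers */
	unsigned long cr4;                  /* cached guest CR4 */
	unsigned long cr4_guest_owned_bits; /* CR4 bits the guest may own */
};

/* Stand-in for the expensive VMREAD of GUEST_CR4 (hypothetical helper). */
static unsigned long vmcs_read_guest_cr4(void)
{
	puts("VMREAD");        /* visible only on a cache miss */
	return 0x2000;         /* arbitrary value for the demo */
}

static bool kvm_register_is_available(struct vcpu *v, enum kvm_reg_sketch reg)
{
	return v->regs_avail & (1ul << reg);
}

static void kvm_register_mark_available(struct vcpu *v, enum kvm_reg_sketch reg)
{
	v->regs_avail |= 1ul << reg;
}

/* Same shape as the new kvm_read_cr4_bits(): VMREAD at most once per exit. */
static unsigned long read_cr4_bits(struct vcpu *v, unsigned long mask)
{
	unsigned long owned = v->cr4_guest_owned_bits;

	if ((mask & owned) &&
	    !kvm_register_is_available(v, VCPU_EXREG_CR4)) {
		v->cr4 &= ~owned;
		v->cr4 |= vmcs_read_guest_cr4() & owned;
		kvm_register_mark_available(v, VCPU_EXREG_CR4);
	}
	return v->cr4 & mask;
}

int main(void)
{
	struct vcpu v = { .cr4_guest_owned_bits = 0x2000 };

	read_cr4_bits(&v, 0x2000);  /* miss: performs the stand-in VMREAD */
	read_cr4_bits(&v, 0x2000);  /* hit: served from the cached cr4 */
	return 0;
}

Run back to back, only the first read_cr4_bits() call performs the stand-in
VMREAD; the second is served from the cache. That is exactly the saving the
message above describes for nested transitions, where kvm_read_cr4_bits() is
called repeatedly between two cache resets.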
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -168,6 +168,7 @@ enum kvm_reg {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
 	VCPU_EXREG_CR3,
+	VCPU_EXREG_CR4,
 	VCPU_EXREG_RFLAGS,
 	VCPU_EXREG_SEGMENTS,
 	VCPU_EXREG_EXIT_INFO_1,
 	VCPU_EXREG_EXIT_INFO_2,
@@ -1092,7 +1093,6 @@ struct kvm_x86_ops {
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
-	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -129,8 +129,9 @@ static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
 static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
-	if (tmask & vcpu->arch.cr4_guest_owned_bits)
-		kvm_x86_ops.decache_cr4_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }
 
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1525,10 +1525,6 @@ static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
-static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
-{
-}
-
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
 	ulong gcr0 = svm->vcpu.arch.cr0;
@@ -4007,7 +4003,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
-	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr4 = svm_set_cr4,
 	.set_efer = svm_set_efer,
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2204,6 +2204,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
+	unsigned long guest_owned_bits;
+
 	kvm_register_mark_available(vcpu, reg);
 
 	switch (reg) {
@@ -2221,6 +2223,12 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 		if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
 			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
 		break;
+	case VCPU_EXREG_CR4:
+		guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+		vcpu->arch.cr4 &= ~guest_owned_bits;
+		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
+		break;
 	default:
 		WARN_ON_ONCE(1);
 		break;
@@ -2922,14 +2930,6 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
 }
 
-static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
-{
-	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
-
-	vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
-	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
-}
-
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -3128,6 +3128,7 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return 1;
 
 	vcpu->arch.cr4 = cr4;
+	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
 
 	if (!enable_unrestricted_guest) {
 		if (enable_ept) {
@@ -7809,7 +7810,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
-	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr4 = vmx_set_cr4,
 	.set_efer = vmx_set_efer,
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -453,6 +453,7 @@ static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
 				  | (1 << VCPU_EXREG_PDPTR)
 				  | (1 << VCPU_EXREG_SEGMENTS)
 				  | (1 << VCPU_EXREG_CR3)
+				  | (1 << VCPU_EXREG_CR4)
 				  | (1 << VCPU_EXREG_EXIT_INFO_1)
 				  | (1 << VCPU_EXREG_EXIT_INFO_2));
 	vcpu->arch.regs_dirty = 0;