KVM, pkeys: expose CPUID/CR4 to guest

X86_FEATURE_PKU is referred to as "PKU" in the hardware documentation and is
enumerated by CPUID.7.0.ECX[3]:PKU. X86_FEATURE_OSPKE indicates OS support for
pkeys: it is enumerated by CPUID.7.0.ECX[4]:OSPKE and reflects the setting of
CR4.PKE (bit 22).
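
For illustration only (not part of this patch): inside a guest, both bits can be
read with a plain CPUID query. The sketch below assumes a GCC/clang toolchain
that provides <cpuid.h>; bit 3 of ECX is PKU, bit 4 is OSPKE.

/*
 * Hypothetical guest-side check, not part of this patch: read
 * CPUID.7.0.ECX and test PKU (bit 3, hardware support) and OSPKE
 * (bit 4, set when the guest kernel has enabled CR4.PKE).
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* __get_cpuid_count() returns 0 if leaf 7 is not supported. */
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 7 not supported");
        return 1;
    }
    printf("PKU:   %s\n", (ecx & (1u << 3)) ? "yes" : "no");
    printf("OSPKE: %s\n", (ecx & (1u << 4)) ? "yes" : "no");
    return 0;
}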

This patch hides CPUID.PKU from the guest when EPT is disabled, because pkeys
are not yet implemented for shadow paging.
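
Also for illustration (not part of this patch): on the host, the effect of this
filtering can be observed through the KVM_GET_SUPPORTED_CPUID ioctl on /dev/kvm.
A minimal sketch, assuming a Linux host with <linux/kvm.h>; with the patch
applied, leaf 7 / subleaf 0 should report PKU only when EPT is enabled (e.g. it
is expected to read as 0 when kvm_intel is loaded with ept=0).

/*
 * Host-side sketch: ask KVM which CPUID bits it can expose and report
 * whether PKU (leaf 7, subleaf 0, ECX bit 3) is among them.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0) {
        perror("open /dev/kvm");
        return 1;
    }

    /* 128 entries is more than enough for the supported-CPUID list. */
    int nent = 128;
    struct kvm_cpuid2 *cpuid = calloc(1, sizeof(*cpuid) +
                                      nent * sizeof(cpuid->entries[0]));
    cpuid->nent = nent;

    if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
        perror("KVM_GET_SUPPORTED_CPUID");
        return 1;
    }

    for (unsigned int i = 0; i < cpuid->nent; i++) {
        struct kvm_cpuid_entry2 *e = &cpuid->entries[i];
        if (e->function == 7 && e->index == 0)
            printf("PKU exposed to guests: %s\n",
                   (e->ecx & (1u << 3)) ? "yes" : "no");
    }

    free(cpuid);
    close(kvm);
    return 0;
}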

Signed-off-by: Huaitong Han <huaitong.han@intel.com>
Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit b9baba8614 (parent be94f6b710)
Author:    Huaitong Han, 2016-03-22 16:51:21 +08:00
Committer: Paolo Bonzini

4 changed files with 37 additions and 6 deletions

arch/x86/include/asm/kvm_host.h

@@ -84,7 +84,8 @@
               | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
               | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
               | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
-              | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
+              | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
+              | X86_CR4_PKE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

arch/x86/kvm/cpuid.c

@@ -88,6 +88,16 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
         apic->lapic_timer.timer_mode_mask = 1 << 17;
     }
 
+    best = kvm_find_cpuid_entry(vcpu, 7, 0);
+    if (best) {
+        /* Update OSPKE bit */
+        if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
+            best->ecx &= ~F(OSPKE);
+            if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
+                best->ecx |= F(OSPKE);
+        }
+    }
+
     best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
     if (!best) {
         vcpu->arch.guest_supported_xcr0 = 0;
@@ -360,6 +370,9 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
     const u32 kvm_cpuid_D_1_eax_x86_features =
         F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
 
+    /* cpuid 7.0.ecx*/
+    const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;
+
     /* all calls to cpuid_count() should be made on the same cpu */
     get_cpu();
@@ -437,10 +450,16 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
             cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
             // TSC_ADJUST is emulated
             entry->ebx |= F(TSC_ADJUST);
-        } else
+            entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+            cpuid_mask(&entry->ecx, CPUID_7_ECX);
+            /* PKU is not yet implemented for shadow paging. */
+            if (!tdp_enabled)
+                entry->ecx &= ~F(PKU);
+        } else {
             entry->ebx = 0;
+            entry->ecx = 0;
+        }
         entry->eax = 0;
-        entry->ecx = 0;
         entry->edx = 0;
         break;
     }

arch/x86/kvm/cpuid.h

@@ -80,6 +80,14 @@ static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
     return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
 }
 
+static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
+{
+    struct kvm_cpuid_entry2 *best;
+
+    best = kvm_find_cpuid_entry(vcpu, 7, 0);
+    return best && (best->ecx & bit(X86_FEATURE_PKU));
+}
+
 static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
 {
     struct kvm_cpuid_entry2 *best;

arch/x86/kvm/x86.c

@@ -723,7 +723,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
     unsigned long old_cr4 = kvm_read_cr4(vcpu);
     unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-                   X86_CR4_SMEP | X86_CR4_SMAP;
+                   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
 
     if (cr4 & CR4_RESERVED_BITS)
         return 1;
@@ -740,6 +740,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
     if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
         return 1;
 
+    if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
+        return 1;
+
     if (is_long_mode(vcpu)) {
         if (!(cr4 & X86_CR4_PAE))
             return 1;
@@ -765,7 +768,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
         (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
         kvm_mmu_reset_context(vcpu);
 
-    if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+    if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
         kvm_update_cpuid(vcpu);
 
     return 0;
@@ -7128,7 +7131,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
     mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
     kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
 
-    if (sregs->cr4 & X86_CR4_OSXSAVE)
+    if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
         kvm_update_cpuid(vcpu);
 
     idx = srcu_read_lock(&vcpu->kvm->srcu);