KVM: MMU: Expose the LA57 feature to VM.

This patch exposes the 5-level page table feature to the VM. At the same
time, the canonical virtual address check is extended to support both
48-bit and 57-bit address widths.

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 855feb6736
commit fd8cb43373
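For context: LA57 adds a fifth page-table level, widening guest linear addresses from 48 to 57 bits. The sketch below (illustrative user-space C, not part of the patch) shows how a 57-bit virtual address decomposes under 5-level paging: five 9-bit table indices plus a 12-bit offset into a 4 KiB page.

#include <stdint.h>
#include <stdio.h>

/* With CR4.LA57 set, bits 56:12 of a linear address split into five
 * 9-bit indices (PML5, PML4, PDPT, PD, PT); bits 11:0 are the offset
 * into a 4 KiB page. Level 4 is the PML5 index, level 0 the PT index. */
static unsigned table_index(uint64_t va, int level)
{
	return (va >> (12 + 9 * level)) & 0x1ff;
}

int main(void)
{
	uint64_t va = 0x00ff123456789abcULL;	/* arbitrary 57-bit address */
	static const char *name[] = { "PT", "PD", "PDPT", "PML4", "PML5" };

	for (int level = 4; level >= 0; level--)
		printf("%-4s index: %u\n", name[level], table_index(va, level));
	printf("page offset: 0x%llx\n", (unsigned long long)(va & 0xfff));
	return 0;
}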
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -85,8 +85,8 @@
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
 			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
-			  | X86_CR4_PKE))
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
+			  | X86_CR4_SMAP | X86_CR4_PKE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 
@@ -1300,20 +1300,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
 }
 
-static inline u64 get_canonical(u64 la)
-{
-	return ((int64_t)la << 16) >> 16;
-}
-
-static inline bool is_noncanonical_address(u64 la)
-{
-#ifdef CONFIG_X86_64
-	return get_canonical(la) != la;
-#else
-	return false;
-#endif
-}
-
 #define TSS_IOPB_BASE_OFFSET 0x66
 #define TSS_BASE_SIZE 0x68
 #define TSS_IOPB_SIZE (65536 / 8)
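The two hunks above remove CR4.LA57 from the reserved-bit mask and drop the fixed 48-bit canonical helpers; the helpers reappear at the end of the patch in x86.h with an address-width parameter. A simplified model of the CR4 gating (helper and parameter names here are invented for illustration; only the X86_CR4_LA57 bit position is architectural):

#include <stdbool.h>
#include <stdint.h>

#define X86_CR4_LA57	(1ULL << 12)	/* CR4.LA57 is bit 12 per the SDM */

/* Simplified model: a write to CR4 faults if it sets a bit the host
 * considers reserved, or sets LA57 without the CPUID feature exposed. */
static bool cr4_write_allowed(uint64_t cr4, uint64_t reserved_bits,
			      bool guest_has_la57)
{
	if (cr4 & reserved_bits)
		return false;		/* reserved bit set -> #GP */
	if ((cr4 & X86_CR4_LA57) && !guest_has_la57)
		return false;		/* LA57 not in guest CPUID -> #GP */
	return true;
}

int main(void)
{
	const uint64_t reserved = 0;	/* pretend nothing else is reserved */

	/* returns 0: setting LA57 without the CPUID bit is rejected */
	return cr4_write_allowed(X86_CR4_LA57, reserved, false) ? 1 : 0;
}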
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -126,13 +126,16 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
 	/*
-	 * The existing code assumes virtual address is 48-bit in the canonical
-	 * address checks; exit if it is ever changed.
+	 * The existing code assumes virtual address is 48-bit or 57-bit in the
+	 * canonical address checks; exit if it is ever changed.
 	 */
 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	if (best && ((best->eax & 0xff00) >> 8) != 48 &&
-		((best->eax & 0xff00) >> 8) != 0)
-		return -EINVAL;
+	if (best) {
+		int vaddr_bits = (best->eax & 0xff00) >> 8;
+
+		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
+			return -EINVAL;
+	}
 
 	/* Update physical-address width */
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
@@ -384,7 +387,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	/* cpuid 7.0.ecx*/
 	const u32 kvm_cpuid_7_0_ecx_x86_features =
-		F(AVX512VBMI) | F(PKU) | 0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ);
+		F(AVX512VBMI) | F(LA57) | F(PKU) |
+		0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ);
 
 	/* cpuid 7.0.edx*/
 	const u32 kvm_cpuid_7_0_edx_x86_features =
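The kvm_update_cpuid() hunk validates the virtual-address-width byte that userspace programs into CPUID leaf 0x80000008: bits 15:8 of EAX must now be 0, 48, or 57. A minimal host-side decode of that leaf, assuming GCC/Clang's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000008: EAX[7:0] = physical address bits,
	 * EAX[15:8] = linear (virtual) address bits: 48, or 57 with LA57. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("physical address bits: %u\n", eax & 0xff);
	printf("virtual address bits:  %u\n", (eax >> 8) & 0xff);
	return 0;
}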
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -689,16 +689,18 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	ulong la;
 	u32 lim;
 	u16 sel;
+	u8  va_bits;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
 		*linear = la;
-		if (is_noncanonical_address(la))
+		va_bits = ctxt_virt_addr_bits(ctxt);
+		if (get_canonical(la, va_bits) != la)
 			goto bad;
 
-		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
 		if (size > *max_size)
 			goto bad;
 		break;
@@ -1749,8 +1751,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				sizeof(base3), &ctxt->exception);
 		if (ret != X86EMUL_CONTINUE)
 			return ret;
-		if (is_noncanonical_address(get_desc_base(&seg_desc) |
-					     ((u64)base3 << 32)))
+		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
+				((u64)base3 << 32), ctxt))
 			return emulate_gp(ctxt, 0);
 	}
 load:
@@ -2841,8 +2843,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 		ss_sel = cs_sel + 8;
 		cs.d = 0;
 		cs.l = 1;
-		if (is_noncanonical_address(rcx) ||
-		    is_noncanonical_address(rdx))
+		if (emul_is_noncanonical_address(rcx, ctxt) ||
+		    emul_is_noncanonical_address(rdx, ctxt))
 			return emulate_gp(ctxt, 0);
 		break;
 	}
@@ -3757,7 +3759,7 @@ static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
-	    is_noncanonical_address(desc_ptr.address))
+	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
 		return emulate_gp(ctxt, 0);
 	if (lgdt)
 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
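In __linearize(), the emulator both rejects non-canonical targets and caps *max_size at the top of the linear-address space, now computed from the live width rather than a hard-coded 48. A standalone restatement of that bound, mirroring min_t(u64, ~0u, (1ull << va_bits) - la) (the function name is invented for the example):

#include <stdint.h>
#include <stdio.h>

/* How many bytes can be accessed starting at `la` before running past
 * 1 << va_bits, clamped to 32 bits like the u32-sized *max_size. */
static uint32_t max_access_size(uint64_t la, unsigned int va_bits)
{
	uint64_t room = (1ULL << va_bits) - la;

	return room > UINT32_MAX ? UINT32_MAX : (uint32_t)room;
}

int main(void)
{
	/* 16 bytes below the 48-bit boundary: only 16 bytes remain */
	printf("%u\n", max_access_size((1ULL << 48) - 16, 48));
	/* the same address under LA57 has plenty of room (clamped) */
	printf("%u\n", max_access_size((1ULL << 48) - 16, 57));
	return 0;
}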
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -4,7 +4,7 @@
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS				  \
 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
+	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
 
 static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 					      enum kvm_reg reg)
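KVM_POSSIBLE_CR4_GUEST_BITS lists CR4 bits that may be left guest-owned, i.e. readable from a cached shadow rather than trapping on every access, which keeps the new CR4.LA57-based width lookup cheap. A loose model of the caching idea (this is not the kernel's kvm_read_cr4_bits(); all names below are stand-ins):

#include <stdint.h>

#define X86_CR4_LA57	(1ULL << 12)

struct vcpu_model {
	uint64_t cr4_shadow;		/* last CR4 value decached from hardware */
	uint64_t cr4_guest_owned;	/* bits the guest can change without exiting */
};

/* Guest-owned bits may be stale in the shadow, so refresh them from the
 * (expensive) hardware state before answering; writes to all other bits
 * trap, so the shadow is always current for them. */
static uint64_t read_cr4_bits(struct vcpu_model *v, uint64_t mask,
			      uint64_t (*vmcs_read_cr4)(void))
{
	if (mask & v->cr4_guest_owned)
		v->cr4_shadow = vmcs_read_cr4();
	return v->cr4_shadow & mask;
}

static uint64_t fake_vmcs_read_cr4(void) { return X86_CR4_LA57; }

int main(void)
{
	struct vcpu_model v = { .cr4_shadow = 0, .cr4_guest_owned = X86_CR4_LA57 };

	return read_cr4_bits(&v, X86_CR4_LA57, fake_vmcs_read_cr4) ? 0 : 1;
}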
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -122,7 +122,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_CR4_GUEST_OWNED_BITS				      \
 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
+	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
 
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -3374,7 +3374,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    (!msr_info->host_initiated &&
 		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
 			return 1;
-		if (is_noncanonical_address(data & PAGE_MASK) ||
+		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
 		    (data & MSR_IA32_BNDCFGS_RSVD))
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
@@ -7143,7 +7143,7 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 		 * non-canonical form. This is the only check on the memory
 		 * destination for long mode!
 		 */
-		exn = is_noncanonical_address(*ret);
+		exn = is_noncanonical_address(*ret, vcpu);
 	} else if (is_protmode(vcpu)) {
 		/* Protected mode: apply checks for segment validity in the
 		 * following order:
@@ -7948,7 +7948,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
 	switch (type) {
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
-		if (is_noncanonical_address(operand.gla)) {
+		if (is_noncanonical_address(operand.gla, vcpu)) {
 			nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 			return kvm_skip_emulated_instruction(vcpu);
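In vmx_set_msr(), the BNDCFGS base address is now validated against the vCPU's active address width. A sketch of that check; the 0xffc reserved mask (bits 11:2) reflects MSR_IA32_BNDCFGS_RSVD as an assumption, and canonical() restates the patch's helper:

#include <stdbool.h>
#include <stdint.h>

static uint64_t canonical(uint64_t la, unsigned int va_bits)
{
	return (uint64_t)((int64_t)(la << (64 - va_bits)) >> (64 - va_bits));
}

/* BNDCFGS holds a 4 KiB-aligned base in bits 63:12 plus enable/preserve
 * bits; bits 11:2 are reserved and the base must be canonical. */
static bool bndcfgs_write_ok(uint64_t data, unsigned int va_bits)
{
	const uint64_t rsvd = 0xffc;		/* assumption: bits 11:2 */
	uint64_t base = data & ~0xfffULL;	/* PAGE_MASK equivalent */

	if (data & rsvd)
		return false;
	return canonical(base, va_bits) == base;
}

int main(void)
{
	/* a base with bit 47 set is only canonical once LA57 widens VAs */
	uint64_t data = 0x0000800000000000ULL | 0x1;	/* base + enable bit */

	return bndcfgs_write_ok(data, 57) && !bndcfgs_write_ok(data, 48) ? 0 : 1;
}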
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -769,6 +769,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
 		return 1;
 
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
+		return 1;
+
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE))
 			return 1;
@@ -1074,7 +1077,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_KERNEL_GS_BASE:
 	case MSR_CSTAR:
 	case MSR_LSTAR:
-		if (is_noncanonical_address(msr->data))
+		if (is_noncanonical_address(msr->data, vcpu))
 			return 1;
 		break;
 	case MSR_IA32_SYSENTER_EIP:
@@ -1091,7 +1094,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		 * value, and that something deterministic happens if the guest
 		 * invokes 64-bit SYSENTER.
 		 */
-		msr->data = get_canonical(msr->data);
+		msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu));
 	}
 	return kvm_x86_ops->set_msr(vcpu, msr);
 }
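kvm_set_msr() applies two policies above: segment-base and SYSCALL-target MSRs reject non-canonical values with #GP, while the SYSENTER MSRs are silently sign-extended so a deterministic value is always loaded. A condensed model of both paths (the enum names are invented; the real code switches on MSR indices):

#include <stdbool.h>
#include <stdint.h>

static uint64_t canonicalize(uint64_t la, unsigned int va_bits)
{
	return (uint64_t)((int64_t)(la << (64 - va_bits)) >> (64 - va_bits));
}

enum msr_kind { MSR_SYSCALL_TARGET, MSR_SYSENTER_TARGET };

/* Two policies, as in the hunks above: reject vs. silently sign-extend. */
static bool set_msr(enum msr_kind kind, uint64_t *data, unsigned int va_bits)
{
	switch (kind) {
	case MSR_SYSCALL_TARGET:	/* e.g. LSTAR, CSTAR, *GS/FS base */
		if (canonicalize(*data, va_bits) != *data)
			return false;	/* inject #GP */
		break;
	case MSR_SYSENTER_TARGET:	/* e.g. SYSENTER_EIP/ESP */
		*data = canonicalize(*data, va_bits);	/* force canonical */
		break;
	}
	return true;
}

int main(void)
{
	uint64_t target = 0x0000880000000000ULL;	/* canonical only with LA57 */
	int ok48 = set_msr(MSR_SYSCALL_TARGET, &target, 48);	/* rejected */
	int ok57 = set_msr(MSR_SYSCALL_TARGET, &target, 57);	/* accepted */

	return ok57 && !ok48 ? 0 : 1;
}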
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -97,6 +97,40 @@ static inline u32 bit(int bitno)
 	return 1 << (bitno & 31);
 }
 
+static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
+}
+
+static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
+{
+	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
+}
+
+static inline u64 get_canonical(u64 la, u8 vaddr_bits)
+{
+	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_X86_64
+	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
+#else
+	return false;
+#endif
+}
+
+static inline bool emul_is_noncanonical_address(u64 la,
+						struct x86_emulate_ctxt *ctxt)
+{
+#ifdef CONFIG_X86_64
+	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
+#else
+	return false;
+#endif
+}
+
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
 					gva_t gva, gfn_t gfn, unsigned access)
 {
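To see why the width parameter matters, a small user-space demo of the sign-extension rule the new get_canonical() implements (same expression, fixed-width types):

#include <stdint.h>
#include <stdio.h>

static uint64_t get_canonical(uint64_t la, uint8_t vaddr_bits)
{
	return (uint64_t)(((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

int main(void)
{
	/* bit 47 set, bits 63:48 clear: non-canonical at 48 bits because
	 * the sign bit (47) is not replicated upward; canonical at 57. */
	uint64_t la = 0x0000800000000000ULL;

	printf("48-bit: %scanonical\n", get_canonical(la, 48) == la ? "" : "non-");
	printf("57-bit: %scanonical\n", get_canonical(la, 57) == la ? "" : "non-");
	return 0;
}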