* PAE and PKU bugfixes for x86
* selftests fix for new binutils
* MMU notifier fix for arm64
-----BEGIN PGP SIGNATURE-----

iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAl9ARnoUHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroP2YAf/dgLrPm4y4jxm7Aiz3/txqrHEwogT
ZtvnzqUPb6+vkFrkop8QMOPw7A8NCfkn3/6sWbyUN5ObgOG1pxKyPraeN3ZdsDoR
KGwv6P0dKgI8B4UuGEMe9GazXv+oOv8+bSUJnE+HZiUHzJKlX4HJbxDwUhvSSatY
qYCZb/Uzqundh79TYULa7oI1/3F15A2J1zQPe4QgkToH9tsVB8PVfkH5uPJPp64M
DTm5+qgwwsBULFaAuuo3FTs9f3pWJxn8GOuico1Sm+RnR53mhbUJggUfFzP0rwzZ
Emevunje5r1rluFs+JWeNtflGH0gI4CLak7jvlOOBjrNb5XJgUSbzLXxkA==
=Jwic
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:

 - PAE and PKU bugfixes for x86
 - selftests fix for new binutils
 - MMU notifier fix for arm64

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not set
  KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
  kvm: x86: Toggling CR4.PKE does not load PDPTEs in PAE mode
  kvm: x86: Toggling CR4.SMAP does not load PDPTEs in PAE mode
  KVM: x86: fix access code passed to gva_to_gpa
  selftests: kvm: Use a shorter encoding to clear RAX
commit b2d9e99622
@@ -473,7 +473,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+				 bool may_block)
 {
 	struct kvm *kvm = mmu->kvm;
 	pgd_t *pgd;
@@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 si
 		 * If the range is too large, release the kvm->mmu_lock
 		 * to prevent starvation and lockup detector warnings.
 		 */
-		if (next != end)
+		if (may_block && next != end)
 			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+	__unmap_stage2_range(mmu, start, size, true);
+}
+
 static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
 			      phys_addr_t addr, phys_addr_t end)
 {
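
Note on the arm64 change above: cond_resched_lock() may sleep, so it is now gated on a may_block parameter, and the old unmap_stage2_range() becomes a thin wrapper that always allows blocking. Below is a minimal user-space sketch of the same gating pattern; every name in it is an illustrative stand-in, not the kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for cond_resched_lock(): drop the lock and yield. */
static void cond_resched_lock_stub(void)
{
	/* no-op in this sketch */
}

static void __unmap_range(size_t start, size_t end, bool may_block)
{
	for (size_t addr = start; addr < end; addr += 4096) {
		/* ... tear down one block of mappings here ... */

		/* Only yield the lock when the caller said blocking is allowed. */
		if (may_block && addr + 4096 < end)
			cond_resched_lock_stub();
	}
}

/* Existing callers keep the old, always-blockable behaviour. */
static void unmap_range(size_t start, size_t end)
{
	__unmap_range(start, end, true);
}

int main(void)
{
	unmap_range(0, 4096 * 4);            /* normal path: may reschedule */
	__unmap_range(0, 4096 * 4, false);   /* notifier path with blocking forbidden */
	puts("done");
	return 0;
}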
@@ -2208,18 +2214,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	unsigned flags = *(unsigned *)data;
+	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
 	return 0;
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, unsigned flags)
 {
 	if (!kvm->arch.mmu.pgd)
 		return 0;
 
 	trace_kvm_unmap_hva_range(start, end);
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
 	return 0;
 }
 
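
The plumbing above relies on the opaque void *data argument of handle_hva_to_gpa(): kvm_unmap_hva_range() now passes &flags, and the handler recovers the word with *(unsigned *)data. A small self-contained sketch of that opaque-pointer pattern follows; the helper names and the flag value are hypothetical stand-ins, not the kernel API.

#include <stdio.h>

/* Hypothetical stand-in for MMU_NOTIFIER_RANGE_BLOCKABLE. */
#define RANGE_BLOCKABLE (1u << 0)

typedef int (*hva_handler_fn)(unsigned long gpa, unsigned long size, void *data);

/* Walks the range and invokes the handler; stand-in for handle_hva_to_gpa(). */
static int for_each_chunk(unsigned long start, unsigned long end,
			  hva_handler_fn handler, void *data)
{
	int ret = 0;
	for (unsigned long a = start; a < end; a += 0x1000)
		ret |= handler(a, 0x1000, data);
	return ret;
}

static int unmap_handler(unsigned long gpa, unsigned long size, void *data)
{
	unsigned flags = *(unsigned *)data;       /* recover flags from the opaque pointer */
	int may_block = flags & RANGE_BLOCKABLE;

	printf("unmap gpa=%#lx size=%#lx may_block=%d\n", gpa, size, may_block);
	return 0;
}

int main(void)
{
	unsigned flags = RANGE_BLOCKABLE;
	/* The flags word lives on the caller's stack; handlers read it via data. */
	return for_each_chunk(0x1000, 0x3000, unmap_handler, &flags);
}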
@@ -969,7 +969,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -486,7 +486,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	return 1;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 
@@ -58,7 +58,8 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
 extern int kvm_unmap_hva_range(struct kvm *kvm,
-			       unsigned long start, unsigned long end);
+			       unsigned long start, unsigned long end,
+			       unsigned flags);
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	/* kvm_unmap_hva flushes everything anyways */
 	kvm_unmap_hva(kvm, start);
@@ -1596,7 +1596,8 @@ asmlinkage void kvm_spurious_fault(void);
 	_ASM_EXTABLE(666b, 667b)
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -1916,7 +1916,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 }
@@ -975,7 +975,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+				   X86_CR4_SMEP;
 
 	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
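
The pdptr_bits change above is the "Toggling CR4.SMAP/PKE does not load PDPTEs in PAE mode" pair of fixes: SMAP and PKE affect permission checks but not how the PAE walk locates its PDPTEs, so flipping them must no longer force a PDPTE reload. A self-contained sketch of the resulting predicate, assuming the standard CR4 bit positions, is shown below; it is an illustration, not the kvm_set_cr4() source.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_CR4_PSE  (1ull << 4)
#define X86_CR4_PAE  (1ull << 5)
#define X86_CR4_PGE  (1ull << 7)
#define X86_CR4_SMEP (1ull << 20)
#define X86_CR4_SMAP (1ull << 21)
#define X86_CR4_PKE  (1ull << 22)

/* After the fix: only bits that change the PAE page-table walk itself. */
static const uint64_t pdptr_bits =
	X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP;

/* Illustrative: does this CR4 write require reloading the PDPTEs? */
static bool needs_pdptr_reload(uint64_t old_cr4, uint64_t new_cr4)
{
	return (old_cr4 ^ new_cr4) & pdptr_bits;
}

int main(void)
{
	uint64_t cr4 = X86_CR4_PAE;

	/* Toggling PKE alone no longer forces a PDPTE reload. */
	printf("toggle PKE : %d\n", needs_pdptr_reload(cr4, cr4 | X86_CR4_PKE));
	/* Flipping a walk-relevant bit such as PSE still does. */
	printf("toggle PSE : %d\n", needs_pdptr_reload(cr4, cr4 | X86_CR4_PSE));
	return 0;
}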
@@ -10751,9 +10751,11 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 {
 	struct x86_exception fault;
+	u32 access = error_code &
+		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
 	if (!(error_code & PFERR_PRESENT_MASK) ||
-	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
+	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
 		/*
 		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
 		 * tables probably do not match the TLB. Just proceed
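
The access-code fix above works because the hardware page-fault error code also carries a PRESENT bit (and possibly others such as RSVD), while gva_to_gpa() expects only the access-type bits (write/fetch/user); handing it the raw error code lets unrelated bits distort the walk. A small sketch of the masking, using the standard x86 error-code bit layout (the definitions here are illustrative, not copied from KVM headers):

#include <stdint.h>
#include <stdio.h>

/* x86 page-fault error code bits (hardware layout). */
#define PFERR_PRESENT_MASK (1u << 0)
#define PFERR_WRITE_MASK   (1u << 1)
#define PFERR_USER_MASK    (1u << 2)
#define PFERR_RSVD_MASK    (1u << 3)
#define PFERR_FETCH_MASK   (1u << 4)

int main(void)
{
	/* A fault taken while the guest fetched from a present user page. */
	uint16_t error_code = PFERR_PRESENT_MASK | PFERR_USER_MASK | PFERR_FETCH_MASK;

	/* Only the access-type bits may be handed to the page-table walker. */
	uint32_t access = error_code &
		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);

	printf("error_code=%#x access=%#x\n", (unsigned)error_code, (unsigned)access);
	return 0;
}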
@@ -40,11 +40,11 @@ static void guest_code(void)
 
 	/* Single step test, covers 2 basic instructions and 2 emulated */
 	asm volatile("ss_start: "
-		     "xor %%rax,%%rax\n\t"
+		     "xor %%eax,%%eax\n\t"
 		     "cpuid\n\t"
 		     "movl $0x1a0,%%ecx\n\t"
 		     "rdmsr\n\t"
-		     : : : "rax", "ecx");
+		     : : : "eax", "ebx", "ecx", "edx");
 
 	/* DR6.BD test */
 	asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
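
On the selftest change above: clearing RAX via the 32-bit form is both shorter and equivalent, since writing a 32-bit register on x86-64 zero-extends into the full 64-bit register; "xor %eax,%eax" encodes as 2 bytes (31 C0) while "xor %rax,%rax" needs a REX.W prefix (48 31 C0). A user-space illustration of the zero-extension (GCC/Clang inline asm, not the selftest itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rax = 0xdeadbeefcafef00dull;

	/* The 2-byte "xor %eax,%eax" clears the whole of RAX because the
	 * 32-bit write is zero-extended to 64 bits by the CPU.
	 */
	asm volatile("xor %%eax,%%eax" : "=a"(rax));

	printf("rax = %#llx\n", (unsigned long long)rax);
	return 0;
}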
@@ -482,7 +482,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
+					     range->flags);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
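
In the hunk above, range->flags comes from the mmu_notifier range that the MM core handed to the notifier, and the MMU_NOTIFIER_RANGE_BLOCKABLE bit tells the callback whether it may sleep, which is exactly what the arm64 handler turns into may_block. A stand-alone sketch of that check, using stub types and values loosely modelled on the kernel's mmu_notifier_range_blockable() helper rather than the real definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the mmu_notifier pieces referenced above. */
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1u << 0)

struct mmu_notifier_range_stub {
	unsigned long start;
	unsigned long end;
	unsigned flags;
};

/* Mirrors the idea of mmu_notifier_range_blockable(): may the callback sleep? */
static bool range_blockable(const struct mmu_notifier_range_stub *range)
{
	return range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
}

int main(void)
{
	struct mmu_notifier_range_stub range = {
		.start = 0x1000, .end = 0x9000,
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
	};

	/* The arch code forwards range->flags so handlers can decide whether
	 * yielding the lock (cond_resched_lock) is allowed while unmapping.
	 */
	printf("blockable = %d\n", range_blockable(&range));
	return 0;
}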