KVM/arm64 fixes for Linux 5.8, take #3

- Fix a corner case of a new mapping inheriting exec permission from a
   previous mapping, and yet bypassing invalidation of the I-cache
 - Make sure PtrAuth predicates only generate inline code for the
  non-VHE hypervisor code
 -----BEGIN PGP SIGNATURE-----
 
 iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAl8f3r4PHG1hekBrZXJu
 ZWwub3JnAAoJECPQ0LrRPXpDgXgP/jFGbjwpyXxllzl+qHO7ffuhgYL/fph9gfB6
 XTjQOhGttyAlCgecWID5Lw8OJ+P/S0cZzteu/v2nw2c9RIqOAOsf8kDEpegXKVjn
 htbgLacg2/nJQy1iTwjhHXpRqi5yDCp6pMh5Qy0TKSuRTOGUrlz9EtO7xtkOVeMF
 +DZrlcBtBk5NS6ZOdrgjsI8AiDExFSZm6lkW6R7pmqIsVBk8aUDWMfLq1rIZArzO
 H9KikiOVmgh5+vlpUXv1SVqHArCGe8K8I13atFgp8RYPQUZ8QJT7C0mdtwUGwjAi
 EPfHXmx0RKRws5uXq3P6ifxGcxFtjYCRplx4gX/LCDTnN3KNsWDjhjsGDC2DSOXY
 EpFVj+sj0FQgA3vauaLlyJRd/g9H5a5ptZDBkqfsP7EKXVLVX9HXUwQjL753HUjb
 z/E1EWA4v7uF9ysPkKxmlO4WLqLFNFS6FlFnZl2n8GSg49/ZvfipUA5UqVBwvdpj
 Vy6zpTYyojXirj+4puphoDem/Tyf4gnxIEX6VPw4noxpw2XZJ9JsrR+NIzUzsMV9
 fYap1UD67JIEpnjP0qw50QCxOe3YMmPJhs3c148biki0Jc+5E0otipYaIqI7WhE0
 E8EQISIkuVSoCu6FN6kcFOfIHII5AWmj7qsvjrdb6mHNyx3/kq93tQH4mc/1ajHs
 IkkBi3Mk
 =4r6/
 -----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-5.8-4' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/arm64 fixes for Linux 5.8, take #3

- Fix a corner case of a new mapping inheriting exec permission from a
  previous mapping, and yet bypassing invalidation of the I-cache
- Make sure PtrAuth predicates only generate inline code for the
  non-VHE hypervisor code
This commit is contained in:
Paolo Bonzini 2020-07-30 18:10:26 -04:00
commit d741dcae55
2 changed files with 14 additions and 8 deletions

View File

@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
system_supports_generic_auth()) && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu) \
((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
(vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu) false
#endif
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)

View File

@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
return true;
}
static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
{
pud_t *pudp;
pmd_t *pmdp;
@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
return false;
if (pudp)
return kvm_s2pud_exec(pudp);
return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
else if (pmdp)
return kvm_s2pmd_exec(pmdp);
return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
else
return kvm_s2pte_exec(ptep);
return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* execute permissions, and we preserve whatever we have.
*/
needs_exec = exec_fault ||
(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
(fault_status == FSC_PERM &&
stage2_is_exec(kvm, fault_ipa, vma_pagesize));
if (vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);