Merge branch kvm-arm64/parallel-access-faults into kvmarm/next

* kvm-arm64/parallel-access-faults:
  : Parallel stage-2 access fault handling
  :
  : The parallel faults changes that went into 6.2 covered most stage-2
  : aborts, with the exception of stage-2 access faults. Building on top of
  : the new infrastructure, this series adds support for handling access
  : faults (i.e. updating the access flag) in parallel.
  :
  : This is expected to provide a performance uplift for cores that do not
  : implement FEAT_HAFDBS, such as those from the fruit company.
  KVM: arm64: Condition HW AF updates on config option
  KVM: arm64: Handle access faults behind the read lock
  KVM: arm64: Don't serialize if the access flag isn't set
  KVM: arm64: Return EAGAIN for invalid PTE in attr walker
  KVM: arm64: Ignore EAGAIN for walks outside of a fault
  KVM: arm64: Use KVM's pte type/helpers in handle_access_fault()

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 52b603628a
Oliver Upton, 2023-02-13 22:33:10 +0000
3 changed files with 54 additions and 15 deletions
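
For context, the core trick of the series is making the access-flag update
safe against walkers racing on the same PTE: the update is attempted with a
compare-and-swap, and losing the race surfaces as -EAGAIN so the vCPU simply
retries the faulting access. Below is a minimal userspace model of that
pattern using C11 atomics; the names, bit layout, and toy PTE value are
illustrative, not kernel API (the kernel's equivalent is stage2_attr_walker()
feeding a cmpxchg-based PTE update when the walk is shared).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(UINT64_C(1) << 0)	/* descriptor valid bit */
#define PTE_S2_AF	(UINT64_C(1) << 10)	/* stage-2 access flag */

/*
 * Try to set the access flag on a PTE without excluding other walkers.
 * If a racing walker invalidated or changed the PTE under us, report
 * failure so the caller can retry the faulting access, mirroring the
 * -EAGAIN convention introduced by this series.
 */
static bool pte_mkyoung_shared(_Atomic uint64_t *ptep)
{
	uint64_t old = atomic_load_explicit(ptep, memory_order_relaxed);

	if (!(old & PTE_VALID))
		return false;

	return atomic_compare_exchange_strong(ptep, &old, old | PTE_S2_AF);
}

int main(void)
{
	_Atomic uint64_t pte = (UINT64_C(0x40000) << 12) | PTE_VALID; /* toy PTE */

	printf("AF update: %s\n", pte_mkyoung_shared(&pte) ? "done" : "retry");
	return 0;
}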

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h

@@ -71,6 +71,11 @@ static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
 	return pte;
 }
 
+static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
+{
+	return __phys_to_pfn(kvm_pte_to_phys(pte));
+}
+
 static inline u64 kvm_granule_shift(u32 level)
 {
 	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
@@ -188,12 +193,15 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
  *					children.
  * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
  *					with other software walkers.
+ * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
+ *					invoked from a fault handler.
  */
 enum kvm_pgtable_walk_flags {
 	KVM_PGTABLE_WALK_LEAF			= BIT(0),
 	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
 	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
 	KVM_PGTABLE_WALK_SHARED			= BIT(3),
+	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
 };
 
 struct kvm_pgtable_visit_ctx {

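The new kvm_pte_to_pfn() helper above is just a composition of the existing
conversions: mask the output address out of the descriptor, then shift it
down to a frame number. A standalone model, assuming 4KiB pages and the
output address in PTE bits [47:12] (the kernel's kvm_pte_to_phys()
additionally handles 52-bit physical addresses; everything named here is
illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4KiB pages (assumed for this sketch) */

/* Output address lives in descriptor bits [47:12]. */
#define PTE_ADDR_MASK	(((UINT64_C(1) << 48) - 1) & \
			 ~((UINT64_C(1) << PAGE_SHIFT) - 1))

static uint64_t pte_to_phys(uint64_t pte) { return pte & PTE_ADDR_MASK; }
static uint64_t phys_to_pfn(uint64_t pa)  { return pa >> PAGE_SHIFT; }

/* Model of kvm_pte_to_pfn(): __phys_to_pfn(kvm_pte_to_phys(pte)). */
static uint64_t pte_to_pfn(uint64_t pte)  { return phys_to_pfn(pte_to_phys(pte)); }

int main(void)
{
	uint64_t pte = 0x0000400000000FC3ULL;	/* toy leaf descriptor */

	printf("pfn = 0x%" PRIx64 "\n", pte_to_pfn(pte));	/* 0x400000000 */
	return 0;
}
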
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c

@@ -168,6 +168,25 @@ static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
 	return walker->cb(ctx, visit);
 }
 
+static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
+				      int r)
+{
+	/*
+	 * Visitor callbacks return EAGAIN when the conditions that led to a
+	 * fault are no longer reflected in the page tables due to a race to
+	 * update a PTE. In the context of a fault handler this is interpreted
+	 * as a signal to retry guest execution.
+	 *
+	 * Ignore the return code altogether for walkers outside a fault handler
+	 * (e.g. write protecting a range of memory) and chug along with the
+	 * page table walk.
+	 */
+	if (r == -EAGAIN)
+		return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
+
+	return !r;
+}
+
 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
 			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);
@@ -200,7 +219,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 		table = kvm_pte_table(ctx.old, level);
 	}
 
-	if (ret)
+	if (!kvm_pgtable_walk_continue(data->walker, ret))
 		goto out;
 
 	if (!table) {
@@ -211,13 +230,16 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 	childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
 	ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
-	if (ret)
+	if (!kvm_pgtable_walk_continue(data->walker, ret))
 		goto out;
 
 	if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
 		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
 
 out:
+	if (kvm_pgtable_walk_continue(data->walker, ret))
+		return 0;
+
 	return ret;
 }
@@ -584,12 +606,14 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
 		lvls = 2;
 	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
 
+#ifdef CONFIG_ARM64_HW_AFDBM
 	/*
 	 * Enable the Hardware Access Flag management, unconditionally
 	 * on all CPUs. The feature is RES0 on CPUs without the support
 	 * and must be ignored by the CPUs.
 	 */
 	vtcr |= VTCR_EL2_HA;
+#endif /* CONFIG_ARM64_HW_AFDBM */
 
 	/* Set the vmid bits */
 	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
@@ -1026,7 +1050,7 @@ static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
 
 	if (!kvm_pte_valid(ctx->old))
-		return 0;
+		return -EAGAIN;
 
 	data->level = ctx->level;
 	data->pte = pte;
@@ -1094,9 +1118,15 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
 {
 	kvm_pte_t pte = 0;
+	int ret;
 
-	stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
-				 &pte, NULL, 0);
-	dsb(ishst);
+	ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
+				       &pte, NULL,
+				       KVM_PGTABLE_WALK_HANDLE_FAULT |
+				       KVM_PGTABLE_WALK_SHARED);
+	if (!ret)
+		dsb(ishst);
+
 	return pte;
 }
@@ -1141,6 +1171,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 		clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
 
 	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
+				       KVM_PGTABLE_WALK_HANDLE_FAULT |
 				       KVM_PGTABLE_WALK_SHARED);
 	if (!ret)
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);

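The retry contract that ties these hunks together is small enough to restate:
-EAGAIN terminates a walk only when the walk was started from a fault
handler; every other walker treats it as "keep going", and any other error
stops everyone. A standalone model of that decision (the flag value mirrors
the header above; the function itself is a model, not the kernel symbol):

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#define WALK_HANDLE_FAULT	(1U << 4)  /* mirrors KVM_PGTABLE_WALK_HANDLE_FAULT */

/* Model of kvm_pgtable_walk_continue(). */
static bool walk_continue(unsigned int flags, int r)
{
	if (r == -EAGAIN)
		return !(flags & WALK_HANDLE_FAULT);

	return !r;
}

int main(void)
{
	/* A fault handler stops on -EAGAIN and retries the guest... */
	assert(!walk_continue(WALK_HANDLE_FAULT, -EAGAIN));
	/* ...a non-fault walker (e.g. write protection) shrugs it off... */
	assert(walk_continue(0, -EAGAIN));
	/* ...and real errors stop everyone. */
	assert(!walk_continue(0, -ENOMEM));
	return 0;
}
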
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c

@@ -1383,7 +1383,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else
 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
 					     __pfn_to_phys(pfn), prot,
-					     memcache, KVM_PGTABLE_WALK_SHARED);
+					     memcache,
+					     KVM_PGTABLE_WALK_HANDLE_FAULT |
+					     KVM_PGTABLE_WALK_SHARED);
 
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret) {
@@ -1401,20 +1403,18 @@ out_unlock:
 /* Resolve the access fault by making the page young again. */
 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
-	pte_t pte;
-	kvm_pte_t kpte;
+	kvm_pte_t pte;
 	struct kvm_s2_mmu *mmu;
 
 	trace_kvm_access_fault(fault_ipa);
 
-	write_lock(&vcpu->kvm->mmu_lock);
+	read_lock(&vcpu->kvm->mmu_lock);
 	mmu = vcpu->arch.hw_mmu;
-	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
-	write_unlock(&vcpu->kvm->mmu_lock);
+	pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
+	read_unlock(&vcpu->kvm->mmu_lock);
 
-	pte = __pte(kpte);
-	if (pte_valid(pte))
-		kvm_set_pfn_accessed(pte_pfn(pte));
+	if (kvm_pte_valid(pte))
+		kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
 }
 
 /**
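
With the PTE update itself race-safe, handle_access_fault() can take the MMU
lock for read, so vCPUs aging pages no longer serialize against each other.
A hedged pthread sketch of the resulting concurrency; the rwlock and thread
body stand in for kvm->mmu_lock and the vCPU fault path, and none of the
names below are kernel API:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for kvm->mmu_lock. */
static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Each thread models one vCPU resolving a stage-2 access fault. */
static void *vcpu_access_fault(void *arg)
{
	(void)arg;

	pthread_rwlock_rdlock(&mmu_lock);	/* was write_lock() before this series */
	/* ... kvm_pgtable_stage2_mkyoung(): cmpxchg the AF into the PTE ... */
	pthread_rwlock_unlock(&mmu_lock);
	return NULL;
}

int main(void)
{
	pthread_t vcpu[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&vcpu[i], NULL, vcpu_access_fault, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(vcpu[i], NULL);

	puts("access faults handled in parallel");
	return 0;
}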