Merge branch 'kvm-arm64/concurrent-translation-fault' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit c5db649f3d
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -157,6 +157,11 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  * If device attributes are not explicitly requested in @prot, then the
  * mapping will be normal, cacheable.
  *
+ * Note that the update of a valid leaf PTE in this function will be aborted,
+ * if it's trying to recreate the exact same mapping or only change the access
+ * permissions. Instead, the vCPU will exit one more time from guest if still
+ * needed and then go through the path of relaxing permissions.
+ *
  * Note that this function will both coalesce existing table entries and split
  * existing block mappings, relying on page-faults to fault back areas outside
  * of the new mapping lazily.
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -45,6 +45,10 @@
 
 #define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)
 
+#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
+					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
+					 KVM_PTE_LEAF_ATTR_HI_S2_XN)
+
 struct kvm_pgtable_walk_data {
 	struct kvm_pgtable		*pgt;
 	struct kvm_pgtable_walker	*walker;
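The new KVM_PTE_LEAF_ATTR_S2_PERMS mask is what lets the stage-2 map path tell a permission-only update apart from a real remap: XOR the old and new descriptors and mask off the R/W/XN bits; anything left over means the output address or memory attributes changed. A standalone userspace sketch of that test (illustration only, not part of the patch; the BIT(6)/BIT(7)/BIT(54) positions are the encodings these attributes use in pgtable.c):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;
#define BIT(n)				(1ULL << (n))
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)
#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

/* Mirrors the filter in stage2_map_walker_try_leaf(): true when old and
 * new differ at most in the R/W/XN permission bits. */
static int perm_only_change(kvm_pte_t old, kvm_pte_t new)
{
	return !((old ^ new) & ~KVM_PTE_LEAF_ATTR_S2_PERMS);
}

int main(void)
{
	kvm_pte_t old = 0x40000000ULL | KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	/* Same output address, write permission added: permission-only. */
	printf("%d\n", perm_only_change(old, old | KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W));
	/* Output address changed: a real remap, takes break-before-make. */
	printf("%d\n", perm_only_change(old, old ^ BIT(30)));
	return 0;
}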
@@ -170,10 +174,9 @@ static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp)
 	smp_store_release(ptep, pte);
 }
 
-static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
-				   u32 level)
+static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
 {
-	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(pa);
+	kvm_pte_t pte = kvm_phys_to_pte(pa);
 	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
 						   KVM_PTE_TYPE_BLOCK;
 
@@ -181,12 +184,7 @@ static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
 	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
 	pte |= KVM_PTE_VALID;
 
-	/* Tolerate KVM recreating the exact same mapping. */
-	if (kvm_pte_valid(old))
-		return old == pte;
-
-	smp_store_release(ptep, pte);
-	return true;
+	return pte;
 }
 
 static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
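With this change kvm_init_valid_leaf_pte() becomes a pure constructor: it computes the descriptor and leaves both the old/new comparison and the actual store to its callers, which is what lets the hyp and stage-2 walkers below make different decisions with the same helper. A simplified userspace sketch of what it computes (illustration only; the valid/type bit positions and the [47:12] output-address field are assumptions matching pgtable.c, and the kernel additionally masks attr to the legal attribute fields):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;
#define BIT(n)			(1ULL << (n))
#define GENMASK(h, l)		((~0ULL >> (63 - (h))) & ~(BIT(l) - 1))

/* Assumed encodings: bit 0 = valid, bit 1 = type, output address in
 * bits [47:12], four levels of lookup. */
#define KVM_PTE_VALID		BIT(0)
#define KVM_PTE_TYPE_PAGE	BIT(1)	/* type bit set: page, clear: block */
#define KVM_PTE_ADDR_MASK	GENMASK(47, 12)
#define KVM_PGTABLE_MAX_LEVELS	4

static kvm_pte_t init_valid_leaf_pte(uint64_t pa, kvm_pte_t attr, uint32_t level)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;	/* kvm_phys_to_pte() */

	pte |= attr;	/* the kernel masks attr to the LO/HI attribute fields */
	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
		pte |= KVM_PTE_TYPE_PAGE;	/* last level: page descriptor */
	return pte | KVM_PTE_VALID;
}

int main(void)
{
	/* Level 3 yields a page descriptor, level 2 a block descriptor. */
	printf("0x%llx\n", (unsigned long long)init_valid_leaf_pte(0x40000000, 0, 3));
	printf("0x%llx\n", (unsigned long long)init_valid_leaf_pte(0x40000000, 0, 2));
	return 0;
}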
@@ -341,12 +339,17 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot,
 static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 				    kvm_pte_t *ptep, struct hyp_map_data *data)
 {
+	kvm_pte_t new, old = *ptep;
 	u64 granule = kvm_granule_size(level), phys = data->phys;
 
 	if (!kvm_block_mapping_supported(addr, end, phys, level))
 		return false;
 
-	WARN_ON(!kvm_set_valid_leaf_pte(ptep, phys, data->attr, level));
+	/* Tolerate KVM recreating the exact same mapping */
+	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
+	if (old != new && !WARN_ON(kvm_pte_valid(old)))
+		smp_store_release(ptep, new);
+
 	data->phys += granule;
 	return true;
 }
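For the hyp stage-1 walker the policy is: recreating the identical mapping is tolerated as a no-op, a different valid old entry trips a WARN_ON() and is left alone, and only an invalid slot is written. A compact sketch of that store policy (illustration only, not the kernel function):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;
#define KVM_PTE_VALID	(1ULL << 0)

static int install_hyp_leaf(kvm_pte_t *ptep, kvm_pte_t new)
{
	kvm_pte_t old = *ptep;

	if (old == new)
		return 0;	/* recreating the same mapping: nothing to do */
	if (old & KVM_PTE_VALID)
		return -1;	/* different valid entry: WARN_ON() in the kernel */
	*ptep = new;		/* kernel publishes with smp_store_release() */
	return 0;
}

int main(void)
{
	kvm_pte_t pte = 0;

	printf("%d\n", install_hyp_leaf(&pte, 0x1000 | KVM_PTE_VALID));	/* 0 */
	printf("%d\n", install_hyp_leaf(&pte, 0x1000 | KVM_PTE_VALID));	/* 0, no-op */
	printf("%d\n", install_hyp_leaf(&pte, 0x2000 | KVM_PTE_VALID));	/* -1 */
	return 0;
}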
@@ -461,34 +464,41 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
 	return 0;
 }
 
-static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
-				       kvm_pte_t *ptep,
-				       struct stage2_map_data *data)
+static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
+				      kvm_pte_t *ptep,
+				      struct stage2_map_data *data)
 {
+	kvm_pte_t new, old = *ptep;
 	u64 granule = kvm_granule_size(level), phys = data->phys;
+	struct page *page = virt_to_page(ptep);
 
 	if (!kvm_block_mapping_supported(addr, end, phys, level))
-		return false;
+		return -E2BIG;
 
-	/*
-	 * If the PTE was already valid, drop the refcount on the table
-	 * early, as it will be bumped-up again in stage2_map_walk_leaf().
-	 * This ensures that the refcount stays constant across a valid to
-	 * valid PTE update.
-	 */
-	if (kvm_pte_valid(*ptep))
-		put_page(virt_to_page(ptep));
+	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
+	if (kvm_pte_valid(old)) {
+		/*
+		 * Skip updating the PTE if we are trying to recreate the exact
+		 * same mapping or only change the access permissions. Instead,
+		 * the vCPU will exit one more time from guest if still needed
+		 * and then go through the path of relaxing permissions.
+		 */
+		if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS)))
+			return -EAGAIN;
 
-	if (kvm_set_valid_leaf_pte(ptep, phys, data->attr, level))
-		goto out;
-
-	/* There's an existing valid leaf entry, so perform break-before-make */
-	kvm_set_invalid_pte(ptep);
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
-	kvm_set_valid_leaf_pte(ptep, phys, data->attr, level);
-out:
+		/*
+		 * There's an existing different valid leaf entry, so perform
+		 * break-before-make.
+		 */
+		kvm_set_invalid_pte(ptep);
+		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
+		put_page(page);
+	}
+
+	smp_store_release(ptep, new);
+	get_page(page);
 	data->phys += granule;
-	return true;
+	return 0;
 }
 
 static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
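When the stage-2 walker does find a different valid leaf entry, it must follow break-before-make: invalidate the old entry, invalidate the TLB by IPA, and only then publish the new entry; skipping either step could leave two CPUs holding conflicting translations for the same IPA. A toy sketch of that ordering (illustration only; tlb_flush_ipa() here is a stand-in for kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, ...)):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;
#define KVM_PTE_VALID	(1ULL << 0)

static void tlb_flush_ipa(uint64_t addr)
{
	printf("flush stage-2 TLB for IPA 0x%llx\n", (unsigned long long)addr);
}

static void replace_leaf_bbm(kvm_pte_t *ptep, kvm_pte_t new, uint64_t addr)
{
	*ptep &= ~KVM_PTE_VALID;	/* break: kvm_set_invalid_pte() */
	tlb_flush_ipa(addr);		/* no walker may still see the old entry */
	*ptep = new;			/* make: smp_store_release() in the kernel */
}

int main(void)
{
	kvm_pte_t pte = 0x40000000 | KVM_PTE_VALID;

	replace_leaf_bbm(&pte, 0x80000000 | KVM_PTE_VALID, 0x40000000);
	printf("pte now 0x%llx\n", (unsigned long long)pte);
	return 0;
}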
@@ -516,6 +526,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 				struct stage2_map_data *data)
 {
+	int ret;
 	kvm_pte_t *childp, pte = *ptep;
 	struct page *page = virt_to_page(ptep);
 
@@ -526,8 +537,9 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 		return 0;
 	}
 
-	if (stage2_map_walker_try_leaf(addr, end, level, ptep, data))
-		goto out_get_page;
+	ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
+	if (ret != -E2BIG)
+		return ret;
 
 	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
 		return -EINVAL;
@@ -551,9 +563,8 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	}
 
 	kvm_set_table_pte(ptep, childp);
-
-out_get_page:
 	get_page(page);
-
 	return 0;
 }
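The refcounting moves as well: every valid entry in a table page holds one reference on that page, and stage2_map_walker_try_leaf() now pairs its own put_page() (old entry torn down) with get_page() (new entry installed), so stage2_map_walk_leaf() no longer needs the out_get_page label. A toy model of that invariant (illustration only, with a plain counter standing in for the struct page refcount):

#include <assert.h>
#include <stdio.h>

static int page_refs;

static void put_page(void) { assert(page_refs-- > 0); }
static void get_page(void) { page_refs++; }

/* Valid -> valid updates drop and retake a reference, so the count on
 * the table page stays constant across the update. */
static void map_leaf(int old_was_valid)
{
	if (old_was_valid)
		put_page();	/* dropped with the old entry */
	get_page();		/* taken for the new entry */
}

int main(void)
{
	page_refs = 1;		/* one existing valid entry */
	map_leaf(1);		/* valid -> valid update */
	printf("refs after valid->valid update: %d\n", page_refs);	/* still 1 */
	map_leaf(0);		/* a second entry becomes valid */
	printf("refs after new entry: %d\n", page_refs);		/* 2 */
	return 0;
}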
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -879,11 +879,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (vma_pagesize == PAGE_SIZE && !force_pte)
 		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
 							   &pfn, &fault_ipa);
-	if (writable) {
+	if (writable)
 		prot |= KVM_PGTABLE_PROT_W;
-		kvm_set_pfn_dirty(pfn);
-		mark_page_dirty(kvm, gfn);
-	}
 
 	if (fault_status != FSC_PERM && !device)
 		clean_dcache_guest_page(pfn, vma_pagesize);
@@ -911,11 +908,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 					     memcache);
 	}
 
+	/* Mark the page dirty only if the fault is handled successfully */
+	if (writable && !ret) {
+		kvm_set_pfn_dirty(pfn);
+		mark_page_dirty(kvm, gfn);
+	}
+
 out_unlock:
 	spin_unlock(&kvm->mmu_lock);
 	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
-	return ret;
+	return ret != -EAGAIN ? ret : 0;
 }
 
 /* Resolve the access fault by making the page young again. */
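Finally, user_mem_abort() treats -EAGAIN from the map path as success: the race it signals is resolved by simply re-entering the guest, which will fault again down the permission-relax path if anything is still missing, and the page is only marked dirty once the fault really was handled (ret == 0). A sketch of that tail logic (illustration only; finish_fault() is a hypothetical condensation of the final lines above):

#include <errno.h>
#include <stdio.h>

static int finish_fault(int ret, int writable)
{
	/* Mark the page dirty only if the fault is handled successfully. */
	if (writable && !ret)
		printf("kvm_set_pfn_dirty + mark_page_dirty\n");
	/* -EAGAIN means "lost the race, just re-enter the guest": success. */
	return ret != -EAGAIN ? ret : 0;
}

int main(void)
{
	printf("%d\n", finish_fault(0, 1));		/* handled: 0, page dirtied */
	printf("%d\n", finish_fault(-EAGAIN, 1));	/* raced: folded to 0, not dirtied */
	printf("%d\n", finish_fault(-ENOMEM, 1));	/* real error propagates */
	return 0;
}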