KVM: MMU: pass kvm_mmu_page struct to make_spte

The level and A/D bit support of the new SPTE can be found in the role,
which is stored in the kvm_mmu_page struct.  This merges the level and
ad_disabled arguments into a single sp argument.
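
Both values come out of the same role bitfield.  As a heavily abridged
sketch (field layout trimmed for illustration; the authoritative
definitions live in arch/x86/include/asm/kvm_host.h and
arch/x86/kvm/mmu/mmu_internal.h), the structures involved look like:

    union kvm_mmu_page_role {
            u32 word;
            struct {
                    unsigned level:4;        /* page table level */
                    unsigned ad_disabled:1;  /* A/D bits disabled */
                    /* ... many other role bits ... */
            };
    };

    struct kvm_mmu_page {
            /* ... */
            union kvm_mmu_page_role role;   /* all make_spte() now needs */
            gfn_t gfn;
            u64 *spt;
            /* ... */
    };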

For the TDP MMU, the kvm_mmu_page was not used so far (kvm_tdp_mmu_map
does not use it if the SPTE is already present), so we fetch it with
sptep_to_sp() just before calling make_spte.
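
Fetching it is cheap: sptep_to_sp() is pure pointer arithmetic plus a
page_private() lookup on the page that holds the SPTE.  Roughly, and
simplified from the spte.h of this era (error handling and special
roots omitted), the lookup works like this:

    static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
    {
            struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

            /* the shadow-page table's struct page points back at its sp */
            return (struct kvm_mmu_page *)page_private(page);
    }

    static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
    {
            return to_shadow_page(__pa(sptep));
    }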

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 7158bee4b4 (parent 87e888eafd)
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   2021-08-17 07:43:19 -04:00

5 files changed, 18 insertions(+), 16 deletions(-)

--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c

@@ -2716,8 +2716,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		was_rmapped = 1;
 	}
 
-	wrprot = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
-			   true, host_writable, sp_ad_disabled(sp), &spte);
+	wrprot = make_spte(vcpu, sp, pte_access, gfn, pfn, *sptep, speculative,
+			   true, host_writable, &spte);
 
 	if (*sptep == spte) {
 		ret = RET_PF_SPURIOUS;

--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h

@@ -1128,9 +1128,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		sptep = &sp->spt[i];
 		spte = *sptep;
 		host_writable = spte & shadow_host_writable_mask;
-		make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn,
+		make_spte(vcpu, sp, pte_access, gfn,
 			  spte_to_pfn(spte), spte, true, false,
-			  host_writable, sp_ad_disabled(sp), &spte);
+			  host_writable, &spte);
 
 		flush |= mmu_spte_update(sptep, spte);
 	}

--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c

@@ -89,15 +89,16 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 				     E820_TYPE_RAM);
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
-	       gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
-	       bool can_unsync, bool host_writable, bool ad_disabled,
-	       u64 *new_spte)
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+	       u64 old_spte, bool speculative, bool can_unsync,
+	       bool host_writable, u64 *new_spte)
 {
+	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
 	bool wrprot = false;
 
-	if (ad_disabled)
+	if (sp->role.ad_disabled)
 		spte |= SPTE_TDP_AD_DISABLED_MASK;
 	else if (kvm_vcpu_ad_need_write_protect(vcpu))
 		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h

@@ -334,10 +334,10 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
-	       gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
-	       bool can_unsync, bool host_writable, bool ad_disabled,
-	       u64 *new_spte);
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+	       u64 old_spte, bool speculative, bool can_unsync,
+	       bool host_writable, u64 *new_spte);
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
 u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
 u64 mark_spte_for_access_track(u64 spte);

--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c

@@ -897,17 +897,18 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 					  struct kvm_page_fault *fault,
 					  struct tdp_iter *iter)
 {
+	struct kvm_mmu_page *sp = sptep_to_sp(iter->sptep);
 	u64 new_spte;
 	int ret = RET_PF_FIXED;
 	bool wrprot = false;
 
+	WARN_ON(sp->role.level != fault->goal_level);
 	if (unlikely(!fault->slot))
 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
 	else
-		wrprot = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
+		wrprot = make_spte(vcpu, sp, ACC_ALL, iter->gfn,
 				   fault->pfn, iter->old_spte, fault->prefault, true,
-				   fault->map_writable, !shadow_accessed_mask,
-				   &new_spte);
+				   fault->map_writable, &new_spte);
 
 	if (new_spte == iter->old_spte)
 		ret = RET_PF_SPURIOUS;