KVM: X86/MMU: Add sp_has_gptes()
Add sp_has_gptes(), which is currently equivalent to !sp->role.direct. A shadow page that has gptes needs to be write-protected, accounted, and responded to in kvm_mmu_pte_write(). Use it in those places to replace !sp->role.direct, and rename for_each_gfn_indirect_valid_sp to for_each_gfn_valid_sp_with_gptes.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Message-Id: <20220420131204.2850-2-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 767d8d8d50
parent 9f084f7c2e
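Before the diff, a minimal standalone sketch (simplified stand-in types, not the kernel sources) of the helper this commit introduces and the call-site style it enables; needs_unaccounting() is a hypothetical name used only for illustration:

#include <stdbool.h>

/* Simplified stand-ins for the kernel types; illustrative only. */
struct kvm_mmu_page_role {
	unsigned int direct  : 1;
	unsigned int invalid : 1;
};

struct kvm_mmu_page {
	struct kvm_mmu_page_role role;
};

/*
 * Currently equivalent to !sp->role.direct, but it names the property
 * the callers actually depend on: whether the page shadows guest PTEs.
 */
static bool sp_has_gptes(struct kvm_mmu_page *sp)
{
	if (sp->role.direct)
		return false;

	return true;
}

/* A call site then reads as a statement of the rule being enforced. */
static bool needs_unaccounting(struct kvm_mmu_page *sp)
{
	return !sp->role.invalid && sp_has_gptes(sp);
}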
@@ -1853,15 +1853,23 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
+static bool sp_has_gptes(struct kvm_mmu_page *sp)
+{
+	if (sp->role.direct)
+		return false;
+
+	return true;
+}
+
 #define for_each_valid_sp(_kvm, _sp, _list)				\
 	hlist_for_each_entry(_sp, _list, hash_link)			\
 		if (is_obsolete_sp((_kvm), (_sp))) {			\
 		} else
 
-#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)		\
+#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
 	for_each_valid_sp(_kvm, _sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
-		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
+		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
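A side note on the macro shape in the hunk above: both iterators end in an `if (<filter>) {} else`, so the loop body supplied by the caller becomes the else-branch and runs only when the filter passes, while the whole expansion still parses as a single statement; because the inner if always has its else, a surrounding else cannot accidentally bind to it. A minimal compilable sketch of the same idiom, with a made-up for_each_even() macro for illustration:

#include <stdio.h>

/*
 * Same "{} else" filtering idiom as for_each_valid_sp() above:
 * entries failing the filter fall into the empty then-branch,
 * and the caller's body becomes the else-branch.
 */
#define for_each_even(_i, _n)				\
	for ((_i) = 0; (_i) < (_n); (_i)++)		\
		if ((_i) % 2 != 0) { /* skip odd */ } else

int main(void)
{
	int i;

	for_each_even(i, 10)
		printf("%d\n", i);	/* prints 0, 2, 4, 6, 8 */

	return 0;
}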
@@ -2109,7 +2117,7 @@ trace_get_page:
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, sp_list);
-	if (!direct) {
+	if (sp_has_gptes(sp)) {
 		account_shadowed(vcpu->kvm, sp);
 		if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
@@ -2318,7 +2326,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 	/* Zapping children means active_mmu_pages has become unstable. */
 	list_unstable = *nr_zapped;
 
-	if (!sp->role.invalid && !sp->role.direct)
+	if (!sp->role.invalid && sp_has_gptes(sp))
 		unaccount_shadowed(kvm, sp);
 
 	if (sp->unsync)
@@ -2498,7 +2506,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	write_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2560,7 +2568,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 	 * that case, KVM must complete emulation of the guest TLB flush before
 	 * allowing shadow pages to become unsync (writable by the guest).
 	 */
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
 		if (!can_unsync)
 			return -EPERM;
 
@@ -5298,7 +5306,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	++vcpu->kvm->stat.mmu_pte_write;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+	for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);