KVM: x86/mmu: Pass the memslot to the rmap callbacks
Pass the memslot to the rmap callbacks; it will be used when zapping
collapsible SPTEs to verify the memslot is compatible with hugepages
before zapping its SPTEs.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210213005015.1651772-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1b6d9d9ed5
commit 0a234f5dd0
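The shape of the change is easy to see outside the kernel tree: a slot-level handler callback type gains a memslot parameter, and the walker that invokes the callbacks threads the slot through unchanged. Below is a minimal, self-contained user-space C sketch of that pattern; the struct and function names are simplified stand-ins invented for the sketch, not the real kvm_rmap_head, kvm_memory_slot, or slot_handle_level_range definitions.

/*
 * Standalone sketch of the pattern in this patch: a slot-level handler
 * callback gains a memslot parameter, and the walker that drives the
 * callbacks passes the slot through.  All types here are simplified
 * stand-ins for the real KVM structures.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rmap_head { unsigned long val; };             /* stand-in for kvm_rmap_head */
struct memslot   { unsigned long base_gfn, npages; };/* stand-in for kvm_memory_slot */

/* Before: bool (*handler)(struct rmap_head *rmap_head);
 * After:  the callback also receives the memslot it is walking. */
typedef bool (*slot_level_handler)(struct rmap_head *rmap_head,
				   struct memslot *slot);

static bool zap_rmap(struct rmap_head *rmap_head, struct memslot *slot)
{
	/* The slot is unused for now ("no functional change intended");
	 * a later change can consult it, e.g. for hugepage checks. */
	(void)slot;
	return rmap_head->val != 0;   /* pretend "something was zapped" */
}

static bool walk_slot(struct memslot *slot, struct rmap_head *rmaps,
		      size_t n, slot_level_handler fn)
{
	bool flush = false;

	for (size_t i = 0; i < n; i++)
		flush |= fn(&rmaps[i], slot);   /* slot threaded to every callback */
	return flush;
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0x1000, .npages = 4 };
	struct rmap_head rmaps[4] = { {1}, {0}, {3}, {0} };

	printf("flush needed: %d\n", walk_slot(&slot, rmaps, 4, zap_rmap));
	return 0;
}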
@@ -1165,7 +1165,8 @@ static bool spte_wrprot_for_clear_dirty(u64 *sptep)
  *	- W bit on ad-disabled SPTEs.
  * Returns true iff any D or W bits were cleared.
  */
-static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			       struct kvm_memory_slot *slot)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1196,7 +1197,8 @@ static bool spte_set_dirty(u64 *sptep)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			     struct kvm_memory_slot *slot)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1260,7 +1262,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 	while (mask) {
 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 					  PG_LEVEL_4K, slot);
-		__rmap_clear_dirty(kvm, rmap_head);
+		__rmap_clear_dirty(kvm, rmap_head, slot);
 
 		/* clear the first set bit */
 		mask &= mask - 1;
@@ -1325,7 +1327,8 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
 }
 
-static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			  struct kvm_memory_slot *slot)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1345,7 +1348,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			   unsigned long data)
 {
-	return kvm_zap_rmapp(kvm, rmap_head);
+	return kvm_zap_rmapp(kvm, rmap_head, slot);
 }
 
 static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -5189,7 +5192,8 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
 
 /* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+				    struct kvm_memory_slot *slot);
 
 /* The caller should hold mmu-lock before calling this function. */
 static __always_inline bool
@@ -5203,7 +5207,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
 			end_gfn, &iterator) {
 		if (iterator.rmap)
-			flush |= fn(kvm, iterator.rmap);
+			flush |= fn(kvm, iterator.rmap, memslot);
 
 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
 			if (flush && lock_flush_tlb) {
@@ -5492,7 +5496,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 }
 
 static bool slot_rmap_write_protect(struct kvm *kvm,
-				    struct kvm_rmap_head *rmap_head)
+				    struct kvm_rmap_head *rmap_head,
+				    struct kvm_memory_slot *slot)
 {
 	return __rmap_write_protect(kvm, rmap_head, false);
 }
@@ -5526,7 +5531,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
-					 struct kvm_rmap_head *rmap_head)
+					 struct kvm_rmap_head *rmap_head,
+					 struct kvm_memory_slot *slot)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
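As the commit message says, the new slot argument exists so that a later patch can verify the memslot is compatible with hugepages before zapping collapsible SPTEs. A rough illustration of that kind of check, in the same standalone user-space style as the earlier sketch, is below; max_mapping_level() and its alignment policy are invented for the sketch and are not the real KVM helper or its signature.

/*
 * Hypothetical continuation of the earlier sketch: the zap-collapsible
 * handler now consults the slot before zapping.  max_mapping_level()
 * is a made-up stand-in for whatever compatibility check the follow-up
 * patch actually performs.
 */
#include <stdbool.h>
#include <stdio.h>

struct memslot { unsigned long base_gfn, npages; };

enum { LEVEL_4K = 1, LEVEL_2M = 2 };

/* Made-up policy: a 2M mapping is possible only if the slot is aligned
 * to and spans at least one full 512-page region containing this gfn. */
static int max_mapping_level(const struct memslot *slot, unsigned long gfn)
{
	if ((slot->base_gfn % 512) == 0 && slot->npages >= 512 &&
	    gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
		return LEVEL_2M;
	return LEVEL_4K;
}

/* Zap a small SPTE only when the slot could actually back a larger
 * mapping at this gfn; otherwise leave it in place. */
static bool zap_collapsible_spte(const struct memslot *slot,
				 unsigned long gfn, int cur_level)
{
	return cur_level < max_mapping_level(slot, gfn);
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0, .npages = 1024 };

	/* A 4K SPTE inside a hugepage-capable slot is a zap candidate. */
	printf("zap: %d\n", zap_collapsible_spte(&slot, 8, LEVEL_4K));
	return 0;
}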