KVM: Remove unused kvm_vcpu_gfn_to_pfn_atomic

The last use of kvm_vcpu_gfn_to_pfn_atomic was removed by commit
1bbc60d0c7 ("KVM: x86/mmu: Remove MMU auditing").

Remove it.

Signed-off-by: Dr. David Alan Gilbert <linux@treblig.org>
Message-ID: <20241001141354.18009-3-linux@treblig.org>
[Adjust Documentation/virt/kvm/locking.rst. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit bc07eea2f3
parent 88a387cf9e
Documentation/virt/kvm/locking.rst
@@ -136,7 +136,7 @@ For direct sp, we can easily avoid it since the spte of direct sp is fixed
 to gfn. For indirect sp, we disabled fast page fault for simplicity.
 
 A solution for indirect sp could be to pin the gfn, for example via
-kvm_vcpu_gfn_to_pfn_atomic, before the cmpxchg. After the pinning:
+gfn_to_pfn_memslot_atomic, before the cmpxchg. After the pinning:
 
 - We have held the refcount of pfn; that means the pfn can not be freed and
   be reused for another gfn.
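Aside, not part of this commit: the documentation text above describes pinning the pfn backing a gfn before the cmpxchg so the page cannot be freed and reused for another gfn in the meantime. A minimal sketch of that idea follows; the function name pin_and_fix_spte and the surrounding fast-page-fault plumbing are invented here, gfn_to_pfn_memslot_atomic() and kvm_vcpu_gfn_to_memslot() are the helpers shown in this diff, and kvm_release_pfn_clean() is assumed to be the reference-dropping helper available on this kernel version.

#include <linux/kvm_host.h>

/*
 * Illustrative sketch only: pin the pfn backing @gfn before racing a
 * cmpxchg on the spte, so the page cannot be freed and reused for
 * another gfn while the update is in flight.
 */
static bool pin_and_fix_spte(struct kvm_vcpu *vcpu, gfn_t gfn,
			     u64 *sptep, u64 old_spte, u64 new_spte)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	kvm_pfn_t pfn;
	bool ret;

	/* Atomic variant: does not sleep, so it is usable in atomic context. */
	pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
	if (is_error_noslot_pfn(pfn))
		return false;

	/* The reference taken above keeps the pfn from being reused. */
	ret = (cmpxchg64(sptep, old_spte, new_spte) == old_spte);

	kvm_release_pfn_clean(pfn);	/* drop the pin (assumed helper) */
	return ret;
}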
include/linux/kvm_host.h
@@ -1313,7 +1313,6 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
-kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
virt/kvm/kvm_main.c
@@ -3035,12 +3035,6 @@ kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
 
-kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
-}
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
-
 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
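Aside: the helper removed above was a one-line convenience wrapper, so a caller that still wants this behaviour can simply open-code it. A minimal sketch taken from the removed body; the name vcpu_gfn_to_pfn_atomic is invented here for illustration.

#include <linux/kvm_host.h>

/* Open-coded equivalent of the removed wrapper: resolve the vCPU's memslot
 * for @gfn, then do the atomic (non-sleeping) pfn lookup. */
static kvm_pfn_t vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}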