From 48987781eb1d1e8ded41f55cd5806615fda92c6e Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Sun, 22 Aug 2010 19:11:43 +0800
Subject: [PATCH] KVM: MMU: introduce gfn_to_page_many_atomic() function

Introduce this function to get the pages of consecutive gfns; batching
the lookup reduces gup's overhead. It will be used by a later patch.

Signed-off-by: Xiao Guangrong
Signed-off-by: Marcelo Tosatti
---
 include/linux/kvm_host.h |  3 +++
 virt/kvm/kvm_main.c      | 29 ++++++++++++++++++++++++++++-
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 307d0e2c0f59..b837ec80885d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -289,6 +289,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 08bd304f8bc7..2eb0b7500a2a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -927,15 +927,25 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+				     gfn_t *nr_pages)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
+
+	if (nr_pages)
+		*nr_pages = slot->npages - (gfn - slot->base_gfn);
+
 	return gfn_to_hva_memslot(slot, gfn);
 }
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm, gfn, NULL);
+}
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
@@ -1010,6 +1020,23 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 	return hva_to_pfn(kvm, addr, false);
 }
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages)
+{
+	unsigned long addr;
+	gfn_t entry;
+
+	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	if (kvm_is_error_hva(addr))
+		return -1;
+
+	if (entry < nr_pages)
+		return 0;
+
+	return __get_user_pages_fast(addr, nr_pages, 1, pages);
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
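
To illustrate how a caller might use the new helper, here is a minimal
sketch. gfn_to_page_many_atomic() and kvm_release_page_clean() come from
the patch above; the wrapper name prefetch_gfn_pages and the batch size
of 16 are hypothetical. Per the implementation, the helper returns the
number of pages pinned by __get_user_pages_fast() (which may be fewer
than requested, since the fast gup path never sleeps), 0 if fewer than
nr_pages gfns remain before the end of the memslot, or -1 if the gfn has
no valid hva.

#include <linux/kvm_host.h>

/*
 * Hypothetical caller sketch: pin up to 16 consecutive guest pages in
 * one call instead of looping over gfn_to_page().  The function name
 * and batch size are illustrative only.
 */
static void prefetch_gfn_pages(struct kvm *kvm, gfn_t gfn)
{
	struct page *pages[16];
	int i, got;

	/*
	 * got > 0:  number of pages actually pinned
	 * got == 0: request would run past the end of the memslot
	 * got < 0:  gfn maps to a bad hva
	 */
	got = gfn_to_page_many_atomic(kvm, gfn, pages, ARRAY_SIZE(pages));
	if (got <= 0)
		return;

	for (i = 0; i < got; i++) {
		/* ... consume pages[i], e.g. to build sptes ... */
		kvm_release_page_clean(pages[i]);
	}
}

Because __get_user_pages_fast() does not sleep, a caller like this can
presumably run in atomic context, which is what the _atomic suffix of
the new helper suggests.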