shmem: Convert shmem_alloc_hugepage to XArray

xa_find() is a slightly easier API to use than
radix_tree_gang_lookup_slot() because it contains its own RCU locking.
This commit removes the last user of radix_tree_gang_lookup_slot(),
so the function itself is removed as well.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
This commit is contained in:
Matthew Wilcox 2017-12-01 22:13:06 -05:00
parent 552446a416
commit 7b8d046fba
3 changed files with 6 additions and 58 deletions

View File

@ -147,12 +147,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
* radix_tree_lookup_slot * radix_tree_lookup_slot
* radix_tree_tag_get * radix_tree_tag_get
* radix_tree_gang_lookup * radix_tree_gang_lookup
* radix_tree_gang_lookup_slot
* radix_tree_gang_lookup_tag * radix_tree_gang_lookup_tag
* radix_tree_gang_lookup_tag_slot * radix_tree_gang_lookup_tag_slot
* radix_tree_tagged * radix_tree_tagged
* *
* The first 8 functions are able to be called locklessly, using RCU. The * The first 7 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock() * caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be * regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently. * running concurrently.
@ -263,9 +262,6 @@ void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
void **results, unsigned long first_index, void **results, unsigned long first_index,
unsigned int max_items); unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
void __rcu ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask); int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);

View File

@ -1097,7 +1097,7 @@ void __radix_tree_replace(struct radix_tree_root *root,
* @slot: pointer to slot * @slot: pointer to slot
* @item: new item to store in the slot. * @item: new item to store in the slot.
* *
* For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(), * For use with radix_tree_lookup_slot() and
* radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
* across slot lookup and replacement. * across slot lookup and replacement.
* *
@ -1731,48 +1731,6 @@ radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
} }
EXPORT_SYMBOL(radix_tree_gang_lookup); EXPORT_SYMBOL(radix_tree_gang_lookup);
/**
 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @indices: where their indices should be placed (but usually NULL)
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items. Places
 * their slots at *@results and returns the number of items which were
 * placed at *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
 * be dereferenced with radix_tree_deref_slot, and if using only RCU
 * protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
void __rcu ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items)
{
struct radix_tree_iter iter;
void __rcu **slot;
unsigned int ret = 0;
/* A zero-sized gang lookup trivially finds nothing. */
if (unlikely(!max_items))
return 0;
/*
 * Walk present slots in ascending index order starting at
 * @first_index, recording each slot pointer (and its index, if the
 * caller supplied @indices) until @max_items have been gathered or
 * the tree is exhausted.
 */
radix_tree_for_each_slot(slot, root, &iter, first_index) {
results[ret] = slot;
if (indices)
indices[ret] = iter.index;
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
/** /**
* radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
* based on a tag * based on a tag

View File

@ -1431,23 +1431,17 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index) struct shmem_inode_info *info, pgoff_t index)
{ {
struct vm_area_struct pvma; struct vm_area_struct pvma;
struct inode *inode = &info->vfs_inode; struct address_space *mapping = info->vfs_inode.i_mapping;
struct address_space *mapping = inode->i_mapping; pgoff_t hindex;
pgoff_t idx, hindex;
void __rcu **results;
struct page *page; struct page *page;
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
return NULL; return NULL;
hindex = round_down(index, HPAGE_PMD_NR); hindex = round_down(index, HPAGE_PMD_NR);
rcu_read_lock(); if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
if (radix_tree_gang_lookup_slot(&mapping->i_pages, &results, &idx, XA_PRESENT))
hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
rcu_read_unlock();
return NULL; return NULL;
}
rcu_read_unlock();
shmem_pseudo_vma_init(&pvma, info, hindex); shmem_pseudo_vma_init(&pvma, info, hindex);
page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,