Mirror of https://github.com/torvalds/linux.git
mm: put readahead pages in cache earlier
When populating the page cache for readahead, mappings that use ->readpages must populate the page cache themselves as the pages are passed on a linked list which would normally be used for the page cache's LRU. For mappings that use ->readpage or the upcoming ->readahead method, we can put the pages into the page cache as soon as they're allocated, which solves a race between readahead and direct IO. It also lets us remove the gfp argument from read_pages().

Use the new readahead_page() API to implement the repeated calls to ->readpage(), just like most filesystems will.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-11-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent ef8153b609
commit c1f6925e10
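For context before the diff: the readahead_page() iterator used below was added earlier in this series in include/linux/pagemap.h. A simplified sketch of its behaviour, assuming only the readahead_control fields visible in this patch (the real helper also tracks a batch count, so compound pages can advance _index by more than one):

/*
 * Simplified sketch of the iterator used by read_pages() below; the
 * real implementation lives in include/linux/pagemap.h.  Each call
 * yields the next page of the readahead batch, which this patch now
 * guarantees is already locked and in the page cache before
 * ->readpage runs.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	if (!rac->_nr_pages)
		return NULL;

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_index++;
	rac->_nr_pages--;

	return page;
}

Because the iterator walks the page cache directly, read_pages() no longer needs the page_pool list or add_to_page_cache_lru() for the ->readpage case, which is exactly what the diff removes.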
diff --git a/mm/readahead.c b/mm/readahead.c
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -114,14 +114,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 EXPORT_SYMBOL(read_cache_pages);
 
 static void read_pages(struct readahead_control *rac, struct list_head *pages,
-		gfp_t gfp)
+		bool skip_page)
 {
 	const struct address_space_operations *aops = rac->mapping->a_ops;
+	struct page *page;
 	struct blk_plug plug;
-	unsigned page_idx;
 
 	if (!readahead_count(rac))
-		return;
+		goto out;
 
 	blk_start_plug(&plug);
 
@@ -130,23 +130,23 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 				readahead_count(rac));
 		/* Clean up the remaining pages */
 		put_pages_list(pages);
-		goto out;
-	}
-
-	for (page_idx = 0; page_idx < readahead_count(rac); page_idx++) {
-		struct page *page = lru_to_page(pages);
-		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, rac->mapping, page->index,
-				gfp))
+		rac->_index += rac->_nr_pages;
+		rac->_nr_pages = 0;
+	} else {
+		while ((page = readahead_page(rac))) {
 			aops->readpage(rac->file, page);
-		put_page(page);
+			put_page(page);
+		}
 	}
 
-out:
 	blk_finish_plug(&plug);
 
 	BUG_ON(!list_empty(pages));
-	rac->_nr_pages = 0;
+	BUG_ON(readahead_count(rac));
+
+out:
+	if (skip_page)
+		rac->_index++;
 }
 
 /*
@@ -168,6 +168,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	struct readahead_control rac = {
 		.mapping = mapping,
 		.file = filp,
+		._index = index,
 	};
 	unsigned long i;
 
@@ -183,6 +184,8 @@ void __do_page_cache_readahead(struct address_space *mapping,
 		if (index + i > end_index)
 			break;
 
+		BUG_ON(index + i != rac._index + rac._nr_pages);
+
 		page = xa_load(&mapping->i_pages, index + i);
 		if (page && !xa_is_value(page)) {
 			/*
@@ -190,15 +193,22 @@ void __do_page_cache_readahead(struct address_space *mapping,
 			 * contiguous pages before continuing with the next
 			 * batch.
 			 */
-			read_pages(&rac, &page_pool, gfp_mask);
+			read_pages(&rac, &page_pool, true);
 			continue;
 		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			break;
-		page->index = index + i;
-		list_add(&page->lru, &page_pool);
+		if (mapping->a_ops->readpages) {
+			page->index = index + i;
+			list_add(&page->lru, &page_pool);
+		} else if (add_to_page_cache_lru(page, mapping, index + i,
+					gfp_mask) < 0) {
+			put_page(page);
+			read_pages(&rac, &page_pool, true);
+			continue;
+		}
 		if (i == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
 		rac._nr_pages++;
@@ -209,7 +219,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	 * uptodate then the caller will launch readpage again, and
 	 * will then handle the error.
 	 */
-	read_pages(&rac, &page_pool, gfp_mask);
+	read_pages(&rac, &page_pool, false);
 }
 
 /*
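As the commit message notes, the while/readahead_page() loop above is the shape most filesystems are expected to adopt once the ->readahead method lands later in this series. A hypothetical conversion, where example_read_one_page() is an invented stand-in for a filesystem's existing per-page read helper (everything here other than readahead_page() and put_page() is illustrative, not part of this patch):

/*
 * Hypothetical ->readahead implementation sketch.  The method itself
 * is only added later in this series; example_read_one_page() stands
 * in for the filesystem's per-page reader, which is expected to
 * unlock the page once the read completes.
 */
static void example_readahead(struct readahead_control *rac)
{
	struct page *page;

	while ((page = readahead_page(rac))) {
		example_read_one_page(rac->file, page);
		put_page(page);
	}
}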