f2fs: support lower priority asynchronous readahead in ra_meta_pages
Currently, ra_meta_pages reads as many contiguous physical blocks as possible to improve the performance of subsequent reads. However, it performs readahead synchronously by submitting its bio with READ. Since READ is a high-priority request type, it is not suitable for preloading blocks whose eventual use is uncertain and may happen much later. This patch adds asynchronous readahead support to ra_meta_pages by tagging the bio with the READA flag, allowing such lower-priority preloading.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit 26879fb101
parent 2b947003fa
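The behavioral change is confined to how the request flags for the readahead bio are chosen. As a rough illustration only, the following user-space C sketch mirrors that selection; READ_SYNC, REQ_META, REQ_PRIO and READA are stand-in constants here, not the kernel's definitions, and meta_read_flags is a hypothetical helper, not part of the patch.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in flag values; the real ones come from the kernel block layer. */
    #define READ_SYNC	(1 << 0)
    #define REQ_META	(1 << 1)
    #define REQ_PRIO	(1 << 2)
    #define READA		(1 << 3)

    /*
     * Hypothetical helper mirroring the fio.rw selection this patch adds:
     * synchronous readers keep the high-priority flags, while asynchronous
     * preloading is tagged as plain readahead (READA).
     */
    static int meta_read_flags(bool sync)
    {
    	return sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA;
    }

    int main(void)
    {
    	printf("sync  rw flags: 0x%x\n", meta_read_flags(true));
    	printf("async rw flags: 0x%x\n", meta_read_flags(false));
    	return 0;
    }

All call sites touched by this patch pass sync = true, so their behavior is unchanged; the READA path is only taken by callers that opt into asynchronous preloading.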
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -140,7 +140,8 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
 /*
  * Readahead CP/NAT/SIT/SSA pages
  */
-int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
+int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+							int type, bool sync)
 {
 	block_t prev_blk_addr = 0;
 	struct page *page;
@@ -148,7 +149,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
 	struct f2fs_io_info fio = {
 		.sbi = sbi,
 		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO,
+		.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
 		.encrypted_page = NULL,
 	};
 
@@ -214,7 +215,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
 	f2fs_put_page(page, 0);
 
 	if (readahead)
-		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
+		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
 }
 
 static int f2fs_write_meta_page(struct page *page,
@@ -521,7 +522,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
 	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
 	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
 
-	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP);
+	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
 
 	for (i = 0; i < orphan_blocks; i++) {
 		struct page *page = get_meta_page(sbi, start_blk + i);
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1808,7 +1808,7 @@ struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
 bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
-int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int);
+int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
 void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
 long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
 void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -840,7 +840,7 @@ gc_more:
 	/* readahead multi ssa blocks those have contiguous address */
 	if (sbi->segs_per_sec > 1)
 		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
-								META_SSA);
+							META_SSA, true);
 
 	for (i = 0; i < sbi->segs_per_sec; i++) {
 		/*
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1529,7 +1529,8 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
 		return;
 
 	/* readahead nat pages to be scanned */
-	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);
+	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+							META_NAT, true);
 
 	while (1) {
 		struct page *page = get_current_nat_page(sbi, nid);
@@ -1804,7 +1805,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 		nrpages = min(last_offset - i, bio_blocks);
 
 		/* readahead node pages */
-		ra_meta_pages(sbi, addr, nrpages, META_POR);
+		ra_meta_pages(sbi, addr, nrpages, META_POR, true);
 
 		for (idx = addr; idx < addr + nrpages; idx++) {
 			struct page *page = get_tmp_page(sbi, idx);
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -180,7 +180,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
-	ra_meta_pages(sbi, blkaddr, 1, META_POR);
+	ra_meta_pages(sbi, blkaddr, 1, META_POR, true);
 
 	while (1) {
 		struct fsync_inode_entry *entry;
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1621,7 +1621,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 
 		if (npages >= 2)
 			ra_meta_pages(sbi, start_sum_block(sbi), npages,
-								META_CP);
+							META_CP, true);
 
 		/* restore for compacted data summary */
 		if (read_compacted_summaries(sbi))
@@ -1631,7 +1631,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 
 	if (__exist_node_summaries(sbi))
 		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
-					NR_CURSEG_TYPE - type, META_CP);
+					NR_CURSEG_TYPE - type, META_CP, true);
 
 	for (; type <= CURSEG_COLD_NODE; type++) {
 		err = read_normal_summaries(sbi, type);
@@ -2118,7 +2118,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 	int nrpages = MAX_BIO_BLOCKS(sbi);
 
 	do {
-		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
+		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
 
 		start = start_blk * sit_i->sents_per_block;
 		end = (start_blk + readed) * sit_i->sents_per_block;