mirror of https://github.com/torvalds/linux.git, synced 2024-11-23 20:51:44 +00:00
erofs: remove the member readahead from struct z_erofs_decompress_frontend

The struct member is only used to add REQ_RAHEAD during I/O submission, so it
is cleaner to pass it as a parameter than keep it in the struct. Also, rename
z_erofs_get_sync_decompress_policy() to z_erofs_is_sync_decompress() for
better clarity and conciseness.

Signed-off-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230524063944.1655-1-zbestahu@gmail.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
commit ef4b4b46c6
parent 597e2953ae
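The refactoring pattern in this commit is generic: a boolean that is consumed in exactly one place does not need to live in the per-request context struct; callers can simply forward it down the call chain. Below is a minimal standalone C sketch of that pattern, not the kernel code: req_frontend, submit_io, run_queue and RA_FLAG are hypothetical stand-ins for the decompress frontend struct, z_erofs_submit_queue(), z_erofs_runqueue() and REQ_RAHEAD.

/*
 * Illustrative sketch only (hypothetical names): the flag is passed as a
 * parameter instead of being stored in the context struct.
 */
#include <stdbool.h>
#include <stdio.h>

#define RA_FLAG 0x1	/* stand-in for REQ_RAHEAD on bio->bi_opf */

struct req_frontend {
	long head_offset;	/* the "readahead" flag no longer lives here */
};

/* the only consumer of the flag: tag the I/O when it comes from readahead */
static void submit_io(struct req_frontend *f, bool readahead)
{
	unsigned int opf = 0;

	if (readahead)
		opf |= RA_FLAG;
	printf("submit offset=%ld opf=%#x\n", f->head_offset, opf);
}

/* callers forward the flag instead of stashing it in the context struct */
static void run_queue(struct req_frontend *f, bool ra)
{
	submit_io(f, ra);
}

int main(void)
{
	struct req_frontend f = { .head_offset = 0 };

	run_queue(&f, false);	/* read_folio-style path */
	run_queue(&f, true);	/* readahead-style path */
	return 0;
}

In the actual patch below, z_erofs_read_folio() passes false and z_erofs_readahead() passes true through z_erofs_runqueue() into z_erofs_submit_queue(), which sets REQ_RAHEAD on the bio.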
@@ -548,7 +548,6 @@ struct z_erofs_decompress_frontend {
 	z_erofs_next_pcluster_t owned_head;
 	enum z_erofs_pclustermode mode;
 
-	bool readahead;
 	/* used for applying cache strategy on the fly */
 	bool backmost;
 	erofs_off_t headoffset;
@@ -1104,7 +1103,7 @@ out:
 	return err;
 }
 
-static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
+static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
 				       unsigned int readahead_pages)
 {
 	/* auto: enable for read_folio, disable for readahead */
@@ -1672,7 +1671,7 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 				 struct page **pagepool,
 				 struct z_erofs_decompressqueue *fgq,
-				 bool *force_fg)
+				 bool *force_fg, bool readahead)
 {
 	struct super_block *sb = f->inode->i_sb;
 	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
@@ -1763,7 +1762,7 @@ submit_bio_retry:
 			bio->bi_iter.bi_sector = (sector_t)cur <<
 				(sb->s_blocksize_bits - 9);
 			bio->bi_private = q[JQ_SUBMIT];
-			if (f->readahead)
+			if (readahead)
 				bio->bi_opf |= REQ_RAHEAD;
 			++nr_bios;
 		}
@@ -1799,13 +1798,13 @@ submit_bio_retry:
 }
 
 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
-			     struct page **pagepool, bool force_fg)
+			     struct page **pagepool, bool force_fg, bool ra)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
 	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
-	z_erofs_submit_queue(f, pagepool, io, &force_fg);
+	z_erofs_submit_queue(f, pagepool, io, &force_fg, ra);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
@@ -1903,8 +1902,8 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
 	(void)z_erofs_collector_end(&f);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(&f, &pagepool,
-			 z_erofs_get_sync_decompress_policy(sbi, 0));
+	z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0),
+			 false);
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1922,7 +1921,6 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	struct page *pagepool = NULL, *head = NULL, *page;
 	unsigned int nr_pages;
 
-	f.readahead = true;
 	f.headoffset = readahead_pos(rac);
 
 	z_erofs_pcluster_readmore(&f, rac, f.headoffset +
@@ -1953,7 +1951,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	(void)z_erofs_collector_end(&f);
 
 	z_erofs_runqueue(&f, &pagepool,
-			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
+			 z_erofs_is_sync_decompress(sbi, nr_pages), true);
 	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&pagepool);
 }