f2fs: compress: fix overwrite may reduce compress ratio unproperly
When overwriting only the first block of a cluster, the cluster is not full, so f2fs_write_multi_pages() falls back to f2fs_write_raw_pages() and the whole cluster becomes uncompressed even though the data is compressible. This can reduce random-write benchmark scores considerably.

root# dd if=/dev/zero of=./fio-test bs=1M count=1
root# sync
root# echo 3 > /proc/sys/vm/drop_caches
root# f2fs_io get_cblocks ./fio-test
root# dd if=/dev/zero of=./fio-test bs=4K count=1 oflag=direct conv=notrunc

w/o patch:
root# f2fs_io get_cblocks ./fio-test
189

w/ patch:
root# f2fs_io get_cblocks ./fio-test
192

Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 71f2c82062
commit b368cc5e26
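With f2fs's default 4-block compression cluster, the numbers in the test add up: a 1 MiB file of zeroes spans 64 clusters and presumably saves 3 blocks per cluster (64 x 3 = 192); losing compression on the one overwritten cluster drops that to 189. The core of the fix is the contiguity check added as f2fs_all_cluster_page_loaded() in the diff below: the writeback pass is retried when the pagevec does not cover the whole cluster, so a partially dirtied cluster is no longer pushed down the uncompressed f2fs_write_raw_pages() path. A minimal user-space sketch of that check, with a plain array of page indexes standing in for the kernel's struct pagevec (all names below are hypothetical, not the f2fs API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the contiguity test the new helper performs: starting at
 * 'start', are there 'cluster_size' pages whose indexes are strictly
 * consecutive?  In the kernel this walks pvec->pages[]->index; here a
 * plain array of page indexes stands in for the pagevec.
 */
static bool all_cluster_pages_loaded(const unsigned long *page_index,
				     int nr_pages, int start, int cluster_size)
{
	unsigned long first;
	int i;

	if (nr_pages - start < cluster_size)	/* not enough pages left */
		return false;

	first = page_index[start];
	for (i = 1; i < cluster_size; i++) {
		if (page_index[start + i] != first + i)	/* hole in the cluster */
			return false;
	}
	return true;
}

int main(void)
{
	/* pages 0..3 form a full 4-page cluster; index 9 breaks the next one */
	unsigned long idx[] = { 0, 1, 2, 3, 4, 5, 6, 9 };
	int nr = sizeof(idx) / sizeof(idx[0]);

	printf("cluster at 0 loaded: %d\n", all_cluster_pages_loaded(idx, nr, 0, 4)); /* 1 */
	printf("cluster at 4 loaded: %d\n", all_cluster_pages_loaded(idx, nr, 4, 4)); /* 0 */
	return 0;
}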
fs/f2fs/compress.c
@@ -881,6 +881,25 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
 	return is_page_in_cluster(cc, index);
 }
 
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+				int index, int nr_pages)
+{
+	unsigned long pgidx;
+	int i;
+
+	if (nr_pages - index < cc->cluster_size)
+		return false;
+
+	pgidx = pvec->pages[index]->index;
+
+	for (i = 1; i < cc->cluster_size; i++) {
+		if (pvec->pages[index + i]->index != pgidx + i)
+			return false;
+	}
+
+	return true;
+}
+
 static bool cluster_has_invalid_data(struct compress_ctx *cc)
 {
 	loff_t i_size = i_size_read(cc->inode);

fs/f2fs/data.c
@@ -3040,6 +3040,10 @@ readd:
 			need_readd = false;
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 			if (f2fs_compressed_file(inode)) {
+				void *fsdata = NULL;
+				struct page *pagep;
+				int ret2;
+
 				ret = f2fs_init_compress_ctx(&cc);
 				if (ret) {
 					done = 1;
@@ -3058,27 +3062,23 @@ readd:
 				if (unlikely(f2fs_cp_error(sbi)))
 					goto lock_page;
 
-				if (f2fs_cluster_is_empty(&cc)) {
-					void *fsdata = NULL;
-					struct page *pagep;
-					int ret2;
+				if (!f2fs_cluster_is_empty(&cc))
+					goto lock_page;
 
-					ret2 = f2fs_prepare_compress_overwrite(
+				ret2 = f2fs_prepare_compress_overwrite(
 							inode, &pagep,
 							page->index, &fsdata);
-					if (ret2 < 0) {
-						ret = ret2;
-						done = 1;
-						break;
-					} else if (ret2 &&
-						!f2fs_compress_write_end(inode,
-							fsdata, page->index,
-							1)) {
-						retry = 1;
-						break;
-					}
-				} else {
-					goto lock_page;
+				if (ret2 < 0) {
+					ret = ret2;
+					done = 1;
+					break;
+				} else if (ret2 &&
+					(!f2fs_compress_write_end(inode,
+						fsdata, page->index, 1) ||
+					 !f2fs_all_cluster_page_loaded(&cc,
+						&pvec, i, nr_pages))) {
+					retry = 1;
+					break;
 				}
 			}
 #endif

fs/f2fs/f2fs.h
@@ -4052,6 +4052,8 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
 		block_t blkaddr);
 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+				int index, int nr_pages);
 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
 int f2fs_write_multi_pages(struct compress_ctx *cc,