From a55861c800ae0593b7d343470918f6f84063e3b8 Mon Sep 17 00:00:00 2001
From: Vladimir Zapolskiy <vladimir@tuxera.com>
Date: Thu, 2 Jan 2020 14:02:32 +0200
Subject: [PATCH 1/7] erofs: correct indentation of an assigned structure inside a function

Trivial change: the indentation required by the coding style has not
been followed.

Signed-off-by: Vladimir Zapolskiy <vladimir@tuxera.com>
Link: https://lore.kernel.org/r/20200102120232.15074-1-vladimir@tuxera.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 fs/erofs/xattr.h | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index 3585b84d2f20..50966f1c676e 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -46,18 +46,19 @@ extern const struct xattr_handler erofs_xattr_security_handler;
 
 static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
 {
-static const struct xattr_handler *xattr_handler_map[] = {
-	[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+	static const struct xattr_handler *xattr_handler_map[] = {
+		[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
 #ifdef CONFIG_EROFS_FS_POSIX_ACL
-	[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
-	[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
-		&posix_acl_default_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
+			&posix_acl_access_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+			&posix_acl_default_xattr_handler,
 #endif
-	[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+		[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
 #ifdef CONFIG_EROFS_FS_SECURITY
-	[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+		[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
 #endif
-};
+	};
 
 	return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
 		xattr_handler_map[idx] : NULL;

From 997626d8383871862d0774c50a351aa98dacc0eb Mon Sep 17 00:00:00 2001
From: Vladimir Zapolskiy <vladimir@tuxera.com>
Date: Thu, 2 Jan 2020 14:01:16 +0200
Subject: [PATCH 2/7] erofs: remove unused tag argument while finding a workgroup

It is feasible to simplify the erofs_find_workgroup() interface by
removing an unused function argument. While the argument is formally
used in the function itself, the value assigned to it is ignored on
the caller side.
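
For reference (a rough sketch of the call site, see the zdata.c hunk
below for the exact change), the caller in z_erofs_lookup_collection()
received the tag through an on-stack variable that was never read
afterwards:

	bool tag;	/* written by erofs_find_workgroup(), never read back */

	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
	if (!grp)
		return -ENOENT;

Dropping the argument removes this write-only output parameter.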
Signed-off-by: Vladimir Zapolskiy <vladimir@tuxera.com>
Link: https://lore.kernel.org/r/20200102120118.14979-2-vladimir@tuxera.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 fs/erofs/internal.h | 2 +-
 fs/erofs/utils.c    | 3 +--
 fs/erofs/zdata.c    | 3 +--
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1ed5beff7d11..c3d502b5fac7 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -401,7 +401,7 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
 #ifdef CONFIG_EROFS_FS_ZIP
 int erofs_workgroup_put(struct erofs_workgroup *grp);
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag);
+					     pgoff_t index);
 int erofs_register_workgroup(struct super_block *sb,
 			     struct erofs_workgroup *grp, bool tag);
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 1e8e1450d5b0..4d1cf4d00dab 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -59,7 +59,7 @@ repeat:
 }
 
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag)
+					     pgoff_t index)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 	struct erofs_workgroup *grp;
@@ -68,7 +68,6 @@ repeat:
 	rcu_read_lock();
 	grp = radix_tree_lookup(&sbi->workstn_tree, index);
 	if (grp) {
-		*tag = xa_pointer_tag(grp);
 		grp = xa_untag_pointer(grp);
 
 		if (erofs_workgroup_get(grp)) {
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index ca99425a4536..052d28391ce6 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 	struct z_erofs_pcluster *pcl;
 	struct z_erofs_collection *cl;
 	unsigned int length;
-	bool tag;
 
-	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 	if (!grp)
 		return -ENOENT;
 

From e5e9a432036a40756db171e7b22ee63e69c41ef1 Mon Sep 17 00:00:00 2001
From: Vladimir Zapolskiy <vladimir@tuxera.com>
Date: Thu, 2 Jan 2020 14:01:17 +0200
Subject: [PATCH 3/7] erofs: remove unused tag argument while registering a workgroup

All workgroups are registered with the tag value set to 0, so the tag
argument can be removed from the erofs_register_workgroup() interface
and the constant 0 used directly inside the function body.
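
For reference (a rough sketch of the call site, see the zdata.c hunk
below for the exact change), the caller in z_erofs_register_collection()
always passed 0 as the tag:

	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);

so after this change the constant tag is applied by xa_tag_pointer()
inside erofs_register_workgroup() itself.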
Signed-off-by: Vladimir Zapolskiy <vladimir@tuxera.com>
Link: https://lore.kernel.org/r/20200102120118.14979-3-vladimir@tuxera.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 fs/erofs/internal.h | 2 +-
 fs/erofs/utils.c    | 5 ++---
 fs/erofs/zdata.c    | 2 +-
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index c3d502b5fac7..c4c6dcdc89ad 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -403,7 +403,7 @@ int erofs_workgroup_put(struct erofs_workgroup *grp);
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 					     pgoff_t index);
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp, bool tag);
+			     struct erofs_workgroup *grp);
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 4d1cf4d00dab..7b47c56b89b7 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -83,8 +83,7 @@ repeat:
 }
 
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp,
-			     bool tag)
+			     struct erofs_workgroup *grp)
 {
 	struct erofs_sb_info *sbi;
 	int err;
@@ -102,7 +101,7 @@ int erofs_register_workgroup(struct super_block *sb,
 	sbi = EROFS_SB(sb);
 	xa_lock(&sbi->workstn_tree);
 
-	grp = xa_tag_pointer(grp, tag);
+	grp = xa_tag_pointer(grp, 0);
 
 	/*
 	 * Bump up reference count before making this workgroup
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 052d28391ce6..4fedeb4496e4 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -437,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	 */
 	mutex_trylock(&cl->lock);
 
-	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+	err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
 	if (err) {
 		mutex_unlock(&cl->lock);
 		kmem_cache_free(pcluster_cachep, pcl);

From e3915ad94bfa0e2c1818837af618e9adec74eb1b Mon Sep 17 00:00:00 2001
From: Vladimir Zapolskiy <vladimir@tuxera.com>
Date: Thu, 2 Jan 2020 14:01:18 +0200
Subject: [PATCH 4/7] erofs: remove void tagging/untagging of workgroup pointers

Because workgroup pointers inserted into the radix tree are always
tagged with the single value 0, the tagging and untagging of these
pointers can be removed completely.

Signed-off-by: Vladimir Zapolskiy <vladimir@tuxera.com>
Link: https://lore.kernel.org/r/20200102120118.14979-4-vladimir@tuxera.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 fs/erofs/utils.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 7b47c56b89b7..fddc5059c930 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -68,8 +68,6 @@ repeat:
 	rcu_read_lock();
 	grp = radix_tree_lookup(&sbi->workstn_tree, index);
 	if (grp) {
-		grp = xa_untag_pointer(grp);
-
 		if (erofs_workgroup_get(grp)) {
 			/* prefer to relax rcu read side */
 			rcu_read_unlock();
@@ -101,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
 	sbi = EROFS_SB(sb);
 	xa_lock(&sbi->workstn_tree);
 
-	grp = xa_tag_pointer(grp, 0);
-
 	/*
 	 * Bump up reference count before making this workgroup
 	 * visible to other users in order to avoid potential UAF
@@ -173,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	 * however in order to avoid some race conditions, add a
 	 * DBG_BUGON to observe this in advance.
 	 */
-	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
-						     grp->index)) != grp);
+	DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
 
 	/*
 	 * If managed cache is on, last refcount should indicate
@@ -199,7 +194,7 @@ repeat:
 				       batch, first_index, PAGEVEC_SIZE);
 
 	for (i = 0; i < found; ++i) {
-		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+		struct erofs_workgroup *grp = batch[i];
 
 		first_index = grp->index + 1;
 

From 4d2024370d877f9ac8b98694bcff666da6a5d333 Mon Sep 17 00:00:00 2001
From: Gao Xiang <gaoxiang25@huawei.com>
Date: Tue, 7 Jan 2020 10:25:46 +0800
Subject: [PATCH 5/7] erofs: fix out-of-bound read for shifted uncompressed block

rq->out[1] should be valid before it is accessed. Otherwise, in very
rare cases, the out-of-bound dirty on-stack rq->out[1] can happen to
equal *rq->in and lead to unintended memmove behavior.

Link: https://lore.kernel.org/r/20200107022546.19432-1-gaoxiang25@huawei.com
Fixes: 7fc45dbc938a ("staging: erofs: introduce generic decompression backend")
Cc: <stable@vger.kernel.org> # 5.3+
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 fs/erofs/decompressor.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 2890a67a1ded..5779a15c2cd6 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
 	}
 
 	src = kmap_atomic(*rq->in);
-	if (!rq->out[0]) {
-		dst = NULL;
-	} else {
+	if (rq->out[0]) {
 		dst = kmap_atomic(rq->out[0]);
 		memcpy(dst + rq->pageofs_out, src, righthalf);
+		kunmap_atomic(dst);
 	}
 
-	if (rq->out[1] == *rq->in) {
-		memmove(src, src + righthalf, rq->pageofs_out);
-	} else if (nrpages_out == 2) {
-		if (dst)
-			kunmap_atomic(dst);
+	if (nrpages_out == 2) {
 		DBG_BUGON(!rq->out[1]);
-		dst = kmap_atomic(rq->out[1]);
-		memcpy(dst, src + righthalf, rq->pageofs_out);
+		if (rq->out[1] == *rq->in) {
+			memmove(src, src + righthalf, rq->pageofs_out);
+		} else {
+			dst = kmap_atomic(rq->out[1]);
+			memcpy(dst, src + righthalf, rq->pageofs_out);
+			kunmap_atomic(dst);
+		}
 	}
-	if (dst)
-		kunmap_atomic(dst);
 	kunmap_atomic(src);
 	return 0;
 }

From 587a67b77789f822930d8f5e65bdd161c82e6365 Mon Sep 17 00:00:00 2001
From: Gao Xiang <gaoxiang25@huawei.com>
Date: Tue, 21 Jan 2020 14:47:47 +0800
Subject: [PATCH 6/7] erofs: fold in postsubmit_is_all_bypassed()

There is no need for such a separate helper since the cache strategy
compile-time configs were changed into runtime options in v5.4. No
logic changes.

Link: https://lore.kernel.org/r/20200121064747.138987-1-gaoxiang25@huawei.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 fs/erofs/zdata.c | 23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 4fedeb4496e4..f63a893fe886 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1148,20 +1148,6 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
-				       unsigned int nr_bios, bool force_fg)
-{
-	/*
-	 * although background is preferred, no one is pending for submission.
-	 * don't issue workqueue for decompression but drop it directly instead.
- */ - if (force_fg || nr_bios) - return false; - - kvfree(q[JQ_SUBMIT]); - return true; -} - static bool z_erofs_submit_queue(struct super_block *sb, z_erofs_next_pcluster_t owned_head, struct list_head *pagepool, @@ -1262,9 +1248,14 @@ skippage: if (bio) submit_bio(bio); - if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg)) + /* + * although background is preferred, no one is pending for submission. + * don't issue workqueue for decompression but drop it directly instead. + */ + if (!*force_fg && !nr_bios) { + kvfree(q[JQ_SUBMIT]); return true; - + } z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios); return true; } From 1e4a295567949ee8e6896a7db70afd1b6246966e Mon Sep 17 00:00:00 2001 From: Gao Xiang <gaoxiang25@huawei.com> Date: Tue, 21 Jan 2020 14:48:19 +0800 Subject: [PATCH 7/7] erofs: clean up z_erofs_submit_queue() A label and extra variables will be eliminated, which is more cleaner. Link: https://lore.kernel.org/r/20200121064819.139469-1-gaoxiang25@huawei.com Reviewed-by: Chao Yu <yuchao0@huawei.com> Signed-off-by: Gao Xiang <gaoxiang25@huawei.com> --- fs/erofs/zdata.c | 95 ++++++++++++++++++++---------------------------- 1 file changed, 40 insertions(+), 55 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index f63a893fe886..80e47f07d946 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1148,7 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } -static bool z_erofs_submit_queue(struct super_block *sb, +static void z_erofs_submit_queue(struct super_block *sb, z_erofs_next_pcluster_t owned_head, struct list_head *pagepool, struct z_erofs_decompressqueue *fgq, @@ -1157,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb, struct erofs_sb_info *const sbi = EROFS_SB(sb); z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; - struct bio *bio; void *bi_private; /* since bio will be NULL, no need to initialize last_index */ pgoff_t uninitialized_var(last_index); - bool force_submit = false; - unsigned int nr_bios; + unsigned int nr_bios = 0; + struct bio *bio = NULL; - if (owned_head == Z_EROFS_PCLUSTER_TAIL) - return false; - - force_submit = false; - bio = NULL; - nr_bios = 0; bi_private = jobqueueset_init(sb, q, fgq, force_fg); qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; @@ -1179,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb, do { struct z_erofs_pcluster *pcl; - unsigned int clusterpages; - pgoff_t first_index; - struct page *page; - unsigned int i = 0, bypass = 0; - int err; + pgoff_t cur, end; + unsigned int i = 0; + bool bypass = true; /* no possible 'owned_head' equals the following */ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); @@ -1191,55 +1182,50 @@ static bool z_erofs_submit_queue(struct super_block *sb, pcl = container_of(owned_head, struct z_erofs_pcluster, next); - clusterpages = BIT(pcl->clusterbits); + cur = pcl->obj.index; + end = cur + BIT(pcl->clusterbits); /* close the main owned chain at first */ owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, Z_EROFS_PCLUSTER_TAIL_CLOSED); - first_index = pcl->obj.index; - force_submit |= (first_index != last_index + 1); + do { + struct page *page; + int err; -repeat: - page = pickup_page_for_submission(pcl, i, pagepool, - MNGD_MAPPING(sbi), - GFP_NOFS); - if (!page) { - force_submit = true; - ++bypass; - goto skippage; - } + page = pickup_page_for_submission(pcl, i++, pagepool, + MNGD_MAPPING(sbi), + 
+							  GFP_NOFS);
+			if (!page)
+				continue;
 
-		if (bio && force_submit) {
+			if (bio && cur != last_index + 1) {
 submit_bio_retry:
-			submit_bio(bio);
-			bio = NULL;
-		}
+				submit_bio(bio);
+				bio = NULL;
+			}
 
-		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+			if (!bio) {
+				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-			bio->bi_end_io = z_erofs_decompressqueue_endio;
-			bio_set_dev(bio, sb->s_bdev);
-			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
-				LOG_SECTORS_PER_BLOCK;
-			bio->bi_private = bi_private;
-			bio->bi_opf = REQ_OP_READ;
+				bio->bi_end_io = z_erofs_decompressqueue_endio;
+				bio_set_dev(bio, sb->s_bdev);
+				bio->bi_iter.bi_sector = (sector_t)cur <<
+					LOG_SECTORS_PER_BLOCK;
+				bio->bi_private = bi_private;
+				bio->bi_opf = REQ_OP_READ;
+				++nr_bios;
+			}
 
-			++nr_bios;
-		}
+			err = bio_add_page(bio, page, PAGE_SIZE, 0);
+			if (err < PAGE_SIZE)
+				goto submit_bio_retry;
 
-		err = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (err < PAGE_SIZE)
-			goto submit_bio_retry;
+			last_index = cur;
+			bypass = false;
+		} while (++cur < end);
 
-		force_submit = false;
-		last_index = first_index + i;
-skippage:
-		if (++i < clusterpages)
-			goto repeat;
-
-		if (bypass < clusterpages)
+		if (!bypass)
 			qtail[JQ_SUBMIT] = &pcl->next;
 		else
 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1254,10 +1240,9 @@ skippage:
 	 */
 	if (!*force_fg && !nr_bios) {
 		kvfree(q[JQ_SUBMIT]);
-		return true;
+		return;
 	}
 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
-	return true;
 }
 
 static void z_erofs_runqueue(struct super_block *sb,
@@ -1266,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
 {
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (!z_erofs_submit_queue(sb, clt->owned_head,
-				  pagepool, io, &force_fg))
+	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
+	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);