forked from Minki/linux
erofs: drop the old pagevec approach
Remove the old pagevec approach but keep z_erofs_page_type for now; it will be reworked in the following commits as well. Also rename Z_EROFS_NR_INLINE_PAGEVECS to Z_EROFS_INLINE_BVECS with the new value 2, since that is actually enough to bootstrap.

Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220715154203.48093-6-hsiangkao@linux.alibaba.com
This commit is contained in:
parent
06a304cd9c
commit
387bab8716
@ -27,6 +27,17 @@ static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
|
||||
_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
|
||||
};
|
||||
|
||||
/*
 * (obsoleted) page type for online pages
 *
 * Kept temporarily after dropping the pagevec approach; to be reworked
 * in following commits.  NOTE: the code relies on
 * Z_EROFS_PAGE_TYPE_EXCLUSIVE being 0 (see __bad_page_type_exclusive
 * compile-time check in the old zpvec.h), so the order here matters.
 */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};
|
||||
|
||||
struct z_erofs_bvec_iter {
|
||||
struct page *bvpage;
|
||||
struct z_erofs_bvset *bvset;
|
||||
@ -248,7 +259,7 @@ enum z_erofs_collectmode {
|
||||
* a weak form of COLLECT_PRIMARY_FOLLOWED, the difference is that it
|
||||
* could be dispatched into bypass queue later due to uptodated managed
|
||||
* pages. All related online pages cannot be reused for inplace I/O (or
|
||||
* pagevec) since it can be directly decoded without I/O submission.
|
||||
* bvpage) since it can be directly decoded without I/O submission.
|
||||
*/
|
||||
COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
|
||||
/*
|
||||
@ -273,7 +284,6 @@ struct z_erofs_decompress_frontend {
|
||||
struct inode *const inode;
|
||||
struct erofs_map_blocks map;
|
||||
struct z_erofs_bvec_iter biter;
|
||||
struct z_erofs_pagevec_ctor vector;
|
||||
|
||||
struct page *candidate_bvpage;
|
||||
struct z_erofs_pcluster *pcl, *tailpcl;
|
||||
@ -636,7 +646,7 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
|
||||
return ret;
|
||||
}
|
||||
z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
|
||||
Z_EROFS_NR_INLINE_PAGEVECS, fe->pcl->vcnt);
|
||||
Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
|
||||
/* since file-backed online pages are traversed in reverse order */
|
||||
fe->icpage_ptr = fe->pcl->compressed_pages +
|
||||
z_erofs_pclusterpages(fe->pcl);
|
||||
@ -776,7 +786,7 @@ hitted:
|
||||
* Ensure the current partial page belongs to this submit chain rather
|
||||
* than other concurrent submit chains or the noio(bypass) chain since
|
||||
* those chains are handled asynchronously thus the page cannot be used
|
||||
* for inplace I/O or pagevec (should be processed in strict order.)
|
||||
* for inplace I/O or bvpage (should be processed in a strict order.)
|
||||
*/
|
||||
tight &= (fe->mode >= COLLECT_PRIMARY_HOOKED &&
|
||||
fe->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
|
||||
@ -871,8 +881,7 @@ static int z_erofs_parse_out_bvecs(struct z_erofs_pcluster *pcl,
|
||||
struct page *old_bvpage;
|
||||
int i, err = 0;
|
||||
|
||||
z_erofs_bvec_iter_begin(&biter, &pcl->bvset,
|
||||
Z_EROFS_NR_INLINE_PAGEVECS, 0);
|
||||
z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
|
||||
for (i = 0; i < pcl->vcnt; ++i) {
|
||||
struct z_erofs_bvec bvec;
|
||||
unsigned int pagenr;
|
||||
|
@ -7,10 +7,10 @@
|
||||
#define __EROFS_FS_ZDATA_H
|
||||
|
||||
#include "internal.h"
|
||||
#include "zpvec.h"
|
||||
#include "tagptr.h"
|
||||
|
||||
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
|
||||
#define Z_EROFS_NR_INLINE_PAGEVECS 3
|
||||
#define Z_EROFS_INLINE_BVECS 2
|
||||
|
||||
#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
|
||||
#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
|
||||
@ -34,7 +34,7 @@ struct name { \
|
||||
struct z_erofs_bvec bvec[total]; \
|
||||
}
|
||||
__Z_EROFS_BVSET(z_erofs_bvset,);
|
||||
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_NR_INLINE_PAGEVECS);
|
||||
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
|
||||
|
||||
/*
|
||||
* Structure fields follow one of the following exclusion rules.
|
||||
@ -69,9 +69,6 @@ struct z_erofs_pcluster {
|
||||
unsigned short nr_pages;
|
||||
|
||||
union {
|
||||
/* L: inline a certain number of pagevecs for bootstrap */
|
||||
erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
|
||||
|
||||
/* L: inline a certain number of bvec for bootstrap */
|
||||
struct z_erofs_bvset_inline bvset;
|
||||
|
||||
|
159
fs/erofs/zpvec.h
159
fs/erofs/zpvec.h
@ -1,159 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2018 HUAWEI, Inc.
|
||||
* https://www.huawei.com/
|
||||
*/
|
||||
#ifndef __EROFS_FS_ZPVEC_H
|
||||
#define __EROFS_FS_ZPVEC_H
|
||||
|
||||
#include "tagptr.h"
|
||||
|
||||
/*
 * page type in pagevec for decompress subsystem
 *
 * Stored as the tag bits of an erofs_vtptr_t entry.  The code relies on
 * Z_EROFS_PAGE_TYPE_EXCLUSIVE being 0 (enforced below through
 * __bad_page_type_exclusive()), so the enumerator order matters.
 */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};
|
||||
|
||||
extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
|
||||
__bad_page_type_exclusive(void);
|
||||
|
||||
/* pagevec tagged pointer */
|
||||
typedef tagptr2_t erofs_vtptr_t;
|
||||
|
||||
/*
 * pagevec collector
 *
 * Iterates over a chain of pagevec entries that may spill from the
 * inline array into additional pages (walked via _pagedown()).
 */
struct z_erofs_pagevec_ctor {
	/*
	 * curr: the pagevec page currently kmap()ed (NULL until the ctor
	 *       walks into its first spill page);
	 * next: the next page to spill into, discovered ahead of time from
	 *       an EXCLUSIVE entry in the current pagevec.
	 */
	struct page *curr, *next;
	/* mapped array of tagged page pointers currently being filled/read */
	erofs_vtptr_t *pages;

	/* nr: capacity of ->pages; index: next slot to enqueue/dequeue */
	unsigned int nr, index;
};
|
||||
|
||||
static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
|
||||
bool atomic)
|
||||
{
|
||||
if (!ctor->curr)
|
||||
return;
|
||||
|
||||
if (atomic)
|
||||
kunmap_atomic(ctor->pages);
|
||||
else
|
||||
kunmap(ctor->curr);
|
||||
}
|
||||
|
||||
/*
 * Find the next page that can hold further pagevec entries.
 *
 * If ->next was recorded earlier (while enqueueing/dequeueing), reuse
 * it directly; otherwise scan the first @nr entries of the current
 * pagevec for an EXCLUSIVE-tagged page, which is safe to reuse as
 * pagevec storage.  Returns NULL if no candidate is found (which should
 * only happen for a partially-filled pagevec, hence the DBG_BUGON).
 */
static inline struct page *
z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
			       unsigned int nr)
{
	unsigned int index;

	/* keep away from occupied pages */
	if (ctor->next)
		return ctor->next;

	for (index = 0; index < nr; ++index) {
		const erofs_vtptr_t t = ctor->pages[index];
		const unsigned int tags = tagptr_unfold_tags(t);

		/* only EXCLUSIVE pages can be reused for pagevec storage */
		if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
			return tagptr_unfold_ptr(t);
	}
	DBG_BUGON(nr >= ctor->nr);
	return NULL;
}
|
||||
|
||||
/*
 * Advance the collector to the next pagevec page.
 *
 * The next page must be looked up *before* unmapping the current one,
 * since z_erofs_pagevec_ctor_next_page() reads ->pages.  After the
 * switch, the whole new page is available (PAGE_SIZE worth of entries)
 * and filling restarts at index 0.
 */
static inline void
z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
			      bool atomic)
{
	struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);

	/* unmap the old page only after the next one has been found */
	z_erofs_pagevec_ctor_exit(ctor, atomic);

	ctor->curr = next;
	ctor->next = NULL;
	/* map the new pagevec page with the caller-requested mapping kind */
	ctor->pages = atomic ?
		kmap_atomic(ctor->curr) : kmap(ctor->curr);

	/* a full page of tagged pointers becomes available */
	ctor->nr = PAGE_SIZE / sizeof(struct page *);
	ctor->index = 0;
}
|
||||
|
||||
/*
 * Initialize a pagevec collector over the inline entry array @pages
 * (capacity @nr), positioned at overall entry index @i.
 *
 * If @i lies beyond the inline array, page down repeatedly until the
 * remaining offset fits into the current pagevec page.  Note the loop
 * condition is `i > ctor->nr` (not >=): when i == ctor->nr the final
 * pagedown is left to the ctor_next_page() call below / later accesses.
 */
static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
					     unsigned int nr,
					     erofs_vtptr_t *pages,
					     unsigned int i)
{
	ctor->nr = nr;
	ctor->curr = ctor->next = NULL;
	ctor->pages = pages;

	if (i >= nr) {
		/* skip past the inline entries first */
		i -= nr;
		z_erofs_pagevec_ctor_pagedown(ctor, false);
		while (i > ctor->nr) {
			i -= ctor->nr;
			z_erofs_pagevec_ctor_pagedown(ctor, false);
		}
	}
	/* pre-resolve the spill page so enqueue/dequeue can rely on it */
	ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
	ctor->index = i;
}
|
||||
|
||||
/*
 * Enqueue @page with tag @type into the pagevec.
 *
 * @pvec_safereuse: false if this page cannot safely be reused as pagevec
 * storage before I/O completes; in that case an EXCLUSIVE page is
 * downgraded to TAIL_SHARED so it is never picked by ctor_next_page().
 *
 * Returns false if the pagevec is full and no spill page is available
 * (no ->next and the last slot must be kept for an EXCLUSIVE entry);
 * the caller is expected to handle that case.
 */
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
					   struct page *page,
					   enum z_erofs_page_type type,
					   bool pvec_safereuse)
{
	if (!ctor->next) {
		/* some pages cannot be reused as pvec safely without I/O */
		if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
			type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;

		/* reserve the last slot for a future EXCLUSIVE spill page */
		if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
		    ctor->index + 1 == ctor->nr)
			return false;
	}

	if (ctor->index >= ctor->nr)
		z_erofs_pagevec_ctor_pagedown(ctor, false);

	/* exclusive page type must be 0 */
	if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
		__bad_page_type_exclusive();

	/* should remind that collector->next never equal to 1, 2 */
	if (type == (uintptr_t)ctor->next) {
		/* an EXCLUSIVE entry doubles as the next spill page */
		ctor->next = page;
	}
	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
	return true;
}
|
||||
|
||||
/*
 * Dequeue the next page from the pagevec, storing its tag in *@type.
 *
 * Pages down (atomically — the dequeue side runs in atomic context)
 * when the current pagevec page is exhausted.  The consumed slot is
 * cleared in place so the storage page can be reclaimed/reused safely.
 */
static inline struct page *
z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
			enum z_erofs_page_type *type)
{
	erofs_vtptr_t t;

	if (ctor->index >= ctor->nr) {
		/* a spill page must have been recorded while enqueueing */
		DBG_BUGON(!ctor->next);
		z_erofs_pagevec_ctor_pagedown(ctor, true);
	}

	t = ctor->pages[ctor->index];

	*type = tagptr_unfold_tags(t);

	/* should remind that collector->next never equal to 1, 2 */
	if (*type == (uintptr_t)ctor->next)
		/* an EXCLUSIVE entry is also the next pagevec spill page */
		ctor->next = tagptr_unfold_ptr(t);

	/* clear the consumed slot in place */
	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
	return tagptr_unfold_ptr(t);
}
|
||||
#endif
|
Loading…
Reference in New Issue
Block a user