ubi: fastmap: Get wl PEB even if ec exceeds the 'max' when free PEBs run out

This is part 2 of the fix for cyclically reusing single fastmap data PEBs.

Consider the following situation: there are four free PEBs for fm_anchor, pool,
wl_pool and the fastmap data PEB, with erase counters 100, 100, 100 and 5096
respectively (ubi->beb_rsvd_pebs is 0). The PEB with erase counter 5096 is
always picked for fastmap data because of the way find_wl_entry() selects
entries, and since the fastmap data PEB is never scheduled for wear leveling,
we finally end up with two PEBs (fm data) whose erase counters are far greater
than those of the other PEBs.

Fix this by getting a wl PEB in find_wl_entry() even if its erase counter
exceeds the 'max' when the free PEBs run out after filling the pools and
fastmap data. The PEB with the biggest erase counter is then taken as the wl
PEB, so it can be scheduled for wear leveling.
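
For illustration, below is a minimal user-space sketch of that selection
policy. pick_free_peb(), the flat array and the example 'diff' value are made
up for this sketch only; the real find_wl_entry() walks the RB-tree of free
struct ubi_wl_entry objects instead.

#include <stdio.h>

/* Simplified model: normally pick the free PEB whose erase counter is
 * closest to 'min_ec + diff' without reaching it; with pick_max set, the
 * highest-ec PEB may be returned even beyond that limit. */
static int pick_free_peb(const int *ec, int n, int diff, int pick_max)
{
        int i, best = 0, min_ec;

        for (i = 1; i < n; i++)         /* find the lowest erase counter */
                if (ec[i] < ec[best])
                        best = i;
        min_ec = ec[best];

        for (i = 0; i < n; i++)
                if (ec[i] > ec[best] && (pick_max || ec[i] < min_ec + diff))
                        best = i;

        return best;
}

int main(void)
{
        /* The situation above: three PEBs with ec 100 and one with ec 5096
         * (the 'diff' value here is illustrative only). */
        int ec[] = { 100, 100, 100, 5096 };
        int diff = 1000;

        printf("pick_max=0 -> PEB with ec %d\n", ec[pick_free_peb(ec, 4, diff, 0)]);
        printf("pick_max=1 -> PEB with ec %d\n", ec[pick_free_peb(ec, 4, diff, 1)]);
        return 0;
}

In this toy run, pick_max=0 keeps the selection at ec 100, while pick_max=1
returns the ec 5096 PEB, mirroring how the patch lets the highest-ec PEB
become a wear-leveling candidate once the pools can no longer be refilled.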

Fixes: dbb7d2a88d ("UBI: Add fastmap core")
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217787
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Author: Zhihao Cheng, 2023-08-28 14:38:42 +08:00 (committed by Richard Weinberger)
Commit: 761893bd49, parent: eada823e6a
2 changed files with 44 additions and 16 deletions

@@ -138,23 +138,44 @@ static void wait_free_pebs_for_pool(struct ubi_device *ubi)
 }
 
 /*
- * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * left_free_count - returns the number of free pebs to fill fm pools
  * @ubi: UBI device description object
  *
- * This helper function checks whether there are enough free pebs (deducted
+ * This helper function returns the number of free pebs (deducted
  * by fastmap pebs) to fill fm_pool and fm_wl_pool.
  */
-static bool has_enough_free_count(struct ubi_device *ubi)
+static int left_free_count(struct ubi_device *ubi)
 {
         int fm_used = 0;        // fastmap non anchor pebs.
 
         if (!ubi->free.rb_node)
-                return false;
+                return 0;
 
         if (!ubi->ro_mode && !ubi->fm_disabled)
                 fm_used = ubi->fm_size / ubi->leb_size - 1;
 
-        return ubi->free_count > fm_used;
+        return ubi->free_count - fm_used;
 }
 
+/*
+ * can_fill_pools - whether free PEBs will be left after filling pools
+ * @ubi: UBI device description object
+ * @free: current number of free PEBs
+ *
+ * Return %1 if there are still left free PEBs after filling pools,
+ * otherwise %0 is returned.
+ */
+static int can_fill_pools(struct ubi_device *ubi, int free)
+{
+        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+        struct ubi_fm_pool *pool = &ubi->fm_pool;
+        int pool_need = pool->max_size - pool->size +
+                        wl_pool->max_size - wl_pool->size;
+
+        if (free - pool_need < 1)
+                return 0;
+
+        return 1;
+}
+
 /**
@@ -199,7 +220,7 @@ void ubi_refill_pools_and_lock(struct ubi_device *ubi)
         for (;;) {
                 enough = 0;
                 if (pool->size < pool->max_size) {
-                        if (!has_enough_free_count(ubi))
+                        if (left_free_count(ubi) <= 0)
                                 break;
 
                         e = wl_get_wle(ubi);
@@ -212,10 +233,13 @@ void ubi_refill_pools_and_lock(struct ubi_device *ubi)
                         enough++;
 
                 if (wl_pool->size < wl_pool->max_size) {
-                        if (!has_enough_free_count(ubi))
+                        int left_free = left_free_count(ubi);
+
+                        if (left_free <= 0)
                                 break;
 
-                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
+                                          !can_fill_pools(ubi, left_free));
                         self_check_in_wl_tree(ubi, e, &ubi->free);
                         rb_erase(&e->u.rb, &ubi->free);
                         ubi->free_count--;
@@ -355,12 +379,12 @@ static bool need_wear_leveling(struct ubi_device *ubi)
         if (!e) {
                 if (!ubi->free.rb_node)
                         return false;
-                e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+                e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                 ec = e->ec;
         } else {
                 ec = e->ec;
                 if (ubi->free.rb_node) {
-                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                         ec = max(ec, e->ec);
                 }
         }

@@ -317,12 +317,14 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
  * @ubi: UBI device description object
  * @root: the RB-tree where to look for
  * @diff: maximum possible difference from the smallest erase counter
+ * @pick_max: pick PEB even its erase counter beyonds 'min_ec + @diff'
  *
  * This function looks for a wear leveling entry with erase counter closest to
  * min + @diff, where min is the smallest erase counter.
  */
 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
-                                          struct rb_root *root, int diff)
+                                          struct rb_root *root, int diff,
+                                          int pick_max)
 {
         struct rb_node *p;
         struct ubi_wl_entry *e;
@@ -336,9 +338,11 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
                 struct ubi_wl_entry *e1;
 
                 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
-                if (e1->ec >= max)
+                if (e1->ec >= max) {
+                        if (pick_max)
+                                e = e1;
                         p = p->rb_left;
-                else {
+                } else {
                         p = p->rb_right;
                         e = e1;
                 }
@@ -375,7 +379,7 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
                  */
                 e = may_reserve_for_fm(ubi, e, root);
         } else
-                e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+                e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2, 0);
 
         return e;
 }
@@ -1048,7 +1052,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
                  * %UBI_WL_THRESHOLD.
                  */
                 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
-                e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+                e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
 
                 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                         goto out_unlock;
@@ -2079,7 +2083,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 {
         struct ubi_wl_entry *e;
 
-        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
         self_check_in_wl_tree(ubi, e, &ubi->free);
         ubi->free_count--;
         ubi_assert(ubi->free_count >= 0);