bcachefs: KEY_TYPE_discard is no longer used
KEY_TYPE_discard used to be used for extent whiteouts, but when handling
of overlapping extents was lifted above the core btree code it became
unused. This patch updates various code to reflect that.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent f2785955bb
commit c052cf82f3
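For context, a minimal standalone sketch of the pattern this patch relies on: with KEY_TYPE_discard gone, a whiteout in a btree node is just a deleted key, so callers that previously filtered on a minimum key type (KEY_TYPE_discard + 1) can simply skip keys for which bkey_deleted() is true. All sketch_* names and the simplified types below are illustrative assumptions, not the actual bcachefs definitions.

/*
 * Standalone sketch, not the bcachefs headers: it illustrates why callers can
 * switch from bkey_whiteout() to bkey_deleted() once KEY_TYPE_discard is gone.
 * All sketch_* types, values and helpers are simplified assumptions modelled
 * on the pattern visible in the diff below, not code copied from the tree.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum sketch_key_type {
        SKETCH_KEY_TYPE_deleted,        /* the only whiteout left */
        SKETCH_KEY_TYPE_data,
};

struct sketch_bkey {
        enum sketch_key_type type;
};

/* With discard gone, "is this a whiteout?" reduces to "is it deleted?": */
static inline bool sketch_bkey_deleted(const struct sketch_bkey *k)
{
        return k->type == SKETCH_KEY_TYPE_deleted;
}

/*
 * The old iterators filtered on a minimum key type; skipping deleted keys,
 * as the reworked bch2_btree_node_iter_peek() does in the diff below, now
 * has the same effect.
 */
static const struct sketch_bkey *
sketch_peek_live(const struct sketch_bkey *keys, size_t nr, size_t *pos)
{
        while (*pos < nr && sketch_bkey_deleted(&keys[*pos]))
                (*pos)++;

        return *pos < nr ? &keys[*pos] : NULL;
}

int main(void)
{
        const struct sketch_bkey keys[] = {
                { .type = SKETCH_KEY_TYPE_deleted },
                { .type = SKETCH_KEY_TYPE_data },
        };
        size_t pos = 0;
        const struct sketch_bkey *k = sketch_peek_live(keys, 2, &pos);

        printf("first live key at index %zu\n", k ? pos : (size_t)-1);
        return 0;
}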
@@ -103,7 +103,7 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
         sort_iter_sort(iter, key_sort_fix_overlapping_cmp);
 
         while ((k = sort_iter_peek(iter))) {
-                if (!bkey_whiteout(k) &&
+                if (!bkey_deleted(k) &&
                     !should_drop_next_key(iter)) {
                         bkey_copy(out, k);
                         btree_keys_account_key_add(&nr, 0, out);
@@ -123,7 +123,7 @@ static void extent_sort_append(struct bch_fs *c,
                                 struct bkey_packed **out,
                                 struct bkey_s k)
 {
-        if (!bkey_whiteout(k.k)) {
+        if (!bkey_deleted(k.k)) {
                 if (!bch2_bkey_pack_key(*out, k.k, f))
                         memcpy_u64s_small(*out, k.k, BKEY_U64s);
 
@@ -148,7 +148,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
         memset(&nr, 0, sizeof(nr));
 
         while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
-                if (filter_whiteouts && bkey_whiteout(in))
+                if (filter_whiteouts && bkey_deleted(in))
                         continue;
 
                 if (bch2_bkey_transform(out_f, out, bkey_packed(in)
@@ -181,7 +181,7 @@ bch2_sort_repack_merge(struct bch_fs *c,
         bch2_bkey_buf_init(&k);
 
         while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
-                if (filter_whiteouts && bkey_whiteout(k_packed))
+                if (filter_whiteouts && bkey_deleted(k_packed))
                         continue;
 
                 /*
@@ -227,7 +227,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
         while ((in = sort_iter_next(iter, sort_keys_cmp))) {
                 bool needs_whiteout = false;
 
-                if (bkey_whiteout(in) &&
+                if (bkey_deleted(in) &&
                     (filter_whiteouts || !in->needs_whiteout))
                         continue;
 
@@ -239,7 +239,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
                         in = sort_iter_next(iter, sort_keys_cmp);
                 }
 
-                if (bkey_whiteout(in)) {
+                if (bkey_deleted(in)) {
                         memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
                         set_bkeyp_val_u64s(f, out, 0);
                 } else {
@@ -132,7 +132,7 @@ void __bch2_verify_btree_nr_keys(struct btree *b)
 
         for_each_bset(b, t)
                 bset_tree_for_each_key(b, t, k)
-                        if (!bkey_whiteout(k))
+                        if (!bkey_deleted(k))
                                 btree_keys_account_key_add(&nr, t - b->set, k);
 
         BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
@@ -1108,7 +1108,7 @@ void bch2_bset_insert(struct btree *b,
         if (bch2_bkey_pack_key(&packed, &insert->k, f))
                 src = &packed;
 
-        if (!bkey_whiteout(&insert->k))
+        if (!bkey_deleted(&insert->k))
                 btree_keys_account_key_add(&b->nr, t - b->set, src);
 
         if (src->u64s != clobber_u64s) {
@@ -1645,15 +1645,14 @@ found:
         return prev;
 }
 
-struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
-                                                     struct btree *b,
-                                                     unsigned min_key_type)
+struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
+                                              struct btree *b)
 {
         struct bkey_packed *prev;
 
         do {
                 prev = bch2_btree_node_iter_prev_all(iter, b);
-        } while (prev && prev->type < min_key_type);
+        } while (prev && bkey_deleted(prev));
 
         return prev;
 }
@@ -415,7 +415,7 @@ bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
 static inline struct bkey_packed *
 bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
 {
-        return bch2_bkey_prev_filter(b, t, k, KEY_TYPE_discard + 1);
+        return bch2_bkey_prev_filter(b, t, k, 1);
 }
 
 enum bch_extent_overlap {
@@ -521,33 +521,23 @@ __bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
 }
 
 static inline struct bkey_packed *
-bch2_btree_node_iter_peek_filter(struct btree_node_iter *iter,
-                                 struct btree *b,
-                                 unsigned min_key_type)
+bch2_btree_node_iter_peek_all(struct btree_node_iter *iter, struct btree *b)
 {
-        while (!bch2_btree_node_iter_end(iter)) {
-                struct bkey_packed *k = __bch2_btree_node_iter_peek_all(iter, b);
-
-                if (k->type >= min_key_type)
-                        return k;
-
-                bch2_btree_node_iter_advance(iter, b);
-        }
-
-        return NULL;
-}
-
-static inline struct bkey_packed *
-bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
-                              struct btree *b)
-{
-        return bch2_btree_node_iter_peek_filter(iter, b, 0);
+        return !bch2_btree_node_iter_end(iter)
+                ? __btree_node_offset_to_key(b, iter->data->k)
+                : NULL;
 }
 
 static inline struct bkey_packed *
 bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
 {
-        return bch2_btree_node_iter_peek_filter(iter, b, KEY_TYPE_discard + 1);
+        struct bkey_packed *k;
+
+        while ((k = bch2_btree_node_iter_peek_all(iter, b)) &&
+               bkey_deleted(k))
+                bch2_btree_node_iter_advance(iter, b);
+
+        return k;
 }
 
 static inline struct bkey_packed *
@@ -563,14 +553,8 @@ bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
 
 struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *,
                                                   struct btree *);
-struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *,
-                                                     struct btree *, unsigned);
-
-static inline struct bkey_packed *
-bch2_btree_node_iter_prev(struct btree_node_iter *iter, struct btree *b)
-{
-        return bch2_btree_node_iter_prev_filter(iter, b, KEY_TYPE_discard + 1);
-}
+struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *,
+                                              struct btree *);
 
 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
                                                  struct btree *,
@@ -215,7 +215,7 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
         for (k = start; k != end; k = n) {
                 n = bkey_next_skip_noops(k, end);
 
-                if (!bkey_whiteout(k)) {
+                if (!bkey_deleted(k)) {
                         bkey_copy(out, k);
                         out = bkey_next(out);
                 } else {
@@ -725,11 +725,11 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                 /*
                  * with the separate whiteouts thing (used for extents), the
                  * second set of keys actually can have whiteouts too, so we
-                 * can't solely go off bkey_whiteout()...
+                 * can't solely go off bkey_deleted()...
                  */
 
                 if (!seen_non_whiteout &&
-                    (!bkey_whiteout(k) ||
+                    (!bkey_deleted(k) ||
                      (prev && bkey_iter_cmp(b, prev, k) > 0))) {
                         *whiteout_u64s = k->_data - i->_data;
                         seen_non_whiteout = true;
@@ -534,7 +534,7 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter,
          * whiteouts)
          */
         p = level || btree_node_type_is_extents(iter->btree_id)
-                ? bch2_btree_node_iter_prev_filter(&tmp, l->b, KEY_TYPE_discard)
+                ? bch2_btree_node_iter_prev(&tmp, l->b)
                 : bch2_btree_node_iter_prev_all(&tmp, l->b);
         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
 
@@ -90,7 +90,7 @@ void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
 
         for_each_bset(b, t)
                 bset_tree_for_each_key(b, t, k)
-                        if (!bkey_whiteout(k)) {
+                        if (!bkey_deleted(k)) {
                                 uk = bkey_unpack_key(b, k);
                                 bch2_bkey_format_add_key(s, &uk);
                         }
@@ -73,13 +73,13 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
                 k = NULL;
 
         /* @k is the key being overwritten/deleted, if any: */
-        EBUG_ON(k && bkey_whiteout(k));
+        EBUG_ON(k && bkey_deleted(k));
 
         /* Deleting, but not found? nothing to do: */
-        if (bkey_whiteout(&insert->k) && !k)
+        if (bkey_deleted(&insert->k) && !k)
                 return false;
 
-        if (bkey_whiteout(&insert->k)) {
+        if (bkey_deleted(&insert->k)) {
                 /* Deleting: */
                 btree_account_key_drop(b, k);
                 k->type = KEY_TYPE_deleted;
@@ -971,9 +971,9 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
 
         /* will only happen if all pointers were cached: */
         if (!bch2_bkey_nr_ptrs(k.s_c))
-                k.k->type = KEY_TYPE_discard;
+                k.k->type = KEY_TYPE_deleted;
 
-        return bkey_whiteout(k.k);
+        return bkey_deleted(k.k);
 }
 
 void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,