From ae2f17d5ad02bc85a31d09c4396e177581abbb1f Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 14 Dec 2019 16:20:33 -0500 Subject: [PATCH] bcachefs: Kill btree_node_iter_large Long overdue cleanup - this converts btree_node_iter_large uses to sort_iter. Signed-off-by: Kent Overstreet Signed-off-by: Kent Overstreet --- fs/bcachefs/bkey_sort.c | 270 +++++++++++++--------------------------- fs/bcachefs/bkey_sort.h | 26 ++-- fs/bcachefs/btree_io.c | 22 ++-- fs/bcachefs/super.c | 4 +- 4 files changed, 106 insertions(+), 216 deletions(-) diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c index 4f614cde3267..23b51ef57303 100644 --- a/fs/bcachefs/bkey_sort.c +++ b/fs/bcachefs/bkey_sort.c @@ -5,90 +5,15 @@ #include "bset.h" #include "extents.h" -/* too many iterators, need to clean this up */ - -/* btree_node_iter_large: */ - -#define btree_node_iter_cmp_heap(h, _l, _r) btree_node_iter_cmp(b, _l, _r) - -static inline bool -bch2_btree_node_iter_large_end(struct btree_node_iter_large *iter) -{ - return !iter->used; -} - -static inline struct bkey_packed * -bch2_btree_node_iter_large_peek_all(struct btree_node_iter_large *iter, - struct btree *b) -{ - return bch2_btree_node_iter_large_end(iter) - ? NULL - : __btree_node_offset_to_key(b, iter->data->k); -} - -static void -bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter, - struct btree *b) -{ - iter->data->k += __btree_node_offset_to_key(b, iter->data->k)->u64s; - - EBUG_ON(!iter->used); - EBUG_ON(iter->data->k > iter->data->end); - - if (iter->data->k == iter->data->end) - heap_del(iter, 0, btree_node_iter_cmp_heap, NULL); - else - heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL); -} - -static inline struct bkey_packed * -bch2_btree_node_iter_large_next_all(struct btree_node_iter_large *iter, - struct btree *b) -{ - struct bkey_packed *ret = bch2_btree_node_iter_large_peek_all(iter, b); - - if (ret) - bch2_btree_node_iter_large_advance(iter, b); - - return ret; -} - -void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter, - struct btree *b, - const struct bkey_packed *k, - const struct bkey_packed *end) -{ - if (k != end) { - struct btree_node_iter_set n = - ((struct btree_node_iter_set) { - __btree_node_key_to_offset(b, k), - __btree_node_key_to_offset(b, end) - }); - - __heap_add(iter, n, btree_node_iter_cmp_heap, NULL); - } -} - -static void sort_key_next(struct btree_node_iter_large *iter, - struct btree *b, - struct btree_node_iter_set *i) -{ - i->k += __btree_node_offset_to_key(b, i->k)->u64s; - - while (i->k != i->end && - !__btree_node_offset_to_key(b, i->k)->u64s) - i->k++; - - if (i->k == i->end) - *i = iter->data[--iter->used]; -} - -/* regular sort_iters */ - typedef int (*sort_cmp_fn)(struct btree *, struct bkey_packed *, struct bkey_packed *); +static inline bool sort_iter_end(struct sort_iter *iter) +{ + return !iter->used; +} + static inline void __sort_iter_sift(struct sort_iter *iter, unsigned from, sort_cmp_fn cmp) @@ -118,19 +43,29 @@ static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp) static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter) { - return iter->used ? iter->data->k : NULL; + return !sort_iter_end(iter) ? 
iter->data->k : NULL; +} + +static inline void __sort_iter_advance(struct sort_iter *iter, + unsigned idx, sort_cmp_fn cmp) +{ + struct sort_iter_set *i = iter->data + idx; + + BUG_ON(idx >= iter->used); + + i->k = bkey_next_skip_noops(i->k, i->end); + + BUG_ON(i->k > i->end); + + if (i->k == i->end) + array_remove_item(iter->data, iter->used, idx); + else + __sort_iter_sift(iter, idx, cmp); } static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp) { - iter->data->k = bkey_next_skip_noops(iter->data->k, iter->data->end); - - BUG_ON(iter->data->k > iter->data->end); - - if (iter->data->k == iter->data->end) - array_remove_item(iter->data, iter->used, 0); - else - sort_iter_sift(iter, cmp); + __sort_iter_advance(iter, 0, cmp); } static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter, @@ -145,70 +80,50 @@ static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter, } /* - * Returns true if l > r - unless l == r, in which case returns true if l is - * older than r. - * - * Necessary for btree_sort_fixup() - if there are multiple keys that compare - * equal in different sets, we have to process them newest to oldest. + * If keys compare equal, compare by pointer order: */ -#define key_sort_cmp(h, l, r) \ -({ \ - bkey_cmp_packed(b, \ - __btree_node_offset_to_key(b, (l).k), \ - __btree_node_offset_to_key(b, (r).k)) \ - \ - ?: (l).k - (r).k; \ -}) - -static inline bool should_drop_next_key(struct btree_node_iter_large *iter, - struct btree *b) +static inline int key_sort_fix_overlapping_cmp(struct btree *b, + struct bkey_packed *l, + struct bkey_packed *r) { - struct btree_node_iter_set *l = iter->data, *r = iter->data + 1; - struct bkey_packed *k = __btree_node_offset_to_key(b, l->k); - - if (bkey_whiteout(k)) - return true; - - if (iter->used < 2) - return false; - - if (iter->used > 2 && - key_sort_cmp(iter, r[0], r[1]) >= 0) - r++; - - /* - * key_sort_cmp() ensures that when keys compare equal the older key - * comes first; so if l->k compares equal to r->k then l->k is older and - * should be dropped. - */ - return !bkey_cmp_packed(b, - __btree_node_offset_to_key(b, l->k), - __btree_node_offset_to_key(b, r->k)); + return bkey_cmp_packed(b, l, r) ?: + cmp_int((unsigned long) l, (unsigned long) r); } -struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst, - struct btree *b, - struct btree_node_iter_large *iter) +static inline bool should_drop_next_key(struct sort_iter *iter) +{ + /* + * key_sort_cmp() ensures that when keys compare equal the older key + * comes first; so if l->k compares equal to r->k then l->k is older + * and should be dropped. 
+ */ + return iter->used >= 2 && + !bkey_cmp_packed(iter->b, + iter->data[0].k, + iter->data[1].k); +} + +struct btree_nr_keys +bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst, + struct sort_iter *iter) { struct bkey_packed *out = dst->start; + struct bkey_packed *k; struct btree_nr_keys nr; memset(&nr, 0, sizeof(nr)); - heap_resort(iter, key_sort_cmp, NULL); - - while (!bch2_btree_node_iter_large_end(iter)) { - if (!should_drop_next_key(iter, b)) { - struct bkey_packed *k = - __btree_node_offset_to_key(b, iter->data->k); + sort_iter_sort(iter, key_sort_fix_overlapping_cmp); + while ((k = sort_iter_peek(iter))) { + if (!bkey_whiteout(k) && + !should_drop_next_key(iter)) { bkey_copy(out, k); btree_keys_account_key_add(&nr, 0, out); out = bkey_next(out); } - sort_key_next(iter, b, iter->data); - heap_sift_down(iter, 0, key_sort_cmp, NULL); + sort_iter_advance(iter, key_sort_fix_overlapping_cmp); } dst->u64s = cpu_to_le16((u64 *) out - dst->_data); @@ -221,29 +136,16 @@ struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst, * Necessary for sort_fix_overlapping() - if there are multiple keys that * compare equal in different sets, we have to process them newest to oldest. */ -#define extent_sort_cmp(h, l, r) \ -({ \ - struct bkey _ul = bkey_unpack_key(b, \ - __btree_node_offset_to_key(b, (l).k)); \ - struct bkey _ur = bkey_unpack_key(b, \ - __btree_node_offset_to_key(b, (r).k)); \ - \ - bkey_cmp(bkey_start_pos(&_ul), \ - bkey_start_pos(&_ur)) ?: (r).k - (l).k; \ -}) - -static inline void extent_sort_sift(struct btree_node_iter_large *iter, - struct btree *b, size_t i) +static inline int extent_sort_fix_overlapping_cmp(struct btree *b, + struct bkey_packed *l, + struct bkey_packed *r) { - heap_sift_down(iter, i, extent_sort_cmp, NULL); -} + struct bkey ul = bkey_unpack_key(b, l); + struct bkey ur = bkey_unpack_key(b, r); -static inline void extent_sort_next(struct btree_node_iter_large *iter, - struct btree *b, - struct btree_node_iter_set *i) -{ - sort_key_next(iter, b, i); - heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL); + return bkey_cmp(bkey_start_pos(&ul), + bkey_start_pos(&ur)) ?: + cmp_int((unsigned long) r, (unsigned long) l); } static void extent_sort_advance_prev(struct bkey_format *f, @@ -286,14 +188,14 @@ static void extent_sort_append(struct bch_fs *c, bkey_reassemble((void *) *prev, k.s_c); } -struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c, - struct bset *dst, - struct btree *b, - struct btree_node_iter_large *iter) +struct btree_nr_keys +bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst, + struct sort_iter *iter) { + struct btree *b = iter->b; struct bkey_format *f = &b->format; - struct btree_node_iter_set *_l = iter->data, *_r; - struct bkey_packed *prev = NULL, *lk, *rk; + struct sort_iter_set *_l = iter->data, *_r = iter->data + 1; + struct bkey_packed *prev = NULL; struct bkey l_unpacked, r_unpacked; struct bkey_s l, r; struct btree_nr_keys nr; @@ -302,36 +204,32 @@ struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c, memset(&nr, 0, sizeof(nr)); bkey_on_stack_init(&split); - heap_resort(iter, extent_sort_cmp, NULL); + sort_iter_sort(iter, extent_sort_fix_overlapping_cmp); - while (!bch2_btree_node_iter_large_end(iter)) { - lk = __btree_node_offset_to_key(b, _l->k); - l = __bkey_disassemble(b, lk, &l_unpacked); + while (!sort_iter_end(iter)) { + l = __bkey_disassemble(b, _l->k, &l_unpacked); if (iter->used == 1) { extent_sort_append(c, f, &nr, dst->start, &prev, l); - 
extent_sort_next(iter, b, _l); + sort_iter_advance(iter, + extent_sort_fix_overlapping_cmp); continue; } - _r = iter->data + 1; - if (iter->used > 2 && - extent_sort_cmp(iter, _r[0], _r[1]) >= 0) - _r++; - - rk = __btree_node_offset_to_key(b, _r->k); - r = __bkey_disassemble(b, rk, &r_unpacked); + r = __bkey_disassemble(b, _r->k, &r_unpacked); /* If current key and next key don't overlap, just append */ if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) { extent_sort_append(c, f, &nr, dst->start, &prev, l); - extent_sort_next(iter, b, _l); + sort_iter_advance(iter, + extent_sort_fix_overlapping_cmp); continue; } /* Skip 0 size keys */ if (!r.k->size) { - extent_sort_next(iter, b, _r); + __sort_iter_advance(iter, 1, + extent_sort_fix_overlapping_cmp); continue; } @@ -348,13 +246,14 @@ struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c, if (_l->k > _r->k) { /* l wins, trim r */ if (bkey_cmp(l.k->p, r.k->p) >= 0) { - sort_key_next(iter, b, _r); + __sort_iter_advance(iter, 1, + extent_sort_fix_overlapping_cmp); } else { bch2_cut_front_s(l.k->p, r); - extent_save(b, rk, r.k); + extent_save(b, _r->k, r.k); + __sort_iter_sift(iter, 1, + extent_sort_fix_overlapping_cmp); } - - extent_sort_sift(iter, b, _r - iter->data); } else if (bkey_cmp(l.k->p, r.k->p) > 0) { /* @@ -364,15 +263,16 @@ struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c, bch2_cut_back(bkey_start_pos(r.k), split.k); bch2_cut_front_s(r.k->p, l); - extent_save(b, lk, l.k); + extent_save(b, _l->k, l.k); - extent_sort_sift(iter, b, 0); + __sort_iter_sift(iter, 0, + extent_sort_fix_overlapping_cmp); extent_sort_append(c, f, &nr, dst->start, &prev, bkey_i_to_s(split.k)); } else { bch2_cut_back_s(bkey_start_pos(r.k), l); - extent_save(b, lk, l.k); + extent_save(b, _l->k, l.k); } } diff --git a/fs/bcachefs/bkey_sort.h b/fs/bcachefs/bkey_sort.h index 47a808670341..458a051fdac5 100644 --- a/fs/bcachefs/bkey_sort.h +++ b/fs/bcachefs/bkey_sort.h @@ -2,20 +2,10 @@ #ifndef _BCACHEFS_BKEY_SORT_H #define _BCACHEFS_BKEY_SORT_H -struct btree_node_iter_large { - u16 used; - - struct btree_node_iter_set data[MAX_BSETS]; -}; - -void bch2_btree_node_iter_large_push(struct btree_node_iter_large *, - struct btree *, - const struct bkey_packed *, - const struct bkey_packed *); - struct sort_iter { - struct btree *b; + struct btree *b; unsigned used; + unsigned size; struct sort_iter_set { struct bkey_packed *k, *end; @@ -24,27 +14,27 @@ struct sort_iter { static inline void sort_iter_init(struct sort_iter *iter, struct btree *b) { - memset(iter, 0, sizeof(*iter)); iter->b = b; + iter->used = 0; + iter->size = ARRAY_SIZE(iter->data); } static inline void sort_iter_add(struct sort_iter *iter, struct bkey_packed *k, struct bkey_packed *end) { - BUG_ON(iter->used >= ARRAY_SIZE(iter->data)); + BUG_ON(iter->used >= iter->size); if (k != end) iter->data[iter->used++] = (struct sort_iter_set) { k, end }; } struct btree_nr_keys -bch2_key_sort_fix_overlapping(struct bset *, struct btree *, - struct btree_node_iter_large *); +bch2_key_sort_fix_overlapping(struct bch_fs *, struct bset *, + struct sort_iter *); struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *, struct bset *, - struct btree *, - struct btree_node_iter_large *); + struct sort_iter *); struct btree_nr_keys bch2_sort_repack(struct bset *, struct btree *, diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 90fc31037bbf..209e20fbcd70 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -862,7 +862,7 @@ fsck_err: int 
bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry) { struct btree_node_entry *bne; - struct btree_node_iter_large *iter; + struct sort_iter *iter; struct btree_node *sorted; struct bkey_packed *k; struct bset *i; @@ -871,7 +871,8 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry int ret, retry_read = 0, write = READ; iter = mempool_alloc(&c->fill_iter, GFP_NOIO); - iter->used = 0; + sort_iter_init(iter, b); + iter->size = (btree_blocks(c) + 1) * 2; if (bch2_meta_read_fault("btree")) btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL, @@ -950,13 +951,12 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry if (blacklisted && !first) continue; - bch2_btree_node_iter_large_push(iter, b, - i->start, - vstruct_idx(i, whiteout_u64s)); + sort_iter_add(iter, i->start, + vstruct_idx(i, whiteout_u64s)); - bch2_btree_node_iter_large_push(iter, b, - vstruct_idx(i, whiteout_u64s), - vstruct_last(i)); + sort_iter_add(iter, + vstruct_idx(i, whiteout_u64s), + vstruct_last(i)); } for (bne = write_block(b); @@ -971,9 +971,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry set_btree_bset(b, b->set, &b->data->keys); - b->nr = btree_node_is_extents(b) - ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter) - : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter); + b->nr = (btree_node_is_extents(b) + ? bch2_extent_sort_fix_overlapping + : bch2_key_sort_fix_overlapping)(c, &sorted->keys, iter); u64s = le16_to_cpu(sorted->keys.u64s); *sorted = *b->data; diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index 3ad4c0ecbe12..2e23cc3dbf2f 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -705,9 +705,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) if (bch2_fs_init_fault("fs_alloc")) goto err; - iter_size = sizeof(struct btree_node_iter_large) + + iter_size = sizeof(struct sort_iter) + (btree_blocks(c) + 1) * 2 * - sizeof(struct btree_node_iter_set); + sizeof(struct sort_iter_set); if (!(c->wq = alloc_workqueue("bcachefs", WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
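
For context, and not as part of the patch itself: below is a minimal, self-contained sketch of the merge pattern that sort_iter implements after this conversion, with plain ints standing in for struct bkey_packed. Every demo_* name is invented for illustration only; none of the whiteout handling, overlap fixup, or bkey_next_skip_noops() logic from bkey_sort.c is reproduced here. The idea is the same, though: a small fixed-size array of sorted ranges, kept ordered by each range's current head, drained by repeatedly taking the front element and re-sifting that range to its new position.

/*
 * Illustrative stand-in for the sort_iter merge pattern (not bcachefs code):
 * each source is a sorted range; sources are kept ordered by their current
 * head, and the merged output is produced by repeatedly taking the front
 * source's head and re-sifting that source to its new position.
 */
#include <assert.h>
#include <stdio.h>

struct demo_set {
	const int *k, *end;	/* current position and end of a sorted range */
};

struct demo_sort_iter {
	unsigned used, size;
	struct demo_set data[8];
};

static void demo_iter_init(struct demo_sort_iter *iter)
{
	iter->used = 0;
	iter->size = sizeof(iter->data) / sizeof(iter->data[0]);
}

static void demo_iter_add(struct demo_sort_iter *iter,
			  const int *k, const int *end)
{
	assert(iter->used < iter->size);

	if (k != end)
		iter->data[iter->used++] = (struct demo_set) { k, end };
}

/* Move the set at @idx toward the back while its head sorts after the next set's. */
static void demo_iter_sift(struct demo_sort_iter *iter, unsigned idx)
{
	struct demo_set tmp = iter->data[idx];
	unsigned i;

	for (i = idx; i + 1 < iter->used && *tmp.k > *iter->data[i + 1].k; i++)
		iter->data[i] = iter->data[i + 1];
	iter->data[i] = tmp;
}

/* Order all sets by their current head, smallest head first. */
static void demo_iter_sort(struct demo_sort_iter *iter)
{
	unsigned i = iter->used;

	while (i--)
		demo_iter_sift(iter, i);
}

/* Return the smallest remaining element, or NULL once every set is drained. */
static const int *demo_iter_next(struct demo_sort_iter *iter)
{
	const int *ret;
	unsigned i;

	if (!iter->used)
		return NULL;

	ret = iter->data[0].k++;

	if (iter->data[0].k == iter->data[0].end) {
		/* Front set exhausted: remove it, keeping the others in order. */
		for (i = 1; i < iter->used; i++)
			iter->data[i - 1] = iter->data[i];
		iter->used--;
	} else {
		/* Front set advanced: sift it back to its sorted position. */
		demo_iter_sift(iter, 0);
	}

	return ret;
}

int main(void)
{
	static const int a[] = { 1, 4, 9 }, b[] = { 2, 3, 10 }, c[] = { 5 };
	struct demo_sort_iter iter;
	const int *k;

	demo_iter_init(&iter);
	demo_iter_add(&iter, a, a + 3);
	demo_iter_add(&iter, b, b + 3);
	demo_iter_add(&iter, c, c + 1);
	demo_iter_sort(&iter);

	while ((k = demo_iter_next(&iter)))
		printf("%d ", *k);	/* prints: 1 2 3 4 5 9 10 */
	printf("\n");

	return 0;
}

The real iterator performs the equivalent re-sift on every advance (__sort_iter_sift() via __sort_iter_advance()), presumably preferred over a full heap because the number of source ranges per merge stays small.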