diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 96ac8f396d46..c9ff590ef978 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -44,10 +44,10 @@ static inline enum bch_data_type __alloc_data_type(u32 dirty_sectors,
 						   struct bch_alloc_v4 a,
 						   enum bch_data_type data_type)
 {
+	if (stripe)
+		return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
 	if (dirty_sectors)
 		return data_type;
-	if (stripe)
-		return BCH_DATA_stripe;
 	if (cached_sectors)
 		return BCH_DATA_cached;
 	if (BCH_ALLOC_V4_NEED_DISCARD(&a))
@@ -64,19 +64,31 @@ static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
 				a.stripe, a, data_type);
 }
 
+static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
+{
+	return data_type == BCH_DATA_stripe ? BCH_DATA_user : data_type;
+}
+
 static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
 {
 	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
 }
 
+#define DATA_TYPES_MOVABLE		\
+	((1U << BCH_DATA_btree)|	\
+	 (1U << BCH_DATA_user)|		\
+	 (1U << BCH_DATA_stripe))
+
+static inline bool data_type_movable(enum bch_data_type type)
+{
+	return (1U << type) & DATA_TYPES_MOVABLE;
+}
+
 static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
 					      struct bch_dev *ca)
 {
-	if (a.data_type != BCH_DATA_btree &&
-	    a.data_type != BCH_DATA_user)
-		return 0;
-
-	if (a.dirty_sectors >= ca->mi.bucket_size)
+	if (!data_type_movable(a.data_type) ||
+	    a.dirty_sectors >= ca->mi.bucket_size)
 		return 0;
 	return div_u64((u64) a.dirty_sectors * (1ULL << 31),
 		       ca->mi.bucket_size);
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index ded1ab7fb0bc..314fee21dc27 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -96,12 +96,20 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
 	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i);
 }
 
+static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
+						    struct bkey_s_c k, struct extent_ptr_decoded p)
+{
+	return  level		? BCH_DATA_btree :
+		p.has_ec	? BCH_DATA_stripe :
+				  BCH_DATA_user;
+}
+
 static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
 			   enum btree_id btree_id, unsigned level,
 			   struct bkey_s_c k, struct extent_ptr_decoded p,
 			   struct bpos *bucket_pos, struct bch_backpointer *bp)
 {
-	enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
+	enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
 	s64 sectors = level ? btree_sectors(c) : k.k->size;
 	u32 bucket_offset;
 
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 6a0eaa661002..df4cdd16c08d 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -633,8 +633,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 		if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
 			continue;
 
-		if (fsck_err_on(g->data_type &&
-				g->data_type != data_type, c,
+		if (fsck_err_on(bucket_data_type(g->data_type) &&
+				bucket_data_type(g->data_type) != data_type, c,
 				"bucket %u:%zu different types of data in same bucket: %s, %s\n"
 				"while marking %s",
 				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
@@ -1397,6 +1397,16 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	if (gen_after(old->gen, gc.gen))
 		return 0;
 
+	if (c->opts.reconstruct_alloc ||
+	    fsck_err_on(new.data_type != gc.data_type, c,
+			"bucket %llu:%llu gen %u has wrong data_type"
+			": got %s, should be %s",
+			iter->pos.inode, iter->pos.offset,
+			gc.gen,
+			bch2_data_types[new.data_type],
+			bch2_data_types[gc.data_type]))
+		new.data_type = gc.data_type;
+
 #define copy_bucket_field(_f)						\
 	if (c->opts.reconstruct_alloc ||				\
 	    fsck_err_on(new._f != gc._f, c,				\
@@ -1409,7 +1419,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 		new._f = gc._f;						\
 
 	copy_bucket_field(gen);
-	copy_bucket_field(data_type);
 	copy_bucket_field(dirty_sectors);
 	copy_bucket_field(cached_sectors);
 	copy_bucket_field(stripe_redundancy);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 40e3d649a05e..c7139dd8e1dc 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -776,7 +776,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
 	unsigned nr_data = s->nr_blocks - s->nr_redundant;
 	bool parity = ptr_idx >= nr_data;
-	enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
+	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
 	s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
 	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
 	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
@@ -811,8 +811,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	if (data_type)
-		g->data_type = data_type;
+	g->data_type = data_type;
 	g->dirty_sectors += sectors;
 
 	g->stripe		= k.k->p.offset;
@@ -851,15 +850,17 @@ static int __mark_pointer(struct btree_trans *trans,
 }
 
 static int bch2_mark_pointer(struct btree_trans *trans,
+			     enum btree_id btree_id, unsigned level,
 			     struct bkey_s_c k,
 			     struct extent_ptr_decoded p,
-			     s64 sectors, enum bch_data_type data_type,
+			     s64 sectors,
 			     unsigned flags)
 {
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
 	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
 	struct bucket old, new, *g;
+	enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
 	u8 bucket_data_type;
 	int ret = 0;
 
@@ -963,8 +964,7 @@ int bch2_mark_extent(struct btree_trans *trans,
 		if (flags & BTREE_TRIGGER_OVERWRITE)
 			disk_sectors = -disk_sectors;
 
-		ret = bch2_mark_pointer(trans, k, p, disk_sectors,
-					data_type, flags);
+		ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
 		if (ret < 0)
 			return ret;
 
@@ -1596,6 +1596,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
 
 		a->v.stripe		= s.k->p.offset;
 		a->v.stripe_redundancy	= s.v->nr_redundant;
+		a->v.data_type		= BCH_DATA_stripe;
 	} else {
 		if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
 					       a->v.stripe_redundancy != s.v->nr_redundant, trans,
@@ -1608,6 +1609,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
 
 		a->v.stripe		= 0;
 		a->v.stripe_redundancy	= 0;
+		a->v.data_type		= alloc_data_type(a->v, BCH_DATA_user);
 	}
 
 	a->v.dirty_sectors += sectors;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 63b358c95282..bfa0463b4ffe 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -55,8 +55,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
 	a = bch2_alloc_to_v4(k, &_a);
 	*gen = a->gen;
 
-	ret = (a->data_type == BCH_DATA_btree ||
-	       a->data_type == BCH_DATA_user) &&
+	ret = data_type_movable(a->data_type) &&
 		a->fragmentation_lru &&
 		a->fragmentation_lru <= time;
 
@@ -158,13 +157,18 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	struct bch_dev *ca;
 	unsigned dev_idx;
 	s64 wait = S64_MAX, fragmented_allowed, fragmented;
+	unsigned i;
 
 	for_each_rw_member(ca, c, dev_idx) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
 		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
 				       ca->mi.bucket_size) >> 1);
-		fragmented = usage.d[BCH_DATA_user].fragmented;
+		fragmented = 0;
+
+		for (i = 0; i < BCH_DATA_NR; i++)
+			if (data_type_movable(i))
+				fragmented += usage.d[i].fragmented;
 
 		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
 	}
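
Not part of the patch: the standalone C sketch below only illustrates the classification behaviour this change introduces. The enum, its values, and the classify_bucket()/movable() helpers are simplified stand-ins for bch_data_type, __alloc_data_type() and data_type_movable(), not the kernel definitions; they mirror only the new ordering (stripe membership is checked before dirty_sectors, parity buckets keep BCH_DATA_parity, and stripe buckets are now copygc candidates).

/* Standalone illustration only: simplified stand-ins for the bcachefs
 * helpers touched by this patch; enum values are arbitrary, not on-disk. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum data_type {
	DATA_free,
	DATA_btree,
	DATA_user,
	DATA_cached,
	DATA_parity,
	DATA_stripe,
	DATA_NR,
};

static const char *data_type_str[] = {
	"free", "btree", "user", "cached", "parity", "stripe",
};

/* Mirrors the reordered __alloc_data_type(): a bucket that belongs to a
 * stripe is reported as stripe (or parity) data before dirty_sectors or
 * cached_sectors are consulted. */
static enum data_type classify_bucket(uint32_t dirty_sectors,
				      uint32_t cached_sectors,
				      uint64_t stripe,
				      enum data_type data_type)
{
	if (stripe)
		return data_type == DATA_parity ? data_type : DATA_stripe;
	if (dirty_sectors)
		return data_type;
	if (cached_sectors)
		return DATA_cached;
	return DATA_free;
}

/* Mirrors DATA_TYPES_MOVABLE/data_type_movable(): copygc now also treats
 * stripe buckets as candidates for evacuation, but not parity buckets. */
static bool movable(enum data_type t)
{
	return (1U << t) & ((1U << DATA_btree) |
			    (1U << DATA_user)  |
			    (1U << DATA_stripe));
}

int main(void)
{
	/* A data block of a stripe: reported as stripe data, and movable. */
	enum data_type t = classify_bucket(128, 0, 42, DATA_user);
	printf("stripe data bucket   -> %s, movable=%d\n",
	       data_type_str[t], movable(t));

	/* A parity block of a stripe keeps its parity type. */
	t = classify_bucket(128, 0, 42, DATA_parity);
	printf("stripe parity bucket -> %s, movable=%d\n",
	       data_type_str[t], movable(t));

	/* An ordinary user-data bucket is unaffected. */
	t = classify_bucket(64, 0, 0, DATA_user);
	printf("user bucket          -> %s, movable=%d\n",
	       data_type_str[t], movable(t));
	return 0;
}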