bcachefs: Don't write bucket IO time lazily
With the btree key cache code, we don't need to update the alloc btree lazily - and this will mean we can remove the bch2_alloc_write() call in the shutdown path.

Future work: we really need to expand the bucket IO clocks from 16 to 64 bits, so that we don't have to rescale them.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent ffb7c3d370
commit f30dd86012
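For context, a hedged sketch of how a caller might drive the new bch2_bucket_io_time_reset() helper from outside an existing transaction. The wrapper function name is hypothetical, and it assumes the bch2_trans_init()/bch2_trans_exit() transaction lifecycle helpers of this era of bcachefs; in the commit itself the read and move paths instead pass the struct btree_trans they are already running in, which is why bch2_read_extent() and friends change their first argument from struct bch_fs * to struct btree_trans *.

/*
 * Hypothetical usage sketch, not part of this commit: bump a bucket's
 * read clock transactionally.  Assumes bch2_trans_init()/bch2_trans_exit()
 * as the transaction setup/teardown helpers of this era.
 */
#include "bcachefs.h"
#include "alloc_background.h"

static int example_touch_bucket(struct bch_fs *c, unsigned dev,
				size_t bucket_nr)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	/*
	 * The helper gets its own BTREE_ITER_CACHED iterator, skips the
	 * update if the clock hand hasn't moved, and commits internally.
	 */
	ret = bch2_bucket_io_time_reset(&trans, dev, bucket_nr, READ);

	bch2_trans_exit(&trans);
	return ret;
}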
@@ -489,6 +489,54 @@ static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
 	mutex_init(&clock->lock);
 }
 
+int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
+			      size_t bucket_nr, int rw)
+{
+	struct bch_fs *c = trans->c;
+	struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
+	struct btree_iter *iter;
+	struct bucket *g;
+	struct bkey_i_alloc *a;
+	struct bkey_alloc_unpacked u;
+	u16 *time;
+	int ret = 0;
+
+	iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, POS(dev, bucket_nr),
+				   BTREE_ITER_CACHED|
+				   BTREE_ITER_CACHED_NOFILL|
+				   BTREE_ITER_INTENT);
+	ret = bch2_btree_iter_traverse(iter);
+	if (ret)
+		goto out;
+
+	a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
+	ret = PTR_ERR_OR_ZERO(a);
+	if (ret)
+		goto out;
+
+	percpu_down_read(&c->mark_lock);
+	g = bucket(ca, bucket_nr);
+	u = alloc_mem_to_key(g, READ_ONCE(g->mark));
+	percpu_up_read(&c->mark_lock);
+
+	bkey_alloc_init(&a->k_i);
+	a->k.p = iter->pos;
+
+	time = rw == READ ? &u.read_time : &u.write_time;
+	if (*time == c->bucket_clock[rw].hand)
+		goto out;
+
+	*time = c->bucket_clock[rw].hand;
+
+	bch2_alloc_pack(a, u);
+
+	ret = bch2_trans_update(trans, iter, &a->k_i, 0) ?:
+		bch2_trans_commit(trans, NULL, NULL, 0);
+out:
+	bch2_trans_iter_put(trans, iter);
+	return ret;
+}
+
 /* Background allocator thread: */
 
 /*
@@ -31,6 +31,8 @@ struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c);
 void bch2_alloc_pack(struct bkey_i_alloc *,
 		     const struct bkey_alloc_unpacked);
 
+int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
+
 static inline struct bkey_alloc_unpacked
 alloc_mem_to_key(struct bucket *g, struct bucket_mark m)
 {
@@ -309,8 +309,6 @@ out:
 		.dev	= ca->dev_idx,
 	};
 
-	bucket_io_clock_reset(c, ca, bucket, READ);
-	bucket_io_clock_reset(c, ca, bucket, WRITE);
 	spin_unlock(&ob->lock);
 
 	if (c->blocked_allocate_open_bucket) {
@@ -58,12 +58,6 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
 	return __bucket(ca, b, false);
 }
 
-static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
-					 size_t b, int rw)
-{
-	bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
-}
-
 static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
 {
 	return c->bucket_clock[rw].hand - g->io_time[rw];
@@ -826,7 +826,7 @@ retry:
 		if (bkey_extent_is_allocation(k.k))
 			bch2_add_page_sectors(&rbio->bio, k);
 
-		bch2_read_extent(c, rbio, k, offset_into_extent, flags);
+		bch2_read_extent(trans, rbio, k, offset_into_extent, flags);
 
 		if (flags & BCH_READ_LAST_FRAGMENT)
 			break;
@@ -7,6 +7,7 @@
  */
 
 #include "bcachefs.h"
+#include "alloc_background.h"
 #include "alloc_foreground.h"
 #include "bkey_on_stack.h"
 #include "bset.h"
@@ -1640,7 +1641,7 @@ retry:
 		goto out;
 	}
 
-	ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
+	ret = __bch2_read_extent(&trans, rbio, bvec_iter, k, 0, failed, flags);
 	if (ret == READ_RETRY)
 		goto retry;
 	if (ret)
@@ -1698,7 +1699,7 @@ retry:
 	bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
 	swap(bvec_iter.bi_size, bytes);
 
-	ret = __bch2_read_extent(c, rbio, bvec_iter, k,
+	ret = __bch2_read_extent(&trans, rbio, bvec_iter, k,
 				 offset_into_extent, failed, flags);
 	switch (ret) {
 	case READ_RETRY:
@@ -2026,11 +2027,12 @@ err:
 	return ret;
 }
 
-int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
+int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 		       struct bvec_iter iter, struct bkey_s_c k,
 		       unsigned offset_into_extent,
 		       struct bch_io_failures *failed, unsigned flags)
 {
+	struct bch_fs *c = trans->c;
 	struct extent_ptr_decoded pick;
 	struct bch_read_bio *rbio = NULL;
 	struct bch_dev *ca;
@@ -2200,9 +2202,9 @@ get_bio:
 
 	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
 
-	rcu_read_lock();
-	bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
-	rcu_read_unlock();
+	if (pick.ptr.cached)
+		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
+					  PTR_BUCKET_NR(ca, &pick.ptr), READ);
 
 	if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
 		bio_inc_remaining(&orig->bio);
@@ -2351,7 +2353,7 @@ retry:
 	if (rbio->bio.bi_iter.bi_size == bytes)
 		flags |= BCH_READ_LAST_FRAGMENT;
 
-	bch2_read_extent(c, rbio, k, offset_into_extent, flags);
+	bch2_read_extent(&trans, rbio, k, offset_into_extent, flags);
 
 	if (flags & BCH_READ_LAST_FRAGMENT)
 		break;
@@ -140,17 +140,17 @@ enum bch_read_flags {
 	BCH_READ_IN_RETRY	= 1 << 7,
 };
 
-int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *,
+int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
 		       struct bvec_iter, struct bkey_s_c, unsigned,
 		       struct bch_io_failures *, unsigned);
 
-static inline void bch2_read_extent(struct bch_fs *c,
+static inline void bch2_read_extent(struct btree_trans *trans,
 				    struct bch_read_bio *rbio,
 				    struct bkey_s_c k,
 				    unsigned offset_into_extent,
 				    unsigned flags)
 {
-	__bch2_read_extent(c, rbio, rbio->bio.bi_iter, k,
+	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, k,
 			   offset_into_extent, NULL, flags);
 }
 
@@ -414,7 +414,7 @@ static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
 		       atomic_read(&ctxt->write_sectors) != sectors_pending);
 }
 
-static int bch2_move_extent(struct bch_fs *c,
+static int bch2_move_extent(struct btree_trans *trans,
 			    struct moving_context *ctxt,
 			    struct write_point_specifier wp,
 			    struct bch_io_opts io_opts,
|
||||
enum data_cmd data_cmd,
|
||||
struct data_opts data_opts)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
|
||||
struct moving_io *io;
|
||||
const union bch_extent_entry *entry;
|
||||
@ -489,7 +490,7 @@ static int bch2_move_extent(struct bch_fs *c,
|
||||
* ctxt when doing wakeup
|
||||
*/
|
||||
closure_get(&ctxt->cl);
|
||||
bch2_read_extent(c, &io->rbio, k, 0,
|
||||
bch2_read_extent(trans, &io->rbio, k, 0,
|
||||
BCH_READ_NODECODE|
|
||||
BCH_READ_LAST_FRAGMENT);
|
||||
return 0;
|
||||
@ -607,7 +608,7 @@ peek:
|
||||
k = bkey_i_to_s_c(sk.k);
|
||||
bch2_trans_unlock(&trans);
|
||||
|
||||
ret2 = bch2_move_extent(c, ctxt, wp, io_opts, btree_id, k,
|
||||
ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
|
||||
data_cmd, data_opts);
|
||||
if (ret2) {
|
||||
if (ret2 == -ENOMEM) {
|
||||
|