// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "snapshot.h"

#include <linux/prefetch.h>
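
/*
 * Debug-only check that the old key recorded in a btree_insert_entry still
 * matches what's currently in the btree (or in the journal keys, if journal
 * replay hasn't finished).
 */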
static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k = bch2_btree_path_peek_slot_exact(i->path, &u);

	if (unlikely(trans->journal_replay_not_finished)) {
		struct bkey_i *j_k =
			bch2_journal_keys_peek_slot(c, i->btree_id, i->level, i->k->k.p);

		if (j_k)
			k = bkey_i_to_s_c(j_k);
	}

	u = *k.k;
	u.needs_whiteout = i->old_k.needs_whiteout;

	BUG_ON(memcmp(&i->old_k, &u, sizeof(struct bkey)));
	BUG_ON(i->old_v != k.v);
#endif
}

static inline struct btree_path_level *insert_l(struct btree_insert_entry *i)
{
	return i->path->l + i->level;
}

static inline bool same_leaf_as_prev(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i != trans->updates &&
		insert_l(&i[0])->b == insert_l(&i[-1])->b;
}

static inline bool same_leaf_as_next(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i + 1 < trans->updates + trans->nr_updates &&
		insert_l(&i[0])->b == insert_l(&i[1])->b;
}
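
/*
 * Called with the node write locked, before inserting: finishes up a
 * just-completed write and opens a new bset when the current one has been
 * written or has grown too large.
 */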
inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
					   struct btree_path *path,
					   struct btree *b)
{
	struct bch_fs *c = trans->c;

	if (unlikely(btree_node_just_written(b)) &&
	    bch2_btree_post_write_cleanup(c, b))
		bch2_trans_node_reinit_iter(trans, b);

	/*
	 * If the last bset has been written, or if it's gotten too big - start
	 * a new bset to insert into:
	 */
	if (want_new_bset(c, b))
		bch2_btree_init_next(trans, b);
}
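
/*
 * Failure path for bch2_trans_lock_write(): drop the write locks taken so
 * far and restart the transaction with a would_deadlock_write error.
 */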
static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
{
	while (--i >= trans->updates) {
		if (same_leaf_as_prev(trans, i))
			continue;

		bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b);
	}

	trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
}
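
/*
 * Take the write lock on every leaf node this transaction will update,
 * preparing each node for the insert; on lock failure, back out and restart
 * via trans_lock_write_fail().
 */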
static inline int bch2_trans_lock_write(struct btree_trans *trans)
{
	struct btree_insert_entry *i;

	EBUG_ON(trans->write_locked);

	trans_for_each_update(trans, i) {
		if (same_leaf_as_prev(trans, i))
			continue;

		if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c))
			return trans_lock_write_fail(trans, i);

		if (!i->cached)
			bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
	}

	trans->write_locked = true;
	return 0;
}

static inline void bch2_trans_unlock_write(struct btree_trans *trans)
{
	if (likely(trans->write_locked)) {
		struct btree_insert_entry *i;

		trans_for_each_update(trans, i)
			if (!same_leaf_as_prev(trans, i))
				bch2_btree_node_unlock_write_inlined(trans, i->path,
								     insert_l(i)->b);
		trans->write_locked = false;
	}
}

/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non extents: */
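/*
 * Returns true if the bset was modified, false if the insert turned out to be
 * a no-op (deleting a key that wasn't there).
 */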
bool bch2_btree_bset_insert_key(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b,
				struct btree_node_iter *node_iter,
				struct bkey_i *insert)
{
	struct bkey_packed *k;
	unsigned clobber_u64s = 0, new_u64s = 0;

	EBUG_ON(btree_node_just_written(b));
	EBUG_ON(bset_written(b, btree_bset_last(b)));
	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
	EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
	EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
	EBUG_ON(insert->k.u64s >
		bch_btree_keys_u64s_remaining(trans->c, b));
	EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
		k = NULL;

	/* @k is the key being overwritten/deleted, if any: */
	EBUG_ON(k && bkey_deleted(k));

	/* Deleting, but not found? nothing to do: */
	if (bkey_deleted(&insert->k) && !k)
		return false;

	if (bkey_deleted(&insert->k)) {
		/* Deleting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		if (k->needs_whiteout)
			push_whiteout(trans->c, b, insert->k.p);
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			bch2_bset_delete(b, k, clobber_u64s);
			goto fix_iter;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}

		return true;
	}

	if (k) {
		/* Overwriting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		insert->k.needs_whiteout = k->needs_whiteout;
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			goto overwrite;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}
	}

	k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
overwrite:
	bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
	new_u64s = k->u64s;
fix_iter:
	if (clobber_u64s != new_u64s)
		bch2_btree_node_iter_fix(trans, path, b, node_iter, k,
					 clobber_u64s, new_u64s);
	return true;
}
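
/*
 * Journal pin flush callback for btree nodes: if the write that pinned this
 * journal entry is still pending, flag the node for a journal-reclaim write
 * and issue it.
 */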
static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
			      unsigned i, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned long old, new, v;
	unsigned idx = w - b->writes;

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
	v = READ_ONCE(b->flags);

	do {
		old = new = v;

		if (!(old & (1 << BTREE_NODE_dirty)) ||
		    !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
		    w->journal.seq != seq)
			break;

		new &= ~BTREE_WRITE_TYPE_MASK;
		new |= BTREE_WRITE_journal_reclaim;
		new |= 1 << BTREE_NODE_need_write;
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	btree_node_write_if_need(c, b, SIX_LOCK_read);
	six_unlock_read(&b->c.lock);

	bch2_trans_put(trans);
	return 0;
}

int bch2_btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 0, seq);
}

int bch2_btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 1, seq);
}

inline void bch2_btree_add_journal_pin(struct bch_fs *c,
				       struct btree *b, u64 seq)
{
	struct btree_write *w = btree_current_write(b);

	bch2_journal_pin_add(&c->journal, seq, &w->journal,
			     btree_node_write_idx(b) == 0
			     ? bch2_btree_node_flush0
			     : bch2_btree_node_flush1);
}

/**
 * bch2_btree_insert_key_leaf() - insert a key into a leaf node
 * @trans: btree transaction object
 * @path: path pointing to @insert's pos
 * @insert: key to insert
 * @journal_seq: sequence number of journal reservation
 */
inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
				       struct btree_path *path,
				       struct bkey_i *insert,
				       u64 journal_seq)
{
	struct bch_fs *c = trans->c;
	struct btree *b = path_l(path)->b;
	struct bset_tree *t = bset_tree_last(b);
	struct bset *i = bset(b, t);
	int old_u64s = bset_u64s(t);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	if (unlikely(!bch2_btree_bset_insert_key(trans, path, b,
					&path_l(path)->iter, insert)))
		return;

	i->journal_seq = cpu_to_le64(max(journal_seq, le64_to_cpu(i->journal_seq)));

	bch2_btree_add_journal_pin(c, b, journal_seq);

	if (unlikely(!btree_node_dirty(b))) {
		EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
		set_btree_node_dirty_acct(c, b);
	}

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) bset_u64s(t) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_trans_node_reinit_iter(trans, b);
}

/* Cached btree updates: */

/* Normal update interface: */
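
/*
 * Debug assertions: an update entry must be consistent with the btree path it
 * was created from, and keys in btrees with snapshots shouldn't be written at
 * internal snapshot nodes unless explicitly flagged.
 */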
static inline void btree_insert_entry_checks(struct btree_trans *trans,
					     struct btree_insert_entry *i)
{
	BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
	BUG_ON(i->cached != i->path->cached);
	BUG_ON(i->level != i->path->level);
	BUG_ON(i->btree_id != i->path->btree_id);
	EBUG_ON(!i->level &&
		btree_type_has_snapshots(i->btree_id) &&
		!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
		test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
		i->k->k.p.snapshot &&
		bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot));
}

static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
						      unsigned flags)
{
	return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
				    trans->journal_u64s, flags);
}

#define JSET_ENTRY_LOG_U64s 4
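
/*
 * Record the name of the function that created this transaction in the
 * journal, as a BCH_JSET_ENTRY_log entry.
 */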
static noinline void journal_transaction_name(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct jset_entry *entry =
		bch2_journal_add_entry(j, &trans->journal_res,
				       BCH_JSET_ENTRY_log, 0, 0,
				       JSET_ENTRY_LOG_U64s);
	struct jset_entry_log *l =
		container_of(entry, struct jset_entry_log, entry);

	strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
}

static inline int btree_key_can_insert(struct btree_trans *trans,
				       struct btree *b, unsigned u64s)
{
	struct bch_fs *c = trans->c;

	if (!bch2_btree_node_insert_fits(c, b, u64s))
		return -BCH_ERR_btree_insert_btree_node_full;

	return 0;
}
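
/*
 * Slowpath for growing a key cache buffer: drop our write locks and unlock
 * the transaction so we can allocate with GFP_KERNEL, then relock and fix up
 * any old-value pointers into the old allocation.
 */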
noinline static int
btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
				     struct btree_path *path, unsigned new_u64s)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct bkey_cached *ck = (void *) path->l[0].b;
	struct bkey_i *new_k;
	int ret;

	bch2_trans_unlock_write(trans);
	bch2_trans_unlock(trans);

	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
	if (!new_k) {
		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
			bch2_btree_id_str(path->btree_id), new_u64s);
		return -BCH_ERR_ENOMEM_btree_key_cache_insert;
	}

	ret = bch2_trans_relock(trans) ?:
		bch2_trans_lock_write(trans);
	if (unlikely(ret)) {
		kfree(new_k);
		return ret;
	}

	memcpy(new_k, ck->k, ck->u64s * sizeof(u64));

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	kfree(ck->k);
	ck->u64s = new_u64s;
	ck->k = new_k;
	return 0;
}
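
/*
 * Check whether a key cache update fits in the existing buffer, growing it
 * (first with GFP_NOWAIT, falling back to the slowpath above) when it
 * doesn't.
 */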
static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags,
				       struct btree_path *path, unsigned u64s)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) path->l[0].b;
	struct btree_insert_entry *i;
	unsigned new_u64s;
	struct bkey_i *new_k;

	EBUG_ON(path->level);

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
	    bch2_btree_key_cache_must_wait(c) &&
	    !(flags & BTREE_INSERT_JOURNAL_RECLAIM))
		return -BCH_ERR_btree_insert_need_journal_reclaim;

	/*
	 * bch2_varint_decode can read past the end of the buffer by at most 7
	 * bytes (it won't be used):
	 */
	u64s += 1;

	if (u64s <= ck->u64s)
		return 0;

	new_u64s = roundup_pow_of_two(u64s);
	new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT);
	if (unlikely(!new_k))
		return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	ck->u64s = new_u64s;
	ck->k = new_k;
	return 0;
}

/* Triggers: */
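
/*
 * Run the in-memory (atomic) trigger for one update: if the old and new key
 * types share the same trigger, a single call handles both insert and
 * overwrite; otherwise insert and overwrite are marked separately against a
 * deleted key.
 */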
static int run_one_mem_trigger(struct btree_trans *trans,
			       struct btree_insert_entry *i,
			       unsigned flags)
{
	struct bkey_s_c old = { &i->old_k, i->old_v };
	struct bkey_i *new = i->k;
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
	int ret;

	verify_update_old_key(trans, i);

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)))
		return 0;

	if (old_ops->atomic_trigger == new_ops->atomic_trigger) {
		ret = bch2_mark_key(trans, i->btree_id, i->level,
				    old, bkey_i_to_s_c(new),
				    BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
	} else {
		struct bkey _deleted = KEY(0, 0, 0);
		struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };

		_deleted.p = i->path->pos;

		ret = bch2_mark_key(trans, i->btree_id, i->level,
				    deleted, bkey_i_to_s_c(new),
				    BTREE_TRIGGER_INSERT|flags) ?:
		      bch2_mark_key(trans, i->btree_id, i->level,
				    old, deleted,
				    BTREE_TRIGGER_OVERWRITE|flags);
	}

	return ret;
}
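
/*
 * Run at most one transactional trigger for an update, returning > 0 if a
 * trigger actually ran (the caller keeps looping until nothing is left to
 * run).
 */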
static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
				 bool overwrite)
{
	/*
	 * Transactional triggers create new btree_insert_entries, so we can't
	 * pass them a pointer to a btree_insert_entry, that memory is going to
	 * move:
	 */
	struct bkey old_k = i->old_k;
	struct bkey_s_c old = { &old_k, i->old_v };
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);

	verify_update_old_key(trans, i);

	if ((i->flags & BTREE_TRIGGER_NORUN) ||
	    !(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)))
		return 0;

	if (!i->insert_trigger_run &&
	    !i->overwrite_trigger_run &&
	    old_ops->trans_trigger == new_ops->trans_trigger) {
		i->overwrite_trigger_run = true;
		i->insert_trigger_run = true;
		return bch2_trans_mark_key(trans, i->btree_id, i->level, old, i->k,
					   BTREE_TRIGGER_INSERT|
					   BTREE_TRIGGER_OVERWRITE|
					   i->flags) ?: 1;
	} else if (overwrite && !i->overwrite_trigger_run) {
		i->overwrite_trigger_run = true;
		return bch2_trans_mark_old(trans, i->btree_id, i->level, old, i->flags) ?: 1;
	} else if (!overwrite && !i->insert_trigger_run) {
		i->insert_trigger_run = true;
		return bch2_trans_mark_new(trans, i->btree_id, i->level, i->k, i->flags) ?: 1;
	} else {
		return 0;
	}
}

static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
			      struct btree_insert_entry *btree_id_start)
{
	struct btree_insert_entry *i;
	bool trans_trigger_run;
	int ret, overwrite;

	for (overwrite = 1; overwrite >= 0; --overwrite) {
		/*
		 * Running triggers will append more updates to the list of updates as
		 * we're walking it:
		 */
		do {
			trans_trigger_run = false;

			for (i = btree_id_start;
			     i < trans->updates + trans->nr_updates && i->btree_id <= btree_id;
			     i++) {
				if (i->btree_id != btree_id)
					continue;

				ret = run_one_trans_trigger(trans, i, overwrite);
				if (ret < 0)
					return ret;
				if (ret)
					trans_trigger_run = true;
			}
		} while (trans_trigger_run);
	}

	return 0;
}
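
/*
 * Run transactional triggers for every update in the transaction, one btree
 * at a time in btree_id order, handling the alloc btree last.
 */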
static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
{
	struct btree_insert_entry *i = NULL, *btree_id_start = trans->updates;
	unsigned btree_id = 0;
	int ret = 0;

	/*
	 * For a given btree, this algorithm runs insert triggers before
	 * overwrite triggers: this is so that when extents are being moved
	 * (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop references before
	 * they are re-added.
	 */
	for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
		if (btree_id == BTREE_ID_alloc)
			continue;

		while (btree_id_start < trans->updates + trans->nr_updates &&
		       btree_id_start->btree_id < btree_id)
			btree_id_start++;

		ret = run_btree_triggers(trans, btree_id, btree_id_start);
		if (ret)
			return ret;
	}

	trans_for_each_update(trans, i) {
		if (i->btree_id > BTREE_ID_alloc)
			break;
		if (i->btree_id == BTREE_ID_alloc) {
			ret = run_btree_triggers(trans, BTREE_ID_alloc, i);
			if (ret)
				return ret;
			break;
		}
	}

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(!(i->flags & BTREE_TRIGGER_NORUN) &&
		       (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
		       (!i->insert_trigger_run || !i->overwrite_trigger_run));
#endif
	return 0;
}
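
/*
 * If gc is currently running, re-run the in-memory triggers with
 * BTREE_TRIGGER_GC for updates to btree nodes that gc has already visited.
 */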
static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	int ret = 0;

	trans_for_each_update(trans, i) {
		/*
		 * XXX: synchronization of cached update triggers with gc
		 * XXX: synchronization of interior node updates with gc
		 */
		BUG_ON(i->cached || i->level);

		if (gc_visited(c, gc_pos_btree_node(insert_l(i)->b))) {
			ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
			if (ret)
				break;
		}
	}

	return ret;
}
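
/*
 * The core of transaction commit, called with all relevant nodes write
 * locked: checks that every update still fits, takes the journal
 * reservation, runs mem triggers and commit hooks, journals the updates and
 * finally writes them into the btree, key cache or write buffer. Not allowed
 * to fail once the journal reservation has been taken.
 */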
static inline int
bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
			       struct btree_insert_entry **stopped_at,
			       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct btree_write_buffered_key *wb;
	struct btree_trans_commit_hook *h;
	unsigned u64s = 0;
	int ret;

	if (race_fault()) {
		trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
		return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
	}

	/*
	 * Check if the insert will fit in the leaf node with the write lock
	 * held, otherwise another thread could write the node changing the
	 * amount of space available:
	 */

	prefetch(&trans->c->journal.flags);

	trans_for_each_update(trans, i) {
		/* Multiple inserts might go to same leaf: */
		if (!same_leaf_as_prev(trans, i))
			u64s = 0;

		u64s += i->k->k.u64s;
		ret = !i->cached
			? btree_key_can_insert(trans, insert_l(i)->b, u64s)
			: btree_key_can_insert_cached(trans, flags, i->path, u64s);
		if (ret) {
			*stopped_at = i;
			return ret;
		}
	}

	if (trans->nr_wb_updates &&
	    trans->nr_wb_updates + c->btree_write_buffer.state.nr > c->btree_write_buffer.size)
		return -BCH_ERR_btree_insert_need_flush_buffer;

	/*
	 * Don't get journal reservation until after we know insert will
	 * succeed:
	 */
	if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		ret = bch2_trans_journal_res_get(trans,
				(flags & BCH_WATERMARK_MASK)|
				JOURNAL_RES_GET_NONBLOCK);
		if (ret)
			return ret;

		if (unlikely(trans->journal_transaction_names))
			journal_transaction_name(trans);
	} else {
		trans->journal_res.seq = c->journal.replay_journal_seq;
	}

	/*
	 * Not allowed to fail after we've gotten our journal reservation - we
	 * have to use it:
	 */

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    !(flags & BTREE_INSERT_JOURNAL_REPLAY)) {
		if (bch2_journal_seq_verify)
			trans_for_each_update(trans, i)
				i->k->k.version.lo = trans->journal_res.seq;
		else if (bch2_inject_invalid_keys)
			trans_for_each_update(trans, i)
				i->k->k.version = MAX_VERSION;
	}

	if (trans->fs_usage_deltas &&
	    bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas))
		return -BCH_ERR_btree_insert_need_mark_replicas;

	if (trans->nr_wb_updates) {
		EBUG_ON(flags & BTREE_INSERT_JOURNAL_REPLAY);

		ret = bch2_btree_insert_keys_write_buffer(trans);
		if (ret)
			goto revert_fs_usage;
	}

	h = trans->hooks;
	while (h) {
		ret = h->fn(trans, h);
		if (ret)
			goto revert_fs_usage;
		h = h->next;
	}

	trans_for_each_update(trans, i)
		if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
			ret = run_one_mem_trigger(trans, i, i->flags);
			if (ret)
				goto fatal_err;
		}

	if (unlikely(c->gc_pos.phase)) {
		ret = bch2_trans_commit_run_gc_triggers(trans);
		if (ret)
			goto fatal_err;
	}

	if (unlikely(trans->extra_journal_entries.nr)) {
		memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
				  trans->extra_journal_entries.data,
				  trans->extra_journal_entries.nr);

		trans->journal_res.offset += trans->extra_journal_entries.nr;
		trans->journal_res.u64s -= trans->extra_journal_entries.nr;
	}

	if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		struct journal *j = &c->journal;
		struct jset_entry *entry;

		trans_for_each_update(trans, i) {
			if (i->key_cache_already_flushed)
				continue;

			if (i->flags & BTREE_UPDATE_NOJOURNAL)
				continue;

			verify_update_old_key(trans, i);

			if (trans->journal_transaction_names) {
				entry = bch2_journal_add_entry(j, &trans->journal_res,
						       BCH_JSET_ENTRY_overwrite,
						       i->btree_id, i->level,
						       i->old_k.u64s);
				bkey_reassemble((struct bkey_i *) entry->start,
						(struct bkey_s_c) { &i->old_k, i->old_v });
			}

			entry = bch2_journal_add_entry(j, &trans->journal_res,
					       BCH_JSET_ENTRY_btree_keys,
					       i->btree_id, i->level,
					       i->k->k.u64s);
			bkey_copy((struct bkey_i *) entry->start, i->k);
		}

		trans_for_each_wb_update(trans, wb) {
			entry = bch2_journal_add_entry(j, &trans->journal_res,
					       BCH_JSET_ENTRY_btree_keys,
					       wb->btree, 0,
					       wb->k.k.u64s);
			bkey_copy((struct bkey_i *) entry->start, &wb->k);
		}

		if (trans->journal_seq)
			*trans->journal_seq = trans->journal_res.seq;
	}

	trans_for_each_update(trans, i) {
		i->k->k.needs_whiteout = false;

		if (!i->cached) {
			u64 seq = trans->journal_res.seq;

			if (i->flags & BTREE_UPDATE_PREJOURNAL)
				seq = i->seq;

			bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
		} else if (!i->key_cache_already_flushed)
|
|
|
			bch2_btree_insert_key_cached(trans, flags, i);
		else {
			bch2_btree_key_cache_drop(trans, i->path);
			btree_path_set_dirty(i->path, BTREE_ITER_NEED_TRAVERSE);
		}
	}

	return 0;
fatal_err:
	bch2_fatal_error(c);
revert_fs_usage:
	if (trans->fs_usage_deltas)
		bch2_trans_fs_usage_revert(trans, trans->fs_usage_deltas);
	return ret;
}
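
/*
 * When a transaction commits while journal replay is still in progress, the
 * keys it wrote must take precedence over older versions still sitting in the
 * journal: mark the matching journal keys as overwritten so replay won't
 * clobber them.
 */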
static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
{
	struct btree_insert_entry *i;
	struct btree_write_buffered_key *wb;

	trans_for_each_update(trans, i)
		bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
	trans_for_each_wb_update(trans, wb)
		bch2_journal_key_overwritten(trans->c, wb->btree, 0, wb->k.k.p);
}

static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
						   enum bkey_invalid_flags flags,
						   struct btree_insert_entry *i,
						   struct printbuf *err)
{
	struct bch_fs *c = trans->c;

	printbuf_reset(err);
	prt_printf(err, "invalid bkey on insert from %s -> %ps",
		   trans->fn, (void *) i->ip_allocated);
	prt_newline(err);
	printbuf_indent_add(err, 2);

	bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
	prt_newline(err);

	bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type, flags, err);

	bch2_print_string_as_lines(KERN_ERR, err->buf);

	bch2_inconsistent_error(c);
	bch2_dump_trans_updates(trans);

	return -EINVAL;
}

/*
 * Get journal reservation, take write locks, and attempt to do btree update(s):
 */
static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
				       struct btree_insert_entry **stopped_at,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	int ret = 0, u64s_delta = 0;
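
	/*
	 * Before taking write locks: total up, per leaf being updated, the net
	 * change in key space (new key sizes minus the keys being overwritten)
	 * and, if a node is only shrinking, try a foreground merge first.
	 */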
	trans_for_each_update(trans, i) {
		if (i->cached)
			continue;

		u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
		u64s_delta -= i->old_btree_u64s;

		if (!same_leaf_as_next(trans, i)) {
			if (u64s_delta <= 0) {
				ret = bch2_foreground_maybe_merge(trans, i->path,
							i->level, flags);
				if (unlikely(ret))
					return ret;
			}

			u64s_delta = 0;
		}
	}

	ret = bch2_trans_lock_write(trans);
	if (unlikely(ret))
		return ret;

	ret = bch2_trans_commit_write_locked(trans, flags, stopped_at, trace_ip);

	if (!ret && unlikely(trans->journal_replay_not_finished))
		bch2_drop_overwrites_from_journal(trans);

	bch2_trans_unlock_write(trans);

	if (!ret && trans->journal_pin)
		bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
				     trans->journal_pin, NULL);

	/*
	 * Drop journal reservation after dropping write locks, since dropping
	 * the journal reservation may kick off a journal write:
	 */
	bch2_journal_res_put(&c->journal, &trans->journal_res);

	return ret;
}
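
/*
 * Wait condition for the journal-reclaim stall below: we're done when the
 * journal has hit an error (negative return) or the key cache no longer needs
 * flushing; while we're still waiting, give journal reclaim another kick.
 */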
static int journal_reclaim_wait_done(struct bch_fs *c)
{
	int ret = bch2_journal_error(&c->journal) ?:
		!bch2_btree_key_cache_must_wait(c);

	if (!ret)
		journal_reclaim_kick(&c->journal);
	return ret;
}

static noinline
int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
			    struct btree_insert_entry *i,
			    int ret, unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
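
	/*
	 * Each case handles one of the errors the locked commit path can
	 * return; if we clear ret (or return a fresh 0), the caller retries
	 * the commit.
	 */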
	switch (ret) {
	case -BCH_ERR_btree_insert_btree_node_full:
		ret = bch2_btree_split_leaf(trans, i->path, flags);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
		break;
	case -BCH_ERR_btree_insert_need_mark_replicas:
		ret = drop_locks_do(trans,
			bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas));
		break;
	case -BCH_ERR_journal_res_get_blocked:
		/*
		 * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
		 * flag
		 */
		if ((flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
		    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim) {
			ret = -BCH_ERR_journal_reclaim_would_deadlock;
			break;
		}

		ret = drop_locks_do(trans,
			bch2_trans_journal_res_get(trans,
					(flags & BCH_WATERMARK_MASK)|
					JOURNAL_RES_GET_CHECK));
		break;
	case -BCH_ERR_btree_insert_need_journal_reclaim:
		bch2_trans_unlock(trans);

		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);

		wait_event_freezable(c->journal.reclaim_wait,
				     (ret = journal_reclaim_wait_done(c)));
		if (ret < 0)
			break;

		ret = bch2_trans_relock(trans);
		break;
	case -BCH_ERR_btree_insert_need_flush_buffer: {
		struct btree_write_buffer *wb = &c->btree_write_buffer;

		ret = 0;

		if (wb->state.nr > wb->size * 3 / 4) {
			bch2_trans_unlock(trans);
			mutex_lock(&wb->flush_lock);

			if (wb->state.nr > wb->size * 3 / 4) {
				bch2_trans_begin(trans);
				ret = __bch2_btree_write_buffer_flush(trans,
						flags|BTREE_INSERT_NOCHECK_RW, true);
				if (!ret) {
					trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
					ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
				}
			} else {
				mutex_unlock(&wb->flush_lock);
				ret = bch2_trans_relock(trans);
			}
		}
		break;
	}
	default:
		BUG_ON(ret >= 0);
		break;
	}

	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);

	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOSPC) &&
				!(flags & BTREE_INSERT_NOWAIT) &&
				(flags & BTREE_INSERT_NOFAIL), c,
				"%s: incorrectly got %s\n", __func__, bch2_err_str(ret));

	return ret;
}
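
/*
 * Slow path for taking the trans write ref before the filesystem has finished
 * starting up: with BTREE_INSERT_LAZY_RW we go read-write early instead of
 * failing the commit with -erofs.
 */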
static noinline int
bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (likely(!(flags & BTREE_INSERT_LAZY_RW)) ||
	    test_bit(BCH_FS_STARTED, &c->flags))
		return -BCH_ERR_erofs_trans_commit;

	ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
	if (ret)
		return ret;

	bch2_write_ref_get(c, BCH_WRITE_REF_trans);
	return 0;
}

/*
 * This is for updates done in the early part of fsck - btree_gc - before we've
 * gone RW. We only add the new key to the list of keys for journal replay to
 * do.
 */
static noinline int
do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	int ret = 0;

	trans_for_each_update(trans, i) {
		ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
		if (ret)
			break;
	}

	return ret;
}
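
/*
 * Usage sketch (illustrative only; "iter" and "new" are placeholders): callers
 * normally queue updates with bch2_trans_update() and commit through a
 * restart-handling wrapper such as commit_do() rather than calling
 * __bch2_trans_commit() directly, e.g.:
 *
 *	ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
 *			bch2_trans_update(trans, &iter, &new->k_i, 0));
 */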
int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i = NULL;
	struct btree_write_buffered_key *wb;
	int ret = 0;

	if (!trans->nr_updates &&
	    !trans->nr_wb_updates &&
	    !trans->extra_journal_entries.nr)
		goto out_reset;

	if (flags & BTREE_INSERT_GC_LOCK_HELD)
		lockdep_assert_held(&c->gc_lock);

	ret = bch2_trans_commit_run_triggers(trans);
	if (ret)
		goto out_reset;
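
	/*
	 * Validate the keys being inserted (and run the per-entry sanity
	 * checks) before committing anything; an invalid bkey fails the whole
	 * transaction.
	 */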
	trans_for_each_update(trans, i) {
		struct printbuf buf = PRINTBUF;
		enum bkey_invalid_flags invalid_flags = 0;

		if (!(flags & BTREE_INSERT_JOURNAL_REPLAY))
			invalid_flags |= BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT;

		if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
					       i->bkey_type, invalid_flags, &buf)))
			ret = bch2_trans_commit_bkey_invalid(trans, invalid_flags, i, &buf);
		btree_insert_entry_checks(trans, i);
		printbuf_exit(&buf);

		if (ret)
			return ret;
	}

	if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
		ret = do_bch2_trans_commit_to_journal_replay(trans);
		goto out_reset;
	}

	if (!(flags & BTREE_INSERT_NOCHECK_RW) &&
	    unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) {
		ret = bch2_trans_commit_get_rw_cold(trans, flags);
		if (ret)
			goto out_reset;
	}
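
	/*
	 * If the btree write buffer is over half full, flush it here first
	 * (trylock, so only one flusher runs at a time) and restart the
	 * transaction.
	 */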
	if (c->btree_write_buffer.state.nr > c->btree_write_buffer.size / 2 &&
	    mutex_trylock(&c->btree_write_buffer.flush_lock)) {
		bch2_trans_begin(trans);
		bch2_trans_unlock(trans);

		ret = __bch2_btree_write_buffer_flush(trans,
					flags|BTREE_INSERT_NOCHECK_RW, true);
		if (!ret) {
			trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
		}
		goto out;
	}

	EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
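
	/*
	 * Size the journal reservation up front: space for any extra journal
	 * entries, a log entry for the transaction name, each key being
	 * journaled and, when transaction names are enabled, each overwritten
	 * key.
	 */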
	trans->journal_u64s = trans->extra_journal_entries.nr;
	trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
	if (trans->journal_transaction_names)
		trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);

	trans_for_each_update(trans, i) {
		EBUG_ON(!i->path->should_be_locked);

		ret = bch2_btree_path_upgrade(trans, i->path, i->level + 1);
		if (unlikely(ret))
			goto out;

		EBUG_ON(!btree_node_intent_locked(i->path, i->level));

		if (i->key_cache_already_flushed)
			continue;

		if (i->flags & BTREE_UPDATE_NOJOURNAL)
			continue;

		/* we're going to journal the key being updated: */
		trans->journal_u64s += jset_u64s(i->k->k.u64s);

		/* and we're also going to log the overwrite: */
		if (trans->journal_transaction_names)
			trans->journal_u64s += jset_u64s(i->old_k.u64s);
	}

	trans_for_each_wb_update(trans, wb)
		trans->journal_u64s += jset_u64s(wb->k.k.u64s);

	if (trans->extra_journal_res) {
		ret = bch2_disk_reservation_add(c, trans->disk_res,
				trans->extra_journal_res,
				(flags & BTREE_INSERT_NOFAIL)
				? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			goto err;
	}
retry:
	bch2_trans_verify_not_in_restart(trans);
	memset(&trans->journal_res, 0, sizeof(trans->journal_res));

	ret = do_bch2_trans_commit(trans, flags, &i, _RET_IP_);

	/* make sure we didn't drop or screw up locks: */
	bch2_trans_verify_locks(trans);

	if (ret)
		goto err;

	trace_and_count(c, transaction_commit, trans, _RET_IP_);
out:
	if (likely(!(flags & BTREE_INSERT_NOCHECK_RW)))
		bch2_write_ref_put(c, BCH_WRITE_REF_trans);
out_reset:
	if (!ret)
		bch2_trans_downgrade(trans);
	bch2_trans_reset_updates(trans);

	return ret;
err:
	ret = bch2_trans_commit_error(trans, flags, i, ret, _RET_IP_);
	if (ret)
		goto out;

	goto retry;
}