bcachefs: bch2_btree_write_buffer_flush_going_ro()
The write buffer needs to be specifically flushed when going RO: keys in the journal that haven't yet been moved to the write buffer don't have a journal pin yet.

This fixes numerous syzbot bugs, all with symptoms of still doing writes after we've gone RO.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit ca43f73cd1
parent 8440da9331
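The fix follows a common quiesce pattern: each flush pass reports whether it actually moved any keys, and the read-only path keeps looping until a pass finds nothing left to do. Below is a minimal, self-contained C sketch of that pattern, assuming nothing about bcachefs internals; toy_write_buffer, toy_flush, toy_flush_going_ro and toy_go_read_only are hypothetical stand-ins for illustration, not the real implementation.

/*
 * Minimal sketch of the "flush until nothing did work" pattern this commit
 * uses when going read-only.  All names and data structures below are
 * illustrative stand-ins, not the real bcachefs ones.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_write_buffer {
        unsigned nr_keys;       /* keys still buffered, waiting to be flushed */
};

/*
 * One flush pass; reports via *did_work whether it found anything buffered,
 * mirroring the did_work out-parameter added to btree_write_buffer_flush_seq().
 */
static int toy_flush(struct toy_write_buffer *wb, bool *did_work)
{
        *did_work |= wb->nr_keys != 0;

        /* pretend a single pass can only drain a few keys */
        wb->nr_keys -= wb->nr_keys < 4 ? wb->nr_keys : 4;
        return 0;
}

/*
 * Analogue of bch2_btree_write_buffer_flush_going_ro(): returns true if the
 * pass found work to do, so the caller knows the filesystem wasn't quiescent.
 */
static bool toy_flush_going_ro(struct toy_write_buffer *wb)
{
        bool did_work = false;

        toy_flush(wb, &did_work);
        return did_work;
}

/*
 * Analogue of the loop in __bch2_fs_read_only(): keep making flush passes
 * until a pass reports that it found nothing left to do.
 */
static void toy_go_read_only(struct toy_write_buffer *wb)
{
        while (toy_flush_going_ro(wb))
                ;
}

int main(void)
{
        struct toy_write_buffer wb = { .nr_keys = 10 };

        toy_go_read_only(&wb);
        printf("keys left after going RO: %u\n", wb.nr_keys);
        return 0;
}

The key point is that the flush entry point used on the RO path reports whether it did anything, so the shutdown sequence can keep iterating until every subsystem comes back clean.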
fs/bcachefs/btree_write_buffer.c

@@ -277,6 +277,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 	bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
 	int ret = 0;
 
+	ret = bch2_journal_error(&c->journal);
+	if (ret)
+		return ret;
+
 	bch2_trans_unlock(trans);
 	bch2_trans_begin(trans);
 
@@ -491,7 +495,8 @@ static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq)
 	return ret;
 }
 
-static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq)
+static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq,
+					bool *did_work)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_write_buffer *wb = &c->btree_write_buffer;
@@ -502,6 +507,8 @@ static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq)
 
 		fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);
 
+		*did_work |= wb->inc.keys.nr || wb->flushing.keys.nr;
+
 		/*
 		 * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
 		 * is not guaranteed to empty wb->inc:
@@ -521,17 +528,34 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j,
 				struct journal_entry_pin *_pin, u64 seq)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
+	bool did_work = false;
 
-	return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq));
+	return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq, &did_work));
 }
 
 int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
+	bool did_work = false;
 
 	trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
 
-	return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal));
+	return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal), &did_work);
 }
 
+/*
+ * The write buffer requires flushing when going RO: keys in the journal for the
+ * write buffer don't have a journal pin yet
+ */
+bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *c)
+{
+	if (bch2_journal_error(&c->journal))
+		return false;
+
+	bool did_work = false;
+	bch2_trans_run(c, btree_write_buffer_flush_seq(trans,
+					journal_cur_seq(&c->journal), &did_work));
+	return did_work;
+}
+
 int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
fs/bcachefs/btree_write_buffer.h

@@ -21,6 +21,7 @@ static inline bool bch2_btree_write_buffer_must_wait(struct bch_fs *c)
 
 struct btree_trans;
 int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
+bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *);
 int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
 int bch2_btree_write_buffer_tryflush(struct btree_trans *);
 
fs/bcachefs/super.c

@@ -272,6 +272,7 @@ static void __bch2_fs_read_only(struct bch_fs *c)
 		clean_passes++;
 
 		if (bch2_btree_interior_updates_flush(c) ||
+		    bch2_btree_write_buffer_flush_going_ro(c) ||
 		    bch2_journal_flush_all_pins(&c->journal) ||
 		    bch2_btree_flush_all_writes(c) ||
 		    seq != atomic64_read(&c->journal.seq)) {
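Note on the interface: unlike the other flush entry points, which return an error code, bch2_btree_write_buffer_flush_going_ro() returns a bool meaning "this pass found work to do". If the journal is already in an error state it returns false, and the result of the underlying btree_write_buffer_flush_seq() call is not propagated; the caller in __bch2_fs_read_only() only consumes the did-work result, feeding it into the same condition as the other flush helpers so the read-only sequence can tell whether the filesystem is quiescent yet.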