/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_JOURNAL_IO_H
#define _BCACHEFS_JOURNAL_IO_H

/*
 * Only used for holding the journal entries we read in btree_journal_read()
 * during cache_registration
 */
struct journal_replay {
	struct journal_ptr {
		u8		dev;
		u32		bucket;
		u32		bucket_offset;
		u64		sector;
	}			ptrs[BCH_REPLICAS_MAX];
	unsigned		nr_ptrs;

	/* checksum error, but we may want to try using it anyways: */
	bool			bad;
	/* read, but should not be used (e.g. newer than the newest flush): */
	bool			ignore;
	/* must be last: */
	struct jset		j;
};
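
/*
 * Illustrative sketch only (guarded out, not part of this interface):
 * because @j is variable length, a journal_replay is assumed to be
 * allocated with the whole on-disk journal entry appended after the fixed
 * fields, which is why @j must stay the last member. The function name is
 * hypothetical.
 */
#if 0
static struct journal_replay *journal_replay_alloc_example(struct jset *j)
{
	size_t bytes = vstruct_bytes(j);
	struct journal_replay *i;

	/* fixed fields plus room for the full journal entry: */
	i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i)
		return NULL;

	i->nr_ptrs	= 0;
	i->bad		= false;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);
	return i;
}
#endif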

static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
					struct jset_entry *entry, unsigned type)
{
	while (entry < vstruct_last(jset)) {
		if (entry->type == type)
			return entry;

		entry = vstruct_next(entry);
	}

	return NULL;
}

#define for_each_jset_entry_type(entry, jset, type)			\
	for (entry = (jset)->start;					\
	     (entry = __jset_entry_type_next(jset, entry, type));	\
	     entry = vstruct_next(entry))

#define for_each_jset_key(k, _n, entry, jset)				\
	for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)	\
		vstruct_for_each_safe(entry, k, _n)
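
/*
 * Usage sketch only (guarded out): walk every btree-key entry in a journal
 * set with the iterators above and count the keys they carry. The function
 * name is hypothetical.
 */
#if 0
static unsigned jset_count_keys_example(struct jset *jset)
{
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	unsigned nr = 0;

	/* visits each bkey_i in every BCH_JSET_ENTRY_btree_keys entry: */
	for_each_jset_key(k, _n, entry, jset)
		nr++;

	return nr;
}
#endif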

int bch2_journal_entry_validate(struct bch_fs *, const char *,
				struct jset_entry *, unsigned, int, int);
void bch2_journal_entry_to_text(struct printbuf *, struct bch_fs *,
				struct jset_entry *);

void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
			       struct journal_replay *);

int bch2_journal_read(struct bch_fs *, u64 *, u64 *);

void bch2_journal_write(struct closure *);

#endif /* _BCACHEFS_JOURNAL_IO_H */