bcachefs: Fix W=12 build errors
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b5e85d4d0c
commit 96dea3d599
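
Building with make W=12 enables the extra W=1 and W=2 warning groups (-Wunused-const-variable and kernel-doc checks under W=1, -Wshadow and -Wmissing-field-initializers under W=2, among others). The fixes below follow a few recurring patterns: locals renamed so they no longer shadow an outer declaration, file-scope constants annotated __maybe_unused, struct initializers spelled out in full, and kernel-doc comments brought back in sync with the function signatures. A minimal sketch of the first two patterns, with hypothetical names not taken from this patch:

#include <linux/compiler_attributes.h>	/* __maybe_unused */

/*
 * Hypothetical header-style constant: without __maybe_unused, W=1
 * (-Wunused-const-variable) warns in every file that includes the
 * header but never reads the constant.
 */
static const __maybe_unused unsigned EXAMPLE_FLAG = 1 << 0;

/* Hypothetical helper showing the -Wshadow (W=2) pattern. */
static int example_sum(const int *v, unsigned nr)
{
	int sum = 0;
	unsigned i;

	for (i = 0; i < nr; i++) {
		/*
		 * Reusing an outer name here (another "i" or "sum") would
		 * trigger -Wshadow, so inner temporaries get distinct names,
		 * as in the k -> u renames below.
		 */
		int val = v[i];

		sum += val;
	}
	return sum;
}
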
@@ -1200,15 +1200,15 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
 	}
 
 	if (need_update) {
-		struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));
+		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
 
-		ret = PTR_ERR_OR_ZERO(k);
+		ret = PTR_ERR_OR_ZERO(u);
 		if (ret)
 			goto err;
 
-		memcpy(k, &g, sizeof(g));
+		memcpy(u, &g, sizeof(g));
 
-		ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);
+		ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
 		if (ret)
 			goto err;
 	}
@@ -1354,15 +1354,14 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 	}
 
 	if (need_update) {
-		struct bkey_i *k;
+		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
 
-		k = bch2_trans_kmalloc(trans, sizeof(g));
-		ret = PTR_ERR_OR_ZERO(k);
+		ret = PTR_ERR_OR_ZERO(u);
 		if (ret)
 			goto out;
 
-		memcpy(k, &g, sizeof(g));
-		ret = bch2_trans_update(trans, iter, k, 0);
+		memcpy(u, &g, sizeof(g));
+		ret = bch2_trans_update(trans, iter, u, 0);
 	}
 out:
 fsck_err:
@@ -502,9 +502,14 @@ again:
 }
 
 /**
- * bch_bucket_alloc - allocate a single bucket from a specific device
+ * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
+ * @trans: transaction object
+ * @ca: device to allocate from
+ * @watermark: how important is this allocation?
+ * @cl: if not NULL, closure to be used to wait if buckets not available
+ * @usage: for secondarily also returning the current device usage
  *
- * Returns index of bucket on success, 0 on failure
+ * Returns: an open_bucket on success, or an ERR_PTR() on failure.
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 						   struct bch_dev *ca,
@@ -775,7 +780,6 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	struct dev_alloc_list devs_sorted;
 	struct ec_stripe_head *h;
 	struct open_bucket *ob;
-	struct bch_dev *ca;
 	unsigned i, ec_idx;
 	int ret = 0;
 
@@ -805,8 +809,6 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	}
 	goto out_put_head;
 got_bucket:
-	ca = bch_dev_bkey_exists(c, ob->dev);
-
 	ob->ec_idx = ec_idx;
 	ob->ec = h->s;
 	ec_stripe_new_get(h->s, STRIPE_REF_io);
@@ -1032,10 +1034,13 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 
 /**
  * should_drop_bucket - check if this is open_bucket should go away
+ * @ob: open_bucket to predicate on
+ * @c: filesystem handle
  * @ca: if set, we're killing buckets for a particular device
  * @ec: if true, we're shutting down erasure coding and killing all ec
  *	open_buckets
  *	otherwise, return true
+ * Returns: true if we should kill this open_bucket
  *
  * We're killing open_buckets because we're shutting down a device, erasure
  * coding, or the entire filesystem - check if this open_bucket matches:
@@ -351,7 +351,6 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter alloc_iter = { NULL };
-	struct bch_dev *ca;
 	struct bkey_s_c alloc_k;
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
@@ -363,8 +362,6 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
 		goto out;
 	}
 
-	ca = bch_dev_bkey_exists(c, k.k->p.inode);
-
 	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
 				     bp_pos_to_bucket(c, k.k->p), 0);
 	ret = bkey_err(alloc_k);
@@ -629,7 +626,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	enum btree_id btree_id;
-	struct bpos_level last_flushed = { UINT_MAX };
+	struct bpos_level last_flushed = { UINT_MAX, POS_MIN };
 	int ret = 0;
 
 	for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
@@ -371,7 +371,7 @@ BCH_DEBUG_PARAMS()
 #undef BCH_DEBUG_PARAM
 
 #ifndef CONFIG_BCACHEFS_DEBUG
-#define BCH_DEBUG_PARAM(name, description) static const bool bch2_##name;
+#define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
 BCH_DEBUG_PARAMS_DEBUG()
 #undef BCH_DEBUG_PARAM
 #endif
@@ -83,8 +83,8 @@ typedef uuid_t __uuid_t;
 #endif
 
 #define BITMASK(name, type, field, offset, end)				\
-static const unsigned	name##_OFFSET = offset;				\
-static const unsigned	name##_BITS = (end - offset);			\
+static const __maybe_unused unsigned	name##_OFFSET = offset;		\
+static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
 									\
 static inline __u64 name(const type *k)				\
 {									\
@@ -98,9 +98,9 @@ static inline void SET_##name(type *k, __u64 v) \
 }
 
 #define LE_BITMASK(_bits, name, type, field, offset, end)		\
-static const unsigned	name##_OFFSET = offset;				\
-static const unsigned	name##_BITS = (end - offset);			\
-static const __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;	\
+static const __maybe_unused unsigned	name##_OFFSET = offset;		\
+static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
+static const __maybe_unused __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;\
 									\
 static inline __u64 name(const type *k)				\
 {									\
@@ -1668,7 +1668,8 @@ enum bcachefs_metadata_version {
 	bcachefs_metadata_version_max
 };
 
-static const unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
+static const __maybe_unused
+unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
 
 #define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
 
@@ -1975,7 +1976,7 @@ enum bch_csum_type {
 	BCH_CSUM_NR
 };
 
-static const unsigned bch_crc_bytes[] = {
+static const __maybe_unused unsigned bch_crc_bytes[] = {
 	[BCH_CSUM_none] = 0,
 	[BCH_CSUM_crc32c_nonzero] = 4,
 	[BCH_CSUM_crc32c] = 4,
@@ -308,9 +308,14 @@ struct bpos __bkey_unpack_pos(const struct bkey_format *format,
 
 /**
  * bch2_bkey_pack_key -- pack just the key, not the value
+ * @out: packed result
+ * @in: key to pack
+ * @format: format of packed result
+ *
+ * Returns: true on success, false on failure
  */
 bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
-		   const struct bkey_format *format)
+			const struct bkey_format *format)
 {
 	struct pack_state state = pack_state_init(format, out);
 	u64 *w = out->_data;
@@ -336,9 +341,12 @@ bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
 
 /**
  * bch2_bkey_unpack -- unpack the key and the value
+ * @b: btree node of @src key (for packed format)
+ * @dst: unpacked result
+ * @src: packed input
  */
 void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
-		 const struct bkey_packed *src)
+		      const struct bkey_packed *src)
 {
 	__bkey_unpack_key(b, &dst->k, src);
 
@@ -349,19 +357,24 @@ void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
 
 /**
  * bch2_bkey_pack -- pack the key and the value
+ * @dst: packed result
+ * @src: unpacked input
+ * @format: format of packed result
+ *
+ * Returns: true on success, false on failure
  */
-bool bch2_bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
-	       const struct bkey_format *format)
+bool bch2_bkey_pack(struct bkey_packed *dst, const struct bkey_i *src,
+		    const struct bkey_format *format)
 {
 	struct bkey_packed tmp;
 
-	if (!bch2_bkey_pack_key(&tmp, &in->k, format))
+	if (!bch2_bkey_pack_key(&tmp, &src->k, format))
 		return false;
 
-	memmove_u64s((u64 *) out + format->key_u64s,
-		     &in->v,
-		     bkey_val_u64s(&in->k));
-	memcpy_u64s_small(out, &tmp, format->key_u64s);
+	memmove_u64s((u64 *) dst + format->key_u64s,
+		     &src->v,
+		     bkey_val_u64s(&src->k));
+	memcpy_u64s_small(dst, &tmp, format->key_u64s);
 
 	return true;
 }
@@ -369,7 +369,6 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 {
 	const struct bkey_ops *ops;
 	struct bkey uk;
-	struct bkey_s u;
 	unsigned nr_compat = 5;
 	int i;
 
@@ -434,7 +433,9 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 		}
 
 		break;
-	case 4:
+	case 4: {
+		struct bkey_s u;
+
 		if (!bkey_packed(k)) {
 			u = bkey_i_to_s(packed_to_bkey(k));
 		} else {
@@ -451,6 +452,7 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 		if (ops->compat)
 			ops->compat(btree_id, version, big_endian, write, u);
 		break;
+	}
 	default:
 		BUG();
 	}
@@ -172,10 +172,10 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
 		printk(KERN_ERR "iter was:");
 
 		btree_node_iter_for_each(_iter, set) {
-			struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
-			struct bset_tree *t = bch2_bkey_to_bset(b, k);
+			struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
+			struct bset_tree *t = bch2_bkey_to_bset(b, k2);
 			printk(" [%zi %zi]", t - b->set,
-			       k->_data - bset(b, t)->_data);
+			       k2->_data - bset(b, t)->_data);
 		}
 		panic("\n");
 	}
@@ -1269,9 +1269,13 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
 }
 
 /**
- * bch_btree_node_iter_init - initialize a btree node iterator, starting from a
+ * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
  * given position
  *
+ * @iter: iterator to initialize
+ * @b: btree node to search
+ * @search: search key
+ *
  * Main entry point to the lookup code for individual btree nodes:
  *
  * NOTE:
@ -885,7 +885,7 @@ retry:
|
||||
}
|
||||
|
||||
if (unlikely(need_relock)) {
|
||||
int ret = bch2_trans_relock(trans) ?:
|
||||
ret = bch2_trans_relock(trans) ?:
|
||||
bch2_btree_path_relock_intent(trans, path);
|
||||
if (ret) {
|
||||
six_unlock_type(&b->c.lock, lock_type);
|
||||
@ -916,11 +916,20 @@ retry:
|
||||
}
|
||||
|
||||
/**
|
||||
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
|
||||
* bch2_btree_node_get - find a btree node in the cache and lock it, reading it
|
||||
* in from disk if necessary.
|
||||
*
|
||||
* @trans: btree transaction object
|
||||
* @path: btree_path being traversed
|
||||
* @k: pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
|
||||
* @level: level of btree node being looked up (0 == leaf node)
|
||||
* @lock_type: SIX_LOCK_read or SIX_LOCK_intent
|
||||
* @trace_ip: ip of caller of btree iterator code (i.e. caller of bch2_btree_iter_peek())
|
||||
*
|
||||
* The btree node will have either a read or a write lock held, depending on
|
||||
* the @write parameter.
|
||||
*
|
||||
* Returns: btree node or ERR_PTR()
|
||||
*/
|
||||
struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
|
||||
const struct bkey_i *k, unsigned level,
|
||||
@ -979,7 +988,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
|
||||
* relock it specifically:
|
||||
*/
|
||||
if (trans) {
|
||||
int ret = bch2_trans_relock(trans) ?:
|
||||
ret = bch2_trans_relock(trans) ?:
|
||||
bch2_btree_path_relock_intent(trans, path);
|
||||
if (ret) {
|
||||
BUG_ON(!trans->restarted);
|
||||
|
@ -566,8 +566,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
|
||||
struct bkey_s_c *k)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
|
||||
const union bch_extent_entry *entry;
|
||||
struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k);
|
||||
const union bch_extent_entry *entry_c;
|
||||
struct extent_ptr_decoded p = { 0 };
|
||||
bool do_update = false;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
@ -577,10 +577,10 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
|
||||
* XXX
|
||||
* use check_bucket_ref here
|
||||
*/
|
||||
bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
|
||||
bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
|
||||
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
|
||||
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);
|
||||
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);
|
||||
|
||||
if (!g->gen_valid &&
|
||||
(c->opts.reconstruct_alloc ||
|
||||
@ -1217,14 +1217,6 @@ static int bch2_gc_done(struct bch_fs *c,
|
||||
fsck_err(c, _msg ": got %llu, should be %llu" \
|
||||
, ##__VA_ARGS__, dst->_f, src->_f))) \
|
||||
dst->_f = src->_f
|
||||
#define copy_stripe_field(_f, _msg, ...) \
|
||||
if (dst->_f != src->_f && \
|
||||
(!verify || \
|
||||
fsck_err(c, "stripe %zu has wrong "_msg \
|
||||
": got %u, should be %u", \
|
||||
iter.pos, ##__VA_ARGS__, \
|
||||
dst->_f, src->_f))) \
|
||||
dst->_f = src->_f
|
||||
#define copy_dev_field(_f, _msg, ...) \
|
||||
copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
|
||||
#define copy_fs_field(_f, _msg, ...) \
|
||||
@ -1776,6 +1768,12 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
|
||||
/**
|
||||
* bch2_gc - walk _all_ references to buckets, and recompute them:
|
||||
*
|
||||
* @c: filesystem object
|
||||
* @initial: are we in recovery?
|
||||
* @metadata_only: are we just checking metadata references, or everything?
|
||||
*
|
||||
* Returns: 0 on success, or standard errcode on failure
|
||||
*
|
||||
* Order matters here:
|
||||
* - Concurrent GC relies on the fact that we have a total ordering for
|
||||
* everything that GC walks - see gc_will_visit_node(),
|
||||
@ -1985,11 +1983,9 @@ int bch2_gc_gens(struct bch_fs *c)
|
||||
|
||||
for (i = 0; i < BTREE_ID_NR; i++)
|
||||
if (btree_type_has_ptrs(i)) {
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
|
||||
c->gc_gens_btree = i;
|
||||
c->gc_gens_pos = POS_MIN;
|
||||
|
||||
ret = for_each_btree_key_commit(&trans, iter, i,
|
||||
POS_MIN,
|
||||
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
|
||||
|
@ -336,7 +336,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
|
||||
start_bset->journal_seq = cpu_to_le64(seq);
|
||||
|
||||
if (sorting_entire_node) {
|
||||
unsigned u64s = le16_to_cpu(out->keys.u64s);
|
||||
u64s = le16_to_cpu(out->keys.u64s);
|
||||
|
||||
BUG_ON(bytes != btree_bytes(c));
|
||||
|
||||
@ -410,8 +410,6 @@ void bch2_btree_sort_into(struct bch_fs *c,
|
||||
bch2_verify_btree_nr_keys(dst);
|
||||
}
|
||||
|
||||
#define SORT_CRIT (4096 / sizeof(u64))
|
||||
|
||||
/*
|
||||
* We're about to add another bset to the btree node, so if there's currently
|
||||
* too many bsets - sort some of them together:
|
||||
@ -542,6 +540,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
|
||||
prt_str(out, ": ");
|
||||
}
|
||||
|
||||
__printf(8, 9)
|
||||
static int __btree_err(int ret,
|
||||
struct bch_fs *c,
|
||||
struct bch_dev *ca,
|
||||
@ -622,9 +621,6 @@ __cold
|
||||
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
|
||||
{
|
||||
struct bset_tree *t;
|
||||
struct bkey_s_c k;
|
||||
struct bkey unpacked;
|
||||
struct btree_node_iter iter;
|
||||
|
||||
for_each_bset(b, t) {
|
||||
struct bset *i = bset(b, t);
|
||||
@ -660,6 +656,9 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
|
||||
bch2_bset_set_no_aux_tree(b, b->set);
|
||||
bch2_btree_build_aux_trees(b);
|
||||
|
||||
struct bkey_s_c k;
|
||||
struct bkey unpacked;
|
||||
struct btree_node_iter iter;
|
||||
for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
|
||||
BUG_ON(bpos_lt(k.k->p, b->data->min_key));
|
||||
BUG_ON(bpos_gt(k.k->p, b->data->max_key));
|
||||
@ -908,7 +907,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
|
||||
bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
|
||||
BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
|
||||
unsigned u64s;
|
||||
unsigned blacklisted_written, nonblacklisted_written = 0;
|
||||
unsigned ptr_written = btree_ptr_sectors_written(&b->key);
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0, retry_read = 0, write = READ;
|
||||
@ -1042,8 +1040,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
|
||||
sort_iter_add(iter,
|
||||
vstruct_idx(i, 0),
|
||||
vstruct_last(i));
|
||||
|
||||
nonblacklisted_written = b->written;
|
||||
}
|
||||
|
||||
if (ptr_written) {
|
||||
@ -1061,18 +1057,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
|
||||
true),
|
||||
-BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
|
||||
"found bset signature after last bset");
|
||||
|
||||
/*
|
||||
* Blacklisted bsets are those that were written after the most recent
|
||||
* (flush) journal write. Since there wasn't a flush, they may not have
|
||||
* made it to all devices - which means we shouldn't write new bsets
|
||||
* after them, as that could leave a gap and then reads from that device
|
||||
* wouldn't find all the bsets in that btree node - which means it's
|
||||
* important that we start writing new bsets after the most recent _non_
|
||||
* blacklisted bset:
|
||||
*/
|
||||
blacklisted_written = b->written;
|
||||
b->written = nonblacklisted_written;
|
||||
}
|
||||
|
||||
sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
|
||||
@ -1140,9 +1124,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
|
||||
btree_node_reset_sib_u64s(b);
|
||||
|
||||
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
|
||||
struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
|
||||
|
||||
if (ca->mi.state != BCH_MEMBER_STATE_rw)
|
||||
if (ca2->mi.state != BCH_MEMBER_STATE_rw)
|
||||
set_btree_node_need_rewrite(b);
|
||||
}
|
||||
|
||||
@ -1224,19 +1208,17 @@ start:
|
||||
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
|
||||
rb->start_time);
|
||||
bio_put(&rb->bio);
|
||||
printbuf_exit(&buf);
|
||||
|
||||
if (saw_error && !btree_node_read_error(b)) {
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
||||
printbuf_reset(&buf);
|
||||
bch2_bpos_to_text(&buf, b->key.k.p);
|
||||
bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
|
||||
__func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
|
||||
printbuf_exit(&buf);
|
||||
|
||||
bch2_btree_node_rewrite_async(c, b);
|
||||
}
|
||||
|
||||
printbuf_exit(&buf);
|
||||
clear_btree_node_read_in_flight(b);
|
||||
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
|
||||
}
|
||||
|
@ -488,7 +488,6 @@ fixup_done:
|
||||
if (!bch2_btree_node_iter_end(node_iter) &&
|
||||
iter_current_key_modified &&
|
||||
b->c.level) {
|
||||
struct bset_tree *t;
|
||||
struct bkey_packed *k, *k2, *p;
|
||||
|
||||
k = bch2_btree_node_iter_peek_all(node_iter, b);
|
||||
@ -2048,8 +2047,12 @@ out:
|
||||
}
|
||||
|
||||
/**
|
||||
* bch2_btree_iter_peek: returns first key greater than or equal to iterator's
|
||||
* current position
|
||||
* bch2_btree_iter_peek_upto() - returns first key greater than or equal to
|
||||
* iterator's current position
|
||||
* @iter: iterator to peek from
|
||||
* @end: search limit: returns keys less than or equal to @end
|
||||
*
|
||||
* Returns: key if found, or an error extractable with bkey_err().
|
||||
*/
|
||||
struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
|
||||
{
|
||||
@ -2186,10 +2189,13 @@ end:
|
||||
}
|
||||
|
||||
/**
|
||||
* bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
|
||||
* to iterator's current position, returning keys from every level of the btree.
|
||||
* For keys at different levels of the btree that compare equal, the key from
|
||||
* the lower level (leaf) is returned first.
|
||||
* bch2_btree_iter_peek_all_levels() - returns the first key greater than or
|
||||
* equal to iterator's current position, returning keys from every level of the
|
||||
* btree. For keys at different levels of the btree that compare equal, the key
|
||||
* from the lower level (leaf) is returned first.
|
||||
* @iter: iterator to peek from
|
||||
*
|
||||
* Returns: key if found, or an error extractable with bkey_err().
|
||||
*/
|
||||
struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
|
||||
{
|
||||
@ -2280,8 +2286,11 @@ out_no_locked:
|
||||
}
|
||||
|
||||
/**
|
||||
* bch2_btree_iter_next: returns first key greater than iterator's current
|
||||
* bch2_btree_iter_next() - returns first key greater than iterator's current
|
||||
* position
|
||||
* @iter: iterator to peek from
|
||||
*
|
||||
* Returns: key if found, or an error extractable with bkey_err().
|
||||
*/
|
||||
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
|
||||
{
|
||||
@ -2292,8 +2301,11 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
|
||||
}
|
||||
|
||||
/**
|
||||
* bch2_btree_iter_peek_prev: returns first key less than or equal to
|
||||
* bch2_btree_iter_peek_prev() - returns first key less than or equal to
|
||||
* iterator's current position
|
||||
* @iter: iterator to peek from
|
||||
*
|
||||
* Returns: key if found, or an error extractable with bkey_err().
|
||||
*/
|
||||
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
|
||||
{
|
||||
@ -2416,8 +2428,11 @@ out_no_locked:
|
||||
}
|
||||
|
||||
/**
|
||||
* bch2_btree_iter_prev: returns first key less than iterator's current
|
||||
* bch2_btree_iter_prev() - returns first key less than iterator's current
|
||||
* position
|
||||
* @iter: iterator to peek from
|
||||
*
|
||||
* Returns: key if found, or an error extractable with bkey_err().
|
||||
*/
|
||||
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
|
||||
{
|
||||
@ -2832,6 +2847,8 @@ static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
|
||||
* bch2_trans_begin() - reset a transaction after a interrupted attempt
|
||||
* @trans: transaction to reset
|
||||
*
|
||||
* Returns: current restart counter, to be used with trans_was_restarted()
|
||||
*
|
||||
* While iterating over nodes or updating nodes a attempt to lock a btree node
|
||||
* may return BCH_ERR_transaction_restart when the trylock fails. When this
|
||||
* occurs bch2_trans_begin() should be called and the transaction retried.
|
||||
|
@ -674,17 +674,17 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
#define lockrestart_do(_trans, _do) \
|
||||
({ \
|
||||
u32 _restart_count; \
|
||||
int _ret; \
|
||||
int _ret2; \
|
||||
\
|
||||
do { \
|
||||
_restart_count = bch2_trans_begin(_trans); \
|
||||
_ret = (_do); \
|
||||
} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart)); \
|
||||
_ret2 = (_do); \
|
||||
} while (bch2_err_matches(_ret2, BCH_ERR_transaction_restart)); \
|
||||
\
|
||||
if (!_ret) \
|
||||
if (!_ret2) \
|
||||
bch2_trans_verify_not_restarted(_trans, _restart_count);\
|
||||
\
|
||||
_ret; \
|
||||
_ret2; \
|
||||
})
|
||||
|
||||
/*
|
||||
@ -699,23 +699,23 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
#define nested_lockrestart_do(_trans, _do) \
|
||||
({ \
|
||||
u32 _restart_count, _orig_restart_count; \
|
||||
int _ret; \
|
||||
int _ret2; \
|
||||
\
|
||||
_restart_count = _orig_restart_count = (_trans)->restart_count; \
|
||||
\
|
||||
while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
|
||||
while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
|
||||
_restart_count = bch2_trans_begin(_trans); \
|
||||
\
|
||||
if (!_ret) \
|
||||
if (!_ret2) \
|
||||
bch2_trans_verify_not_restarted(_trans, _restart_count);\
|
||||
\
|
||||
_ret ?: trans_was_restarted(_trans, _restart_count); \
|
||||
_ret2 ?: trans_was_restarted(_trans, _restart_count); \
|
||||
})
|
||||
|
||||
#define for_each_btree_key2(_trans, _iter, _btree_id, \
|
||||
_start, _flags, _k, _do) \
|
||||
({ \
|
||||
int _ret = 0; \
|
||||
int _ret3 = 0; \
|
||||
\
|
||||
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
|
||||
(_start), (_flags)); \
|
||||
@ -723,15 +723,15 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
while (1) { \
|
||||
u32 _restart_count = bch2_trans_begin(_trans); \
|
||||
\
|
||||
_ret = 0; \
|
||||
_ret3 = 0; \
|
||||
(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags)); \
|
||||
if (!(_k).k) \
|
||||
break; \
|
||||
\
|
||||
_ret = bkey_err(_k) ?: (_do); \
|
||||
if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
|
||||
_ret3 = bkey_err(_k) ?: (_do); \
|
||||
if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
|
||||
continue; \
|
||||
if (_ret) \
|
||||
if (_ret3) \
|
||||
break; \
|
||||
bch2_trans_verify_not_restarted(_trans, _restart_count);\
|
||||
if (!bch2_btree_iter_advance(&(_iter))) \
|
||||
@ -739,13 +739,13 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
} \
|
||||
\
|
||||
bch2_trans_iter_exit((_trans), &(_iter)); \
|
||||
_ret; \
|
||||
_ret3; \
|
||||
})
|
||||
|
||||
#define for_each_btree_key2_upto(_trans, _iter, _btree_id, \
|
||||
_start, _end, _flags, _k, _do) \
|
||||
({ \
|
||||
int _ret = 0; \
|
||||
int _ret3 = 0; \
|
||||
\
|
||||
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
|
||||
(_start), (_flags)); \
|
||||
@ -753,15 +753,15 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
while (1) { \
|
||||
u32 _restart_count = bch2_trans_begin(_trans); \
|
||||
\
|
||||
_ret = 0; \
|
||||
_ret3 = 0; \
|
||||
(_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
|
||||
if (!(_k).k) \
|
||||
break; \
|
||||
\
|
||||
_ret = bkey_err(_k) ?: (_do); \
|
||||
if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
|
||||
_ret3 = bkey_err(_k) ?: (_do); \
|
||||
if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
|
||||
continue; \
|
||||
if (_ret) \
|
||||
if (_ret3) \
|
||||
break; \
|
||||
bch2_trans_verify_not_restarted(_trans, _restart_count);\
|
||||
if (!bch2_btree_iter_advance(&(_iter))) \
|
||||
@ -769,13 +769,13 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
} \
|
||||
\
|
||||
bch2_trans_iter_exit((_trans), &(_iter)); \
|
||||
_ret; \
|
||||
_ret3; \
|
||||
})
|
||||
|
||||
#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \
|
||||
_start, _flags, _k, _do) \
|
||||
({ \
|
||||
int _ret = 0; \
|
||||
int _ret3 = 0; \
|
||||
\
|
||||
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
|
||||
(_start), (_flags)); \
|
||||
@ -784,14 +784,14 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
u32 _restart_count = bch2_trans_begin(_trans); \
|
||||
(_k) = bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
|
||||
if (!(_k).k) { \
|
||||
_ret = 0; \
|
||||
_ret3 = 0; \
|
||||
break; \
|
||||
} \
|
||||
\
|
||||
_ret = bkey_err(_k) ?: (_do); \
|
||||
if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
|
||||
_ret3 = bkey_err(_k) ?: (_do); \
|
||||
if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
|
||||
continue; \
|
||||
if (_ret) \
|
||||
if (_ret3) \
|
||||
break; \
|
||||
bch2_trans_verify_not_restarted(_trans, _restart_count);\
|
||||
if (!bch2_btree_iter_rewind(&(_iter))) \
|
||||
@ -799,7 +799,7 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
||||
} \
|
||||
\
|
||||
bch2_trans_iter_exit((_trans), &(_iter)); \
|
||||
_ret; \
|
||||
_ret3; \
|
||||
})
|
||||
|
||||
#define for_each_btree_key_commit(_trans, _iter, _btree_id, \
|
||||
|
@ -242,8 +242,6 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
|
||||
}
|
||||
|
||||
if (ck) {
|
||||
int ret;
|
||||
|
||||
ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_);
|
||||
if (unlikely(ret)) {
|
||||
bkey_cached_move_to_freelist(bc, ck);
|
||||
|
@ -214,7 +214,11 @@ inline void bch2_btree_add_journal_pin(struct bch_fs *c,
|
||||
}
|
||||
|
||||
/**
|
||||
* btree_insert_key - insert a key one key into a leaf node
|
||||
* bch2_btree_insert_key_leaf() - insert a key one key into a leaf node
|
||||
* @trans: btree transaction object
|
||||
* @path: path pointing to @insert's pos
|
||||
* @insert: key to insert
|
||||
* @journal_seq: sequence number of journal reservation
|
||||
*/
|
||||
inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
|
||||
struct btree_path *path,
|
||||
@ -555,7 +559,6 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
|
||||
struct btree_write_buffered_key *wb;
|
||||
struct btree_trans_commit_hook *h;
|
||||
unsigned u64s = 0;
|
||||
bool marking = false;
|
||||
int ret;
|
||||
|
||||
if (race_fault()) {
|
||||
@ -584,9 +587,6 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
|
||||
*stopped_at = i;
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (btree_node_type_needs_gc(i->bkey_type))
|
||||
marking = true;
|
||||
}
|
||||
|
||||
if (trans->nr_wb_updates &&
|
||||
|
@ -184,34 +184,34 @@ struct btree_node_iter {
|
||||
/*
|
||||
* Iterate over all possible positions, synthesizing deleted keys for holes:
|
||||
*/
|
||||
static const u16 BTREE_ITER_SLOTS = 1 << 0;
|
||||
static const u16 BTREE_ITER_ALL_LEVELS = 1 << 1;
|
||||
static const __maybe_unused u16 BTREE_ITER_SLOTS = 1 << 0;
|
||||
static const __maybe_unused u16 BTREE_ITER_ALL_LEVELS = 1 << 1;
|
||||
/*
|
||||
* Indicates that intent locks should be taken on leaf nodes, because we expect
|
||||
* to be doing updates:
|
||||
*/
|
||||
static const u16 BTREE_ITER_INTENT = 1 << 2;
|
||||
static const __maybe_unused u16 BTREE_ITER_INTENT = 1 << 2;
|
||||
/*
|
||||
* Causes the btree iterator code to prefetch additional btree nodes from disk:
|
||||
*/
|
||||
static const u16 BTREE_ITER_PREFETCH = 1 << 3;
|
||||
static const __maybe_unused u16 BTREE_ITER_PREFETCH = 1 << 3;
|
||||
/*
|
||||
* Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
|
||||
* @pos or the first key strictly greater than @pos
|
||||
*/
|
||||
static const u16 BTREE_ITER_IS_EXTENTS = 1 << 4;
|
||||
static const u16 BTREE_ITER_NOT_EXTENTS = 1 << 5;
|
||||
static const u16 BTREE_ITER_CACHED = 1 << 6;
|
||||
static const u16 BTREE_ITER_WITH_KEY_CACHE = 1 << 7;
|
||||
static const u16 BTREE_ITER_WITH_UPDATES = 1 << 8;
|
||||
static const u16 BTREE_ITER_WITH_JOURNAL = 1 << 9;
|
||||
static const u16 __BTREE_ITER_ALL_SNAPSHOTS = 1 << 10;
|
||||
static const u16 BTREE_ITER_ALL_SNAPSHOTS = 1 << 11;
|
||||
static const u16 BTREE_ITER_FILTER_SNAPSHOTS = 1 << 12;
|
||||
static const u16 BTREE_ITER_NOPRESERVE = 1 << 13;
|
||||
static const u16 BTREE_ITER_CACHED_NOFILL = 1 << 14;
|
||||
static const u16 BTREE_ITER_KEY_CACHE_FILL = 1 << 15;
|
||||
#define __BTREE_ITER_FLAGS_END 16
|
||||
static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS = 1 << 4;
|
||||
static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS = 1 << 5;
|
||||
static const __maybe_unused u16 BTREE_ITER_CACHED = 1 << 6;
|
||||
static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE = 1 << 7;
|
||||
static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES = 1 << 8;
|
||||
static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL = 1 << 9;
|
||||
static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS = 1 << 10;
|
||||
static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS = 1 << 11;
|
||||
static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS = 1 << 12;
|
||||
static const __maybe_unused u16 BTREE_ITER_NOPRESERVE = 1 << 13;
|
||||
static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL = 1 << 14;
|
||||
static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL = 1 << 15;
|
||||
#define __BTREE_ITER_FLAGS_END 16
|
||||
|
||||
enum btree_path_uptodate {
|
||||
BTREE_ITER_UPTODATE = 0,
|
||||
|
@ -681,15 +681,17 @@ int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
|
||||
* bch2_btree_insert - insert keys into the extent btree
|
||||
* @c: pointer to struct bch_fs
|
||||
* @id: btree to insert into
|
||||
* @insert_keys: list of keys to insert
|
||||
* @hook: insert callback
|
||||
* @k: key to insert
|
||||
* @disk_res: must be non-NULL whenever inserting or potentially
|
||||
* splitting data extents
|
||||
* @flags: transaction commit flags
|
||||
*
|
||||
* Returns: 0 on success, error code on failure
|
||||
*/
|
||||
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
|
||||
struct bkey_i *k,
|
||||
struct disk_reservation *disk_res,
|
||||
u64 *journal_seq, int flags)
|
||||
int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
|
||||
struct disk_reservation *disk_res, int flags)
|
||||
{
|
||||
return bch2_trans_do(c, disk_res, journal_seq, flags,
|
||||
return bch2_trans_do(c, disk_res, NULL, flags,
|
||||
bch2_btree_insert_trans(&trans, id, k, 0));
|
||||
}
|
||||
|
||||
@ -847,6 +849,7 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
|
||||
return bch2_trans_update_buffered(trans, btree, k);
|
||||
}
|
||||
|
||||
__printf(2, 0)
|
||||
static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list args)
|
||||
{
|
||||
struct printbuf buf = PRINTBUF;
|
||||
@ -883,6 +886,7 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
__printf(3, 0)
|
||||
static int
|
||||
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
|
||||
va_list args)
|
||||
@ -900,6 +904,7 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
|
||||
return ret;
|
||||
}
|
||||
|
||||
__printf(2, 3)
|
||||
int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
@ -915,6 +920,7 @@ int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
|
||||
* Use for logging messages during recovery to enable reserved space and avoid
|
||||
* blocking.
|
||||
*/
|
||||
__printf(2, 3)
|
||||
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
@ -66,7 +66,7 @@ int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
|
||||
int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
|
||||
enum btree_update_flags);
|
||||
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
|
||||
struct disk_reservation *, u64 *, int flags);
|
||||
struct disk_reservation *, int flags);
|
||||
|
||||
int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
|
||||
struct bpos, struct bpos, unsigned, u64 *);
|
||||
@ -115,8 +115,8 @@ void bch2_trans_commit_hook(struct btree_trans *,
|
||||
struct btree_trans_commit_hook *);
|
||||
int __bch2_trans_commit(struct btree_trans *, unsigned);
|
||||
|
||||
int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
|
||||
int bch2_journal_log_msg(struct bch_fs *, const char *, ...);
|
||||
__printf(2, 3) int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
|
||||
__printf(2, 3) int bch2_journal_log_msg(struct bch_fs *, const char *, ...);
|
||||
|
||||
/**
|
||||
* bch2_trans_commit - insert keys at given iterator positions
|
||||
|
@ -145,8 +145,13 @@ static size_t btree_node_u64s_with_format(struct btree *b,
|
||||
/**
|
||||
* bch2_btree_node_format_fits - check if we could rewrite node with a new format
|
||||
*
|
||||
* This assumes all keys can pack with the new format -- it just checks if
|
||||
* the re-packed keys would fit inside the node itself.
|
||||
* @c: filesystem handle
|
||||
* @b: btree node to rewrite
|
||||
* @new_f: bkey format to translate keys to
|
||||
*
|
||||
* Returns: true if all re-packed keys will be able to fit in a new node.
|
||||
*
|
||||
* Assumes all keys will successfully pack with the new format.
|
||||
*/
|
||||
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
|
||||
struct bkey_format *new_f)
|
||||
@ -244,7 +249,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
|
||||
struct write_point *wp;
|
||||
struct btree *b;
|
||||
BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
|
||||
struct open_buckets ob = { .nr = 0 };
|
||||
struct open_buckets obs = { .nr = 0 };
|
||||
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
|
||||
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
|
||||
unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
|
||||
@ -257,7 +262,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
|
||||
struct btree_alloc *a =
|
||||
&c->btree_reserve_cache[--c->btree_reserve_cache_nr];
|
||||
|
||||
ob = a->ob;
|
||||
obs = a->ob;
|
||||
bkey_copy(&tmp.k, &a->k);
|
||||
mutex_unlock(&c->btree_reserve_cache_lock);
|
||||
goto mem_alloc;
|
||||
@ -292,7 +297,7 @@ retry:
|
||||
bkey_btree_ptr_v2_init(&tmp.k);
|
||||
bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);
|
||||
|
||||
bch2_open_bucket_get(c, wp, &ob);
|
||||
bch2_open_bucket_get(c, wp, &obs);
|
||||
bch2_alloc_sectors_done(c, wp);
|
||||
mem_alloc:
|
||||
b = bch2_btree_node_mem_alloc(trans, interior_node);
|
||||
@ -304,7 +309,7 @@ mem_alloc:
|
||||
BUG_ON(b->ob.nr);
|
||||
|
||||
bkey_copy(&b->key, &tmp.k);
|
||||
b->ob = ob;
|
||||
b->ob = obs;
|
||||
|
||||
return b;
|
||||
}
|
||||
@ -697,15 +702,15 @@ err:
|
||||
* btree_interior_update_lock:
|
||||
*/
|
||||
if (as->b == b) {
|
||||
struct bset *i = btree_bset_last(b);
|
||||
|
||||
BUG_ON(!b->c.level);
|
||||
BUG_ON(!btree_node_dirty(b));
|
||||
|
||||
if (!ret) {
|
||||
i->journal_seq = cpu_to_le64(
|
||||
struct bset *last = btree_bset_last(b);
|
||||
|
||||
last->journal_seq = cpu_to_le64(
|
||||
max(journal_seq,
|
||||
le64_to_cpu(i->journal_seq)));
|
||||
le64_to_cpu(last->journal_seq)));
|
||||
|
||||
bch2_btree_add_journal_pin(c, b, journal_seq);
|
||||
} else {
|
||||
@ -1216,18 +1221,6 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
|
||||
bch2_recalc_btree_reserve(c);
|
||||
}
|
||||
|
||||
/**
|
||||
* bch_btree_set_root - update the root in memory and on disk
|
||||
*
|
||||
* To ensure forward progress, the current task must not be holding any
|
||||
* btree node write locks. However, you must hold an intent lock on the
|
||||
* old root.
|
||||
*
|
||||
* Note: This allocates a journal entry but doesn't add any keys to
|
||||
* it. All the btree roots are part of every journal write, so there
|
||||
* is nothing new to be done. This just guarantees that there is a
|
||||
* journal write.
|
||||
*/
|
||||
static void bch2_btree_set_root(struct btree_update *as,
|
||||
struct btree_trans *trans,
|
||||
struct btree_path *path,
|
||||
@ -1341,12 +1334,12 @@ __bch2_btree_insert_keys_interior(struct btree_update *as,
|
||||
;
|
||||
|
||||
while (!bch2_keylist_empty(keys)) {
|
||||
struct bkey_i *k = bch2_keylist_front(keys);
|
||||
insert = bch2_keylist_front(keys);
|
||||
|
||||
if (bpos_gt(k->k.p, b->key.k.p))
|
||||
if (bpos_gt(insert->k.p, b->key.k.p))
|
||||
break;
|
||||
|
||||
bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
|
||||
bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert);
|
||||
bch2_keylist_pop_front(keys);
|
||||
}
|
||||
}
|
||||
@ -1661,12 +1654,16 @@ bch2_btree_insert_keys_interior(struct btree_update *as,
|
||||
}
|
||||
|
||||
/**
|
||||
* bch_btree_insert_node - insert bkeys into a given btree node
|
||||
* bch2_btree_insert_node - insert bkeys into a given btree node
|
||||
*
|
||||
* @iter: btree iterator
|
||||
* @as: btree_update object
|
||||
* @trans: btree_trans object
|
||||
* @path: path that points to current node
|
||||
* @b: node to insert keys into
|
||||
* @keys: list of keys to insert
|
||||
* @hook: insert callback
|
||||
* @persistent: if not null, @persistent will wait on journal write
|
||||
* @flags: transaction commit flags
|
||||
*
|
||||
* Returns: 0 on success, typically transaction restart error on failure
|
||||
*
|
||||
* Inserts as many keys as it can into a given btree node, splitting it if full.
|
||||
* If a split occurred, this function will return early. This can only happen
|
||||
@ -1934,9 +1931,6 @@ err_free_update:
|
||||
goto out;
|
||||
}
|
||||
|
||||
/**
|
||||
* bch_btree_node_rewrite - Rewrite/move a btree node
|
||||
*/
|
||||
int bch2_btree_node_rewrite(struct btree_trans *trans,
|
||||
struct btree_iter *iter,
|
||||
struct btree *b,
|
||||
|
@ -366,11 +366,11 @@ struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
|
||||
BUG_ON(!bch2_checksum_mergeable(type));
|
||||
|
||||
while (b_len) {
|
||||
unsigned b = min_t(unsigned, b_len, PAGE_SIZE);
|
||||
unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);
|
||||
|
||||
bch2_checksum_update(&state,
|
||||
page_address(ZERO_PAGE(0)), b);
|
||||
b_len -= b;
|
||||
page_address(ZERO_PAGE(0)), page_len);
|
||||
b_len -= page_len;
|
||||
}
|
||||
a.lo = (__le64 __force) bch2_checksum_final(&state);
|
||||
a.lo ^= b.lo;
|
||||
@ -395,9 +395,9 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
|
||||
unsigned csum_type;
|
||||
struct bch_csum csum;
|
||||
} splits[3] = {
|
||||
{ crc_a, len_a, new_csum_type },
|
||||
{ crc_b, len_b, new_csum_type },
|
||||
{ NULL, bio_sectors(bio) - len_a - len_b, new_csum_type },
|
||||
{ crc_a, len_a, new_csum_type, { 0 }},
|
||||
{ crc_b, len_b, new_csum_type, { 0 } },
|
||||
{ NULL, bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
|
||||
}, *i;
|
||||
bool mergeable = crc_old.csum_type == new_csum_type &&
|
||||
bch2_checksum_mergeable(new_csum_type);
|
||||
|
@ -40,10 +40,9 @@ struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce,
|
||||
*/
|
||||
#define csum_vstruct(_c, _type, _nonce, _i) \
|
||||
({ \
|
||||
const void *start = ((const void *) (_i)) + sizeof((_i)->csum); \
|
||||
const void *end = vstruct_end(_i); \
|
||||
const void *_start = ((const void *) (_i)) + sizeof((_i)->csum);\
|
||||
\
|
||||
bch2_checksum(_c, _type, _nonce, start, end - start); \
|
||||
bch2_checksum(_c, _type, _nonce, _start, vstruct_end(_i) - _start);\
|
||||
})
|
||||
|
||||
int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
|
||||
|
@ -570,7 +570,6 @@ void bch2_fs_compress_exit(struct bch_fs *c)
|
||||
static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
|
||||
{
|
||||
size_t decompress_workspace_size = 0;
|
||||
bool decompress_workspace_needed;
|
||||
ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
|
||||
c->opts.encoded_extent_max);
|
||||
struct {
|
||||
@ -580,7 +579,8 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
|
||||
size_t decompress_workspace;
|
||||
} compression_types[] = {
|
||||
{ BCH_FEATURE_lz4, BCH_COMPRESSION_TYPE_lz4,
|
||||
max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS) },
|
||||
max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS),
|
||||
0 },
|
||||
{ BCH_FEATURE_gzip, BCH_COMPRESSION_TYPE_gzip,
|
||||
zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
|
||||
zlib_inflate_workspacesize(), },
|
||||
@ -619,9 +619,6 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
|
||||
if (!(features & (1 << i->feature)))
|
||||
continue;
|
||||
|
||||
if (i->decompress_workspace)
|
||||
decompress_workspace_needed = true;
|
||||
|
||||
if (mempool_initialized(&c->compress_workspace[i->type]))
|
||||
continue;
|
||||
|
||||
|
@ -49,10 +49,6 @@ static void trace_move_extent_fail2(struct data_update *m,
|
||||
if (insert) {
|
||||
i = 0;
|
||||
bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
|
||||
struct bkey_s new_s;
|
||||
new_s.k = (void *) new.k;
|
||||
new_s.v = (void *) new.v;
|
||||
|
||||
if (((1U << i) & m->data_opts.rewrite_ptrs) &&
|
||||
(ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
|
||||
!ptr->cached)
|
||||
|
@ -153,10 +153,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
|
||||
BUG_ON(b->nsets != 1);
|
||||
|
||||
for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
|
||||
if (k->type == KEY_TYPE_btree_ptr_v2) {
|
||||
struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k);
|
||||
v->mem_ptr = 0;
|
||||
}
|
||||
if (k->type == KEY_TYPE_btree_ptr_v2)
|
||||
((struct bch_btree_ptr_v2 *) bkeyp_val(&b->format, k))->mem_ptr = 0;
|
||||
|
||||
v = c->verify_data;
|
||||
bkey_copy(&v->key, &b->key);
|
||||
|
@ -32,21 +32,21 @@ static int bch2_sb_disk_groups_validate(struct bch_sb *sb,
|
||||
|
||||
for (i = 0; i < sb->nr_devices; i++) {
|
||||
struct bch_member *m = mi->members + i;
|
||||
unsigned g;
|
||||
unsigned group_id;
|
||||
|
||||
if (!BCH_MEMBER_GROUP(m))
|
||||
continue;
|
||||
|
||||
g = BCH_MEMBER_GROUP(m) - 1;
|
||||
group_id = BCH_MEMBER_GROUP(m) - 1;
|
||||
|
||||
if (g >= nr_groups) {
|
||||
if (group_id >= nr_groups) {
|
||||
prt_printf(err, "disk %u has invalid label %u (have %u)",
|
||||
i, g, nr_groups);
|
||||
i, group_id, nr_groups);
|
||||
return -BCH_ERR_invalid_sb_disk_groups;
|
||||
}
|
||||
|
||||
if (BCH_GROUP_DELETED(&groups->entries[g])) {
|
||||
prt_printf(err, "disk %u has deleted label %u", i, g);
|
||||
if (BCH_GROUP_DELETED(&groups->entries[group_id])) {
|
||||
prt_printf(err, "disk %u has deleted label %u", i, group_id);
|
||||
return -BCH_ERR_invalid_sb_disk_groups;
|
||||
}
|
||||
}
|
||||
|
@ -12,8 +12,6 @@ static const char * const bch2_errcode_strs[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
#define BCH_ERR_0 0
|
||||
|
||||
static unsigned bch2_errcode_parents[] = {
|
||||
#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = class,
|
||||
BCH_ERRCODES()
|
||||
|
@ -695,12 +695,12 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
|
||||
if (IS_ERR_OR_NULL(folio))
|
||||
goto err_unlock;
|
||||
|
||||
if (folio_test_uptodate(folio))
|
||||
goto out;
|
||||
|
||||
offset = pos - folio_pos(folio);
|
||||
len = min_t(size_t, len, folio_end_pos(folio) - pos);
|
||||
|
||||
if (folio_test_uptodate(folio))
|
||||
goto out;
|
||||
|
||||
/* If we're writing entire folio, don't need to read it in first: */
|
||||
if (!offset && len == folio_size(folio))
|
||||
goto out;
|
||||
@ -801,10 +801,10 @@ int bch2_write_end(struct file *file, struct address_space *mapping,
|
||||
return copied;
|
||||
}
|
||||
|
||||
static noinline void folios_trunc(folios *folios, struct folio **fi)
|
||||
static noinline void folios_trunc(folios *fs, struct folio **fi)
|
||||
{
|
||||
while (folios->data + folios->nr > fi) {
|
||||
struct folio *f = darray_pop(folios);
|
||||
while (fs->data + fs->nr > fi) {
|
||||
struct folio *f = darray_pop(fs);
|
||||
|
||||
folio_unlock(f);
|
||||
folio_put(f);
|
||||
@ -818,35 +818,35 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
{
|
||||
struct bch_fs *c = inode->v.i_sb->s_fs_info;
|
||||
struct bch2_folio_reservation res;
|
||||
folios folios;
|
||||
folios fs;
|
||||
struct folio **fi, *f;
|
||||
unsigned copied = 0, f_offset;
|
||||
u64 end = pos + len, f_pos;
|
||||
unsigned copied = 0, f_offset, f_copied;
|
||||
u64 end = pos + len, f_pos, f_len;
|
||||
loff_t last_folio_pos = inode->v.i_size;
|
||||
int ret = 0;
|
||||
|
||||
BUG_ON(!len);
|
||||
|
||||
bch2_folio_reservation_init(c, inode, &res);
|
||||
darray_init(&folios);
|
||||
darray_init(&fs);
|
||||
|
||||
ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
|
||||
FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
|
||||
mapping_gfp_mask(mapping),
|
||||
&folios);
|
||||
&fs);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
BUG_ON(!folios.nr);
|
||||
BUG_ON(!fs.nr);
|
||||
|
||||
f = darray_first(folios);
|
||||
f = darray_first(fs);
|
||||
if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
|
||||
ret = bch2_read_single_folio(f, mapping);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
f = darray_last(folios);
|
||||
f = darray_last(fs);
|
||||
end = min(end, folio_end_pos(f));
|
||||
last_folio_pos = folio_pos(f);
|
||||
if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
|
||||
@ -859,15 +859,15 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
}
|
||||
}
|
||||
|
||||
ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
|
||||
ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
f_pos = pos;
|
||||
f_offset = pos - folio_pos(darray_first(folios));
|
||||
darray_for_each(folios, fi) {
|
||||
struct folio *f = *fi;
|
||||
u64 f_len = min(end, folio_end_pos(f)) - f_pos;
|
||||
f_offset = pos - folio_pos(darray_first(fs));
|
||||
darray_for_each(fs, fi) {
|
||||
f = *fi;
|
||||
f_len = min(end, folio_end_pos(f)) - f_pos;
|
||||
|
||||
/*
|
||||
* XXX: per POSIX and fstests generic/275, on -ENOSPC we're
|
||||
@ -879,11 +879,11 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
*/
|
||||
ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
|
||||
if (unlikely(ret)) {
|
||||
folios_trunc(&folios, fi);
|
||||
if (!folios.nr)
|
||||
folios_trunc(&fs, fi);
|
||||
if (!fs.nr)
|
||||
goto out;
|
||||
|
||||
end = min(end, folio_end_pos(darray_last(folios)));
|
||||
end = min(end, folio_end_pos(darray_last(fs)));
|
||||
break;
|
||||
}
|
||||
|
||||
@ -892,18 +892,17 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
}
|
||||
|
||||
if (mapping_writably_mapped(mapping))
|
||||
darray_for_each(folios, fi)
|
||||
darray_for_each(fs, fi)
|
||||
flush_dcache_folio(*fi);
|
||||
|
||||
f_pos = pos;
|
||||
f_offset = pos - folio_pos(darray_first(folios));
|
||||
darray_for_each(folios, fi) {
|
||||
struct folio *f = *fi;
|
||||
u64 f_len = min(end, folio_end_pos(f)) - f_pos;
|
||||
unsigned f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
|
||||
|
||||
f_offset = pos - folio_pos(darray_first(fs));
|
||||
darray_for_each(fs, fi) {
|
||||
f = *fi;
|
||||
f_len = min(end, folio_end_pos(f)) - f_pos;
|
||||
f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
|
||||
if (!f_copied) {
|
||||
folios_trunc(&folios, fi);
|
||||
folios_trunc(&fs, fi);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -912,7 +911,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
pos + copied + f_copied < inode->v.i_size) {
|
||||
iov_iter_revert(iter, f_copied);
|
||||
folio_zero_range(f, 0, folio_size(f));
|
||||
folios_trunc(&folios, fi);
|
||||
folios_trunc(&fs, fi);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -920,7 +919,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
copied += f_copied;
|
||||
|
||||
if (f_copied != f_len) {
|
||||
folios_trunc(&folios, fi + 1);
|
||||
folios_trunc(&fs, fi + 1);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -939,10 +938,10 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
spin_unlock(&inode->v.i_lock);
|
||||
|
||||
f_pos = pos;
|
||||
f_offset = pos - folio_pos(darray_first(folios));
|
||||
darray_for_each(folios, fi) {
|
||||
struct folio *f = *fi;
|
||||
u64 f_len = min(end, folio_end_pos(f)) - f_pos;
|
||||
f_offset = pos - folio_pos(darray_first(fs));
|
||||
darray_for_each(fs, fi) {
|
||||
f = *fi;
|
||||
f_len = min(end, folio_end_pos(f)) - f_pos;
|
||||
|
||||
if (!folio_test_uptodate(f))
|
||||
folio_mark_uptodate(f);
|
||||
@ -955,7 +954,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
|
||||
|
||||
inode->ei_last_dirtied = (unsigned long) current;
|
||||
out:
|
||||
darray_for_each(folios, fi) {
|
||||
darray_for_each(fs, fi) {
|
||||
folio_unlock(*fi);
|
||||
folio_put(*fi);
|
||||
}
|
||||
@ -968,7 +967,7 @@ out:
|
||||
if (last_folio_pos >= inode->v.i_size)
|
||||
truncate_pagecache(&inode->v, inode->v.i_size);
|
||||
|
||||
darray_exit(&folios);
|
||||
darray_exit(&fs);
|
||||
bch2_folio_reservation_put(c, inode, &res);
|
||||
|
||||
return copied ?: ret;
|
||||
|
@ -14,7 +14,7 @@
|
||||
int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
|
||||
loff_t start, u64 end,
|
||||
int fgp_flags, gfp_t gfp,
|
||||
folios *folios)
|
||||
folios *fs)
|
||||
{
|
||||
struct folio *f;
|
||||
u64 pos = start;
|
||||
@ -24,7 +24,7 @@ int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
|
||||
if ((u64) pos >= (u64) start + (1ULL << 20))
|
||||
fgp_flags &= ~FGP_CREAT;
|
||||
|
||||
ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
|
||||
ret = darray_make_room_gfp(fs, 1, gfp & GFP_KERNEL);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
@ -32,16 +32,16 @@ int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
|
||||
if (IS_ERR_OR_NULL(f))
|
||||
break;
|
||||
|
||||
BUG_ON(folios->nr && folio_pos(f) != pos);
|
||||
BUG_ON(fs->nr && folio_pos(f) != pos);
|
||||
|
||||
pos = folio_end_pos(f);
|
||||
darray_push(folios, f);
|
||||
darray_push(fs, f);
|
||||
}
|
||||
|
||||
if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
|
||||
if (!fs->nr && !ret && (fgp_flags & FGP_CREAT))
|
||||
ret = -ENOMEM;
|
||||
|
||||
return folios->nr ? 0 : ret;
|
||||
return fs->nr ? 0 : ret;
|
||||
}
|
||||
|
||||
/* pagecache_block must be held */
|
||||
@ -73,12 +73,15 @@ int bch2_write_invalidate_inode_pages_range(struct address_space *mapping,
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if 0
|
||||
/* Useful for debug tracing: */
|
||||
static const char * const bch2_folio_sector_states[] = {
|
||||
#define x(n) #n,
|
||||
BCH_FOLIO_SECTOR_STATE()
|
||||
#undef x
|
||||
NULL
|
||||
};
|
||||
#endif
|
||||
|
||||
static inline enum bch_folio_sector_state
|
||||
folio_sector_dirty(enum bch_folio_sector_state state)
|
||||
@ -177,20 +180,20 @@ static void __bch2_folio_set(struct folio *folio,
|
||||
* extents btree:
|
||||
*/
|
||||
int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
|
||||
struct folio **folios, unsigned nr_folios)
|
||||
struct folio **fs, unsigned nr_folios)
|
||||
{
|
||||
struct btree_trans trans;
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
struct bch_folio *s;
|
||||
u64 offset = folio_sector(folios[0]);
|
||||
u64 offset = folio_sector(fs[0]);
|
||||
unsigned folio_idx;
|
||||
u32 snapshot;
|
||||
bool need_set = false;
|
||||
int ret;
|
||||
|
||||
for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
|
||||
s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
|
||||
s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
|
||||
if (!s)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -216,7 +219,7 @@ retry:
|
||||
unsigned state = bkey_to_sector_state(k);
|
||||
|
||||
while (folio_idx < nr_folios) {
|
||||
struct folio *folio = folios[folio_idx];
|
||||
struct folio *folio = fs[folio_idx];
|
||||
u64 folio_start = folio_sector(folio);
|
||||
u64 folio_end = folio_end_sector(folio);
|
||||
unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
|
||||
|
|
@ -5,7 +5,7 @@
/* Inode flags: */

/* bcachefs inode flags -> vfs inode flags: */
static const unsigned bch_flags_to_vfs[] = {
static const __maybe_unused unsigned bch_flags_to_vfs[] = {
[__BCH_INODE_SYNC] = S_SYNC,
[__BCH_INODE_IMMUTABLE] = S_IMMUTABLE,
[__BCH_INODE_APPEND] = S_APPEND,
@ -13,7 +13,7 @@ static const unsigned bch_flags_to_vfs[] = {
};

/* bcachefs inode flags -> FS_IOC_GETFLAGS: */
static const unsigned bch_flags_to_uflags[] = {
static const __maybe_unused unsigned bch_flags_to_uflags[] = {
[__BCH_INODE_SYNC] = FS_SYNC_FL,
[__BCH_INODE_IMMUTABLE] = FS_IMMUTABLE_FL,
[__BCH_INODE_APPEND] = FS_APPEND_FL,
@ -22,7 +22,7 @@ static const unsigned bch_flags_to_uflags[] = {
};

/* bcachefs inode flags -> FS_IOC_FSGETXATTR: */
static const unsigned bch_flags_to_xflags[] = {
static const __maybe_unused unsigned bch_flags_to_xflags[] = {
[__BCH_INODE_SYNC] = FS_XFLAG_SYNC,
[__BCH_INODE_IMMUTABLE] = FS_XFLAG_IMMUTABLE,
[__BCH_INODE_APPEND] = FS_XFLAG_APPEND,
|
@ -1661,7 +1661,7 @@ static int bch2_remount(struct super_block *sb, int *flags, char *data)
up_write(&c->state_lock);
}

if (opts.errors >= 0)
if (opt_defined(opts, errors))
c->opts.errors = opts.errors;
err:
return bch2_err_class(ret);
|
@ -471,7 +471,12 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
 * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor,
 * and @ancestor hasn't been overwritten in @seen
 *
 * That is, returns whether key in @ancestor snapshot is visible in @id snapshot
 * @c: filesystem handle
 * @seen: list of snapshot ids already seen at current position
 * @id: descendent snapshot id
 * @ancestor: ancestor snapshot id
 *
 * Returns: whether key in @ancestor snapshot is visible in @id snapshot
 */
static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
u32 id, u32 ancestor)
@ -516,14 +521,16 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
 * snapshot id @dst, test whether there is some snapshot in which @dst is
 * visible.
 *
 * This assumes we're visiting @src keys in natural key order.
 * @c: filesystem handle
 * @s: list of snapshot IDs already seen at @src
 * @src: snapshot ID of src key
 * @dst: snapshot ID of dst key
 * Returns: true if there is some snapshot in which @dst is visible
 *
 * @s - list of snapshot IDs already seen at @src
 * @src - snapshot ID of src key
 * @dst - snapshot ID of dst key
 * Assumes we're visiting @src keys in natural key order
 */
static int ref_visible(struct bch_fs *c, struct snapshots_seen *s,
u32 src, u32 dst)
static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
u32 src, u32 dst)
{
return dst <= src
? key_visible_in_snapshot(c, s, dst, src)
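
Most of the hunks in this commit have the shape above: comments are brought into the form that the kernel-doc checker run by the kernel's extra-warning (W=) builds accepts — a `name() - summary` line, `@arg:` entries with a colon, and an explicit Returns section. A stand-alone sketch of that form for a made-up helper (the function and its parameters are purely illustrative, not from bcachefs):

/**
 * frob_widget() - reset a widget to its default state
 * @w:    widget to reset
 * @hard: if true, also clear persistent counters
 *
 * Returns: 0 on success, or a negative errno on failure.
 */
int frob_widget(struct widget *w, bool hard);
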
|
@ -120,8 +120,7 @@ static inline void bch2_inode_pack_inlined(struct bkey_inode_buf *packed,
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
struct bch_inode_unpacked unpacked;

int ret = bch2_inode_unpack(bkey_i_to_s_c(&packed->inode.k_i),
&unpacked);
ret = bch2_inode_unpack(bkey_i_to_s_c(&packed->inode.k_i), &unpacked);
BUG_ON(ret);
BUG_ON(unpacked.bi_inum != inode->bi_inum);
BUG_ON(unpacked.bi_hash_seed != inode->bi_hash_seed);
|
@ -489,7 +489,8 @@ static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
}

/**
 * bch_write_index - after a write, update index to point to new data
 * __bch2_write_index - after a write, update index to point to new data
 * @op: bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
@ -526,10 +527,10 @@ static void __bch2_write_index(struct bch_write_op *op)
op->written += sectors_start - keylist_sectors(keys);

if (ret && !bch2_err_matches(ret, EROFS)) {
struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

bch_err_inum_offset_ratelimited(c,
k->k.p.inode, k->k.p.offset << 9,
insert->k.p.inode, insert->k.p.offset << 9,
"write error while doing btree update: %s",
bch2_err_str(ret));
}
@ -1179,10 +1180,10 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
}));

if (ret && !bch2_err_matches(ret, EROFS)) {
struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

bch_err_inum_offset_ratelimited(c,
k->k.p.inode, k->k.p.offset << 9,
insert->k.p.inode, insert->k.p.offset << 9,
"write error while doing btree update: %s",
bch2_err_str(ret));
}
@ -1546,7 +1547,8 @@ err:
}

/**
 * bch_write - handle a write to a cache device or flash only volume
 * bch2_write() - handle a write to a cache device or flash only volume
 * @cl: &bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
|
@ -588,8 +588,13 @@ out:

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j: journal object
 * @seq: seq to flush
 * @parent: closure object to wait with
 * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed,
 * -EIO if @seq will never be flushed
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
@ -944,7 +949,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
goto unlock;

while (ja->nr < nr) {
struct disk_reservation disk_res = { 0, 0 };
struct disk_reservation disk_res = { 0, 0, 0 };

/*
 * note: journal buckets aren't really counted as _sectors_ used yet, so
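
The new kernel-doc above spells out the three possible results of bch2_journal_flush_seq_async(). A hypothetical caller sketch, illustrating those documented return values; the surrounding code and the assumption that the third argument is the @parent closure are mine, not from this commit:

/* cl is assumed to be a struct closure owned by the caller */
int ret = bch2_journal_flush_seq_async(j, seq, &cl);

if (ret == 1) {
	/* @seq was already flushed; nothing to wait for */
} else if (!ret) {
	/* a flush is in flight; completion is signalled via &cl */
} else {
	/* -EIO: journal is shut down, @seq will never be flushed */
}
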
|
@ -237,17 +237,17 @@ static void journal_entry_err_msg(struct printbuf *out,

#define journal_entry_err(c, version, jset, entry, msg, ...) \
({ \
struct printbuf buf = PRINTBUF; \
struct printbuf _buf = PRINTBUF; \
\
journal_entry_err_msg(&buf, version, jset, entry); \
prt_printf(&buf, msg, ##__VA_ARGS__); \
journal_entry_err_msg(&_buf, version, jset, entry); \
prt_printf(&_buf, msg, ##__VA_ARGS__); \
\
switch (flags & BKEY_INVALID_WRITE) { \
case READ: \
mustfix_fsck_err(c, "%s", buf.buf); \
mustfix_fsck_err(c, "%s", _buf.buf); \
break; \
case WRITE: \
bch_err(c, "corrupt metadata before write: %s\n", buf.buf);\
bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
if (bch2_fs_inconsistent(c)) { \
ret = -BCH_ERR_fsck_errors_not_fixed; \
goto fsck_err; \
@ -255,7 +255,7 @@ static void journal_entry_err_msg(struct printbuf *out,
break; \
} \
\
printbuf_exit(&buf); \
printbuf_exit(&_buf); \
true; \
})

@ -1281,7 +1281,7 @@ int bch2_journal_read(struct bch_fs *c,
continue;

for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);

if (!i->ptrs[ptr].csum_good)
bch_err_dev_offset(ca, i->ptrs[ptr].sector,
@ -1379,16 +1379,21 @@ static void __journal_write_alloc(struct journal *j,
}

/**
 * journal_next_bucket - move on to the next journal bucket if possible
 * journal_write_alloc - decide where to write next journal entry
 *
 * @j: journal object
 * @w: journal buf (entry to be written)
 *
 * Returns: 0 on success, or -EROFS on failure
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
unsigned sectors)
static int journal_write_alloc(struct journal *j, struct journal_buf *w)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_devs_mask devs;
struct journal_device *ja;
struct bch_dev *ca;
struct dev_alloc_list devs_sorted;
unsigned sectors = vstruct_sectors(w->data, c->block_bits);
unsigned target = c->opts.metadata_target ?:
c->opts.foreground_target;
unsigned i, replicas = 0, replicas_want =
@ -1812,7 +1817,7 @@ void bch2_journal_write(struct closure *cl)

retry_alloc:
spin_lock(&j->lock);
ret = journal_write_alloc(j, w, sectors);
ret = journal_write_alloc(j, w);

if (ret && j->can_discard) {
spin_unlock(&j->lock);
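
The buf -> _buf rename inside journal_entry_err() is macro hygiene: with W=2 (-Wshadow), a macro-local named like a common caller variable warns at every expansion site. A stand-alone illustration of the problem (not bcachefs code), using plain libc so it compiles on its own:

#include <stdio.h>

/* macro-local 'buf' shadows any 'buf' in the function that expands it */
#define log_line(fmt, ...)					\
do {								\
	char buf[80];						\
	snprintf(buf, sizeof(buf), fmt, ##__VA_ARGS__);		\
	fputs(buf, stderr);					\
} while (0)

void frob(const char *buf)	/* caller already has a 'buf' */
{
	log_line("frobbing %s\n", buf);	/* -Wshadow warning here */
}

Prefixing the macro's locals with an underscore, as the hunk above does with _buf, keeps expansions from colliding with ordinary caller identifiers.
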
|
@ -292,7 +292,6 @@ void bch2_journal_do_discards(struct journal *j)

static void bch2_journal_reclaim_fast(struct journal *j)
{
struct journal_entry_pin_list temp;
bool popped = false;

lockdep_assert_held(&j->lock);
@ -303,7 +302,7 @@ static void bch2_journal_reclaim_fast(struct journal *j)
 */
while (!fifo_empty(&j->pin) &&
!atomic_read(&fifo_peek_front(&j->pin).count)) {
fifo_pop(&j->pin, temp);
j->pin.front++;
popped = true;
}

@ -419,6 +418,8 @@ void bch2_journal_pin_set(struct journal *j, u64 seq,

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j: journal object
 * @pin: pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
@ -579,7 +580,11 @@ static u64 journal_seq_to_flush(struct journal *j)
}

/**
 * bch2_journal_reclaim - free up journal buckets
 * __bch2_journal_reclaim - free up journal buckets
 * @j: journal object
 * @direct: direct or background reclaim?
 * @kicked: requested to run since we last ran?
 * Returns: 0 on success, or -EIO if the journal has been shutdown
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
|
@ -724,7 +724,6 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,

if (!bp.level) {
const struct bch_extent_ptr *ptr;
struct bkey_s_c k;
unsigned i = 0;

k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
|
@ -164,7 +164,7 @@ static int bch2_copygc_get_buckets(struct btree_trans *trans,
lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
0, k, ({
struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
int ret = 0;
int ret2 = 0;

saw++;

@ -173,11 +173,11 @@ static int bch2_copygc_get_buckets(struct btree_trans *trans,
else if (bucket_in_flight(buckets_in_flight, b.k))
in_flight++;
else {
ret = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
if (ret >= 0)
ret2 = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
if (ret2 >= 0)
sectors += b.sectors;
}
ret;
ret2;
}));

pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
@ -304,13 +304,13 @@ static int bch2_copygc_thread(void *arg)
struct moving_context ctxt;
struct bch_move_stats move_stats;
struct io_clock *clock = &c->io_clock[WRITE];
struct buckets_in_flight move_buckets;
struct buckets_in_flight buckets;
u64 last, wait;
int ret = 0;

memset(&move_buckets, 0, sizeof(move_buckets));
memset(&buckets, 0, sizeof(buckets));

ret = rhashtable_init(&move_buckets.table, &bch_move_bucket_params);
ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
if (ret) {
bch_err_msg(c, ret, "allocating copygc buckets in flight");
return ret;
@ -329,12 +329,12 @@ static int bch2_copygc_thread(void *arg)
cond_resched();

if (!c->copy_gc_enabled) {
move_buckets_wait(&trans, &ctxt, &move_buckets, true);
move_buckets_wait(&trans, &ctxt, &buckets, true);
kthread_wait_freezable(c->copy_gc_enabled);
}

if (unlikely(freezing(current))) {
move_buckets_wait(&trans, &ctxt, &move_buckets, true);
move_buckets_wait(&trans, &ctxt, &buckets, true);
__refrigerator(false);
continue;
}
@ -345,7 +345,7 @@ static int bch2_copygc_thread(void *arg)
if (wait > clock->max_slop) {
c->copygc_wait_at = last;
c->copygc_wait = last + wait;
move_buckets_wait(&trans, &ctxt, &move_buckets, true);
move_buckets_wait(&trans, &ctxt, &buckets, true);
trace_and_count(c, copygc_wait, c, wait, last + wait);
bch2_kthread_io_clock_wait(clock, last + wait,
MAX_SCHEDULE_TIMEOUT);
@ -355,14 +355,14 @@ static int bch2_copygc_thread(void *arg)
c->copygc_wait = 0;

c->copygc_running = true;
ret = bch2_copygc(&trans, &ctxt, &move_buckets);
ret = bch2_copygc(&trans, &ctxt, &buckets);
c->copygc_running = false;

wake_up(&c->copygc_running_wq);
}

move_buckets_wait(&trans, &ctxt, &move_buckets, true);
rhashtable_destroy(&move_buckets.table);
move_buckets_wait(&trans, &ctxt, &buckets, true);
rhashtable_destroy(&buckets.table);
bch2_trans_exit(&trans);
bch2_moving_ctxt_exit(&ctxt);

|
@ -469,7 +469,7 @@ struct bch_opts {
#undef x
};

static const struct bch_opts bch2_opts_default = {
static const __maybe_unused struct bch_opts bch2_opts_default = {
#define x(_name, _bits, _mode, _type, _sb_opt, _default, ...) \
._name##_defined = true, \
._name = _default, \
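
bch2_opts_default lives in a header, so every .c file that includes opts.h gets its own copy; under the extra-warning builds, any translation unit that never reads the table would warn about an unused const variable, and __maybe_unused marks that as intentional without changing the generated object. A stand-alone sketch of the same pattern (the table below is invented for illustration and assumes the kernel's compiler-attribute headers):

/* In some header included by many .c files: */
static const __maybe_unused unsigned default_widths[] = {
	[0] = 8,
	[1] = 16,
	[2] = 32,
};
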
|
@ -81,8 +81,10 @@ void bch2_prt_printf(struct printbuf *out, const char *fmt, ...)
}

/**
 * printbuf_str - returns printbuf's buf as a C string, guaranteed to be null
 * terminated
 * bch2_printbuf_str() - returns printbuf's buf as a C string, guaranteed to be
 * null terminated
 * @buf: printbuf to terminate
 * Returns: Printbuf contents, as a nul terminated C string
 */
const char *bch2_printbuf_str(const struct printbuf *buf)
{
@ -97,8 +99,9 @@ const char *bch2_printbuf_str(const struct printbuf *buf)
}

/**
 * printbuf_exit - exit a printbuf, freeing memory it owns and poisoning it
 * bch2_printbuf_exit() - exit a printbuf, freeing memory it owns and poisoning it
 * against accidental use.
 * @buf: printbuf to exit
 */
void bch2_printbuf_exit(struct printbuf *buf)
{
@ -120,7 +123,7 @@ void bch2_printbuf_tabstop_pop(struct printbuf *buf)
}

/*
 * printbuf_tabstop_set - add a tabstop, n spaces from the previous tabstop
 * bch2_printbuf_tabstop_set() - add a tabstop, n spaces from the previous tabstop
 *
 * @buf: printbuf to control
 * @spaces: number of spaces from previous tabpstop
@ -144,7 +147,7 @@ int bch2_printbuf_tabstop_push(struct printbuf *buf, unsigned spaces)
}

/**
 * printbuf_indent_add - add to the current indent level
 * bch2_printbuf_indent_add() - add to the current indent level
 *
 * @buf: printbuf to control
 * @spaces: number of spaces to add to the current indent level
@ -164,7 +167,7 @@ void bch2_printbuf_indent_add(struct printbuf *buf, unsigned spaces)
}

/**
 * printbuf_indent_sub - subtract from the current indent level
 * bch2_printbuf_indent_sub() - subtract from the current indent level
 *
 * @buf: printbuf to control
 * @spaces: number of spaces to subtract from the current indent level
@ -227,9 +230,8 @@ static void __prt_tab(struct printbuf *out)
}

/**
 * prt_tab - Advance printbuf to the next tabstop
 *
 * @buf: printbuf to control
 * bch2_prt_tab() - Advance printbuf to the next tabstop
 * @out: printbuf to control
 *
 * Advance output to the next tabstop by printing spaces.
 */
@ -267,7 +269,7 @@ static void __prt_tab_rjust(struct printbuf *buf)
}

/**
 * prt_tab_rjust - Advance printbuf to the next tabstop, right justifying
 * bch2_prt_tab_rjust - Advance printbuf to the next tabstop, right justifying
 * previous output
 *
 * @buf: printbuf to control
@ -284,11 +286,11 @@ void bch2_prt_tab_rjust(struct printbuf *buf)
}

/**
 * prt_bytes_indented - Print an array of chars, handling embedded control characters
 * bch2_prt_bytes_indented() - Print an array of chars, handling embedded control characters
 *
 * @out: printbuf to output to
 * @str: string to print
 * @count: number of bytes to print
 * @out: output printbuf
 * @str: string to print
 * @count: number of bytes to print
 *
 * The following contol characters are handled as so:
 * \n: prt_newline newline that obeys current indent level
@ -335,32 +337,38 @@ void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned cou
}

/**
 * prt_human_readable_u64 - Print out a u64 in human readable units
 * bch2_prt_human_readable_u64() - Print out a u64 in human readable units
 * @out: output printbuf
 * @v: integer to print
 *
 * Units of 2^10 (default) or 10^3 are controlled via @buf->si_units
 * Units of 2^10 (default) or 10^3 are controlled via @out->si_units
 */
void bch2_prt_human_readable_u64(struct printbuf *buf, u64 v)
void bch2_prt_human_readable_u64(struct printbuf *out, u64 v)
{
bch2_printbuf_make_room(buf, 10);
buf->pos += string_get_size(v, 1, !buf->si_units,
buf->buf + buf->pos,
printbuf_remaining_size(buf));
bch2_printbuf_make_room(out, 10);
out->pos += string_get_size(v, 1, !out->si_units,
out->buf + out->pos,
printbuf_remaining_size(out));
}

/**
 * prt_human_readable_s64 - Print out a s64 in human readable units
 * bch2_prt_human_readable_s64() - Print out a s64 in human readable units
 * @out: output printbuf
 * @v: integer to print
 *
 * Units of 2^10 (default) or 10^3 are controlled via @buf->si_units
 * Units of 2^10 (default) or 10^3 are controlled via @out->si_units
 */
void bch2_prt_human_readable_s64(struct printbuf *buf, s64 v)
void bch2_prt_human_readable_s64(struct printbuf *out, s64 v)
{
if (v < 0)
prt_char(buf, '-');
bch2_prt_human_readable_u64(buf, abs(v));
prt_char(out, '-');
bch2_prt_human_readable_u64(out, abs(v));
}

/**
 * prt_units_u64 - Print out a u64 according to printbuf unit options
 * bch2_prt_units_u64() - Print out a u64 according to printbuf unit options
 * @out: output printbuf
 * @v: integer to print
 *
 * Units are either raw (default), or human reabable units (controlled via
 * @buf->human_readable_units)
@ -374,7 +382,9 @@ void bch2_prt_units_u64(struct printbuf *out, u64 v)
}

/**
 * prt_units_s64 - Print out a s64 according to printbuf unit options
 * bch2_prt_units_s64() - Print out a s64 according to printbuf unit options
 * @out: output printbuf
 * @v: integer to print
 *
 * Units are either raw (default), or human reabable units (controlled via
 * @buf->human_readable_units)
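
For readers unfamiliar with the API whose kernel-doc is being fixed up above, here is a minimal usage sketch of the printbuf helpers that appear in this commit; it assumes the bcachefs printbuf header, and used_percent / nr_bytes are invented example values:

struct printbuf buf = PRINTBUF;

prt_printf(&buf, "used %u%%, ", used_percent);	/* used_percent: assumed */
bch2_prt_human_readable_u64(&buf, nr_bytes);	/* nr_bytes: assumed */

pr_info("%s\n", bch2_printbuf_str(&buf));	/* nul-terminated view */
printbuf_exit(&buf);				/* frees any heap allocation */
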
|
@ -423,15 +423,9 @@ static int bch2_initialize_subvolumes(struct bch_fs *c)
root_volume.v.snapshot = cpu_to_le32(U32_MAX);
root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);

ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees,
&root_tree.k_i,
NULL, NULL, 0) ?:
bch2_btree_insert(c, BTREE_ID_snapshots,
&root_snapshot.k_i,
NULL, NULL, 0) ?:
bch2_btree_insert(c, BTREE_ID_subvolumes,
&root_volume.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
if (ret)
bch_err_fn(c, ret);
return ret;
@ -1010,9 +1004,7 @@ int bch2_fs_initialize(struct bch_fs *c)
bch2_inode_pack(&packed_inode, &root_inode);
packed_inode.inode.k.p.snapshot = U32_MAX;

ret = bch2_btree_insert(c, BTREE_ID_inodes,
&packed_inode.inode.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
if (ret) {
bch_err_msg(c, ret, "creating root directory");
goto err;
|
@ -91,6 +91,9 @@ void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
bch2_bkey_ptrs_to_text(out, c, k);
}

#if 0
Currently disabled, needs to be debugged:

bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l);
@ -98,6 +101,7 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r

return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
}
#endif

int bch2_trans_mark_reflink_v(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
|
@ -29,7 +29,6 @@ static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
#define SIX_LOCK_HELD_intent (1U << 26)
#define SIX_LOCK_HELD_write (1U << 27)
#define SIX_LOCK_WAITING_read (1U << (28 + SIX_LOCK_read))
#define SIX_LOCK_WAITING_intent (1U << (28 + SIX_LOCK_intent))
#define SIX_LOCK_WAITING_write (1U << (28 + SIX_LOCK_write))
#define SIX_LOCK_NOSPIN (1U << 31)

|
@ -507,18 +507,18 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
bch2_trans_iter_exit(trans, &iter);

if (!ret && !found) {
struct bkey_i_subvolume *s;
struct bkey_i_subvolume *u;

*subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);

s = bch2_bkey_get_mut_typed(trans, &iter,
u = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_subvolumes, POS(0, *subvol_id),
0, subvolume);
ret = PTR_ERR_OR_ZERO(s);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
return ret;

SET_BCH_SUBVOLUME_SNAP(&s->v, false);
SET_BCH_SUBVOLUME_SNAP(&u->v, false);
}

return ret;
@ -930,7 +930,7 @@ static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
swap(s->children[0], s->children[1]);
}

int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
struct bch_fs *c = trans->c;
struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
|
@ -246,9 +246,9 @@ struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
/* XXX: we're not checking that offline device have enough space */

for_each_online_member(ca, c, i) {
struct bch_sb_handle *sb = &ca->disk_sb;
struct bch_sb_handle *dev_sb = &ca->disk_sb;

if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
percpu_ref_put(&ca->ref);
return NULL;
}
|
@ -435,7 +435,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
#ifndef BCH_WRITE_REF_DEBUG
percpu_ref_reinit(&c->writes);
#else
for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
for (i = 0; i < BCH_WRITE_REF_NR; i++) {
BUG_ON(atomic_long_read(&c->writes[i]));
atomic_long_inc(&c->writes[i]);
}
|
@ -113,10 +113,6 @@ do { \
prt_human_readable_s64(out, val); \
} while (0)

#define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var))
#define var_print(_var) sysfs_print(_var, var(_var))
#define var_hprint(_var) sysfs_hprint(_var, var(_var))

#define sysfs_strtoul(file, var) \
do { \
if (attr == &sysfs_ ## file) \
@ -139,30 +135,6 @@ do { \
_v; \
})

#define strtoul_restrict_or_return(cp, min, max) \
({ \
unsigned long __v = 0; \
int _r = strtoul_safe_restrict(cp, __v, min, max); \
if (_r) \
return _r; \
__v; \
})

#define strtoi_h_or_return(cp) \
({ \
u64 _v; \
int _r = strtoi_h(cp, &_v); \
if (_r) \
return _r; \
_v; \
})

#define sysfs_hatoi(file, var) \
do { \
if (attr == &sysfs_ ## file) \
return strtoi_h(buf, &var) ?: (ssize_t) size; \
} while (0)

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
@ -291,7 +263,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
incompressible_sectors = 0,
compressed_sectors_compressed = 0,
compressed_sectors_uncompressed = 0;
int ret;
int ret = 0;

if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
|
@ -128,14 +128,13 @@ static int test_iterate(struct bch_fs *c, u64 nr)
pr_info("inserting test keys");

for (i = 0; i < nr; i++) {
struct bkey_i_cookie k;
struct bkey_i_cookie ck;

bkey_cookie_init(&k.k_i);
k.k.p.offset = i;
k.k.p.snapshot = U32_MAX;
bkey_cookie_init(&ck.k_i);
ck.k.p.offset = i;
ck.k.p.snapshot = U32_MAX;

ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
if (ret) {
bch_err_msg(c, ret, "insert error");
goto err;
@ -194,15 +193,14 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
pr_info("inserting test extents");

for (i = 0; i < nr; i += 8) {
struct bkey_i_cookie k;
struct bkey_i_cookie ck;

bkey_cookie_init(&k.k_i);
k.k.p.offset = i + 8;
k.k.p.snapshot = U32_MAX;
k.k.size = 8;
bkey_cookie_init(&ck.k_i);
ck.k.p.offset = i + 8;
ck.k.p.snapshot = U32_MAX;
ck.k.size = 8;

ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
if (ret) {
bch_err_msg(c, ret, "insert error");
goto err;
@ -263,14 +261,13 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
pr_info("inserting test keys");

for (i = 0; i < nr; i++) {
struct bkey_i_cookie k;
struct bkey_i_cookie ck;

bkey_cookie_init(&k.k_i);
k.k.p.offset = i * 2;
k.k.p.snapshot = U32_MAX;
bkey_cookie_init(&ck.k_i);
ck.k.p.offset = i * 2;
ck.k.p.snapshot = U32_MAX;

ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
if (ret) {
bch_err_msg(c, ret, "insert error");
goto err;
@ -336,15 +333,14 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
pr_info("inserting test keys");

for (i = 0; i < nr; i += 16) {
struct bkey_i_cookie k;
struct bkey_i_cookie ck;

bkey_cookie_init(&k.k_i);
k.k.p.offset = i + 16;
k.k.p.snapshot = U32_MAX;
k.k.size = 8;
bkey_cookie_init(&ck.k_i);
ck.k.p.offset = i + 16;
ck.k.p.snapshot = U32_MAX;
ck.k.size = 8;

ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
if (ret) {
bch_err_msg(c, ret, "insert error");
goto err;
@ -458,8 +454,7 @@ static int insert_test_extent(struct bch_fs *c,
k.k_i.k.size = end - start;
k.k_i.k.version.lo = test_version++;

ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0);
if (ret)
bch_err_fn(c, ret);
return ret;
@ -546,8 +541,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)

bkey_cookie_init(&cookie.k_i);
cookie.k.p.snapshot = snapid_hi;
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
if (ret)
return ret;

@ -572,8 +566,7 @@ static int test_snapshots(struct bch_fs *c, u64 nr)

bkey_cookie_init(&cookie.k_i);
cookie.k.p.snapshot = U32_MAX;
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
NULL, NULL, 0);
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
if (ret)
return ret;

|
@ -112,10 +112,10 @@ got_unit:

#define parse_or_ret(cp, _f) \
do { \
int ret = _f; \
if (ret < 0) \
return ret; \
cp += ret; \
int _ret = _f; \
if (_ret < 0) \
return _ret; \
cp += _ret; \
} while (0)

static int __bch2_strtou64_h(const char *cp, u64 *res)
@ -605,11 +605,9 @@ void bch2_time_stats_init(struct bch2_time_stats *stats)

/**
 * bch2_ratelimit_delay() - return how long to delay until the next time to do
 * some work
 *
 * @d - the struct bch_ratelimit to update
 *
 * Returns the amount of time to delay by, in jiffies
 * some work
 * @d: the struct bch_ratelimit to update
 * Returns: the amount of time to delay by, in jiffies
 */
u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
{
@ -622,9 +620,8 @@ u64 bch2_ratelimit_delay(struct bch_ratelimit *d)

/**
 * bch2_ratelimit_increment() - increment @d by the amount of work done
 *
 * @d - the struct bch_ratelimit to update
 * @done - the amount of work done, in arbitrary units
 * @d: the struct bch_ratelimit to update
 * @done: the amount of work done, in arbitrary units
 */
void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
{
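
As documented above, the two ratelimit helpers pair up: the caller accounts work in arbitrary units and then sleeps for however many jiffies the limiter asks for. A hypothetical throttled-work sketch; the rate variable and the unit of work are assumptions, only the two bch2_ratelimit_*() calls come from this file:

/* rate is assumed to be a struct bch_ratelimit set up elsewhere */
bch2_ratelimit_increment(&rate, sectors_done);	/* account work done */

u64 delay = bch2_ratelimit_delay(&rate);	/* jiffies until next batch */
if (delay)
	schedule_timeout_interruptible(delay);
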
|
@ -776,12 +776,12 @@ static inline void __move_gap(void *array, size_t element_size,

#define bubble_sort(_base, _nr, _cmp) \
do { \
ssize_t _i, _end; \
ssize_t _i, _last; \
bool _swapped = true; \
\
for (_end = (ssize_t) (_nr) - 1; _end > 0 && _swapped; --_end) {\
for (_last= (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
_swapped = false; \
for (_i = 0; _i < _end; _i++) \
for (_i = 0; _i < _last; _i++) \
if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) { \
swap((_base)[_i], (_base)[_i + 1]); \
_swapped = true; \
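
The _end -> _last rename above only avoids clashing with caller identifiers; the macro's behaviour is unchanged. A small usage sketch of bubble_sort() as defined above (the comparison macro and the array are invented for this example, and swap() is the kernel's generic helper):

#define cmp_u32(l, r)	((l) > (r) ? 1 : (l) < (r) ? -1 : 0)

u32 lru[4] = { 9, 3, 7, 1 };

bubble_sort(lru, 4, cmp_u32);	/* lru is now { 1, 3, 7, 9 } */
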
|
@ -13,10 +13,9 @@

/**
 * bch2_varint_encode - encode a variable length integer
 * @out - destination to encode to
 * @v - unsigned integer to encode
 *
 * Returns the size in bytes of the encoded integer - at most 9 bytes
 * @out: destination to encode to
 * @v: unsigned integer to encode
 * Returns: size in bytes of the encoded integer - at most 9 bytes
 */
int bch2_varint_encode(u8 *out, u64 v)
{
@ -40,11 +39,10 @@ int bch2_varint_encode(u8 *out, u64 v)

/**
 * bch2_varint_decode - encode a variable length integer
 * @in - varint to decode
 * @end - end of buffer to decode from
 * @out - on success, decoded integer
 *
 * Returns the size in bytes of the decoded integer - or -1 on failure (would
 * @in: varint to decode
 * @end: end of buffer to decode from
 * @out: on success, decoded integer
 * Returns: size in bytes of the decoded integer - or -1 on failure (would
 * have read past the end of the buffer)
 */
int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
@ -73,6 +71,9 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)

/**
 * bch2_varint_encode_fast - fast version of bch2_varint_encode
 * @out: destination to encode to
 * @v: unsigned integer to encode
 * Returns: size in bytes of the encoded integer - at most 9 bytes
 *
 * This version assumes it's always safe to write 8 bytes to @out, even if the
 * encoded integer would be smaller.
@ -96,6 +97,11 @@ int bch2_varint_encode_fast(u8 *out, u64 v)

/**
 * bch2_varint_decode_fast - fast version of bch2_varint_decode
 * @in: varint to decode
 * @end: end of buffer to decode from
 * @out: on success, decoded integer
 * Returns: size in bytes of the decoded integer - or -1 on failure (would
 * have read past the end of the buffer)
 *
 * This version assumes that it is safe to read at most 8 bytes past the end of
 * @end (we still return an error if the varint extends past @end).
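
A short round-trip sketch for the varint helpers whose kernel-doc is completed above, using only the documented signatures (the example value is arbitrary):

u8 buf[9];	/* an encoding needs at most 9 bytes */
u64 decoded;

int enc_bytes = bch2_varint_encode(buf, 1234567);
int dec_bytes = bch2_varint_decode(buf, buf + enc_bytes, &decoded);

/* On success dec_bytes == enc_bytes and decoded == 1234567;
 * dec_bytes would be -1 if the varint ran past the end pointer. */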