Mirror of https://github.com/torvalds/linux.git, synced 2024-11-22 12:11:40 +00:00
bcachefs: Reduce/kill BKEY_PADDED use
With various newer key types - stripe keys, inline data extents - the old
approach of calculating the maximum size of the value is becoming more and
more error prone. Better to switch to bkey_on_stack, which can dynamically
allocate if necessary to handle any size bkey.

In particular we also want to get rid of BKEY_EXTENT_VAL_U64s_MAX.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in: parent 8deed5f4e5, commit 07a1006ae8
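The conversion applied throughout the hunks below is mechanical: a fixed-size pad declared
with BKEY_PADDED() becomes a struct bkey_buf, which starts out pointing at a small inline
array and falls over to the filesystem's large_bkey_pool mempool when a key is too big to
fit. The following is a condensed sketch of that before/after pattern, not a verbatim
excerpt from the diff; it assumes the in-tree bcachefs headers, and use_key() is a
hypothetical consumer standing in for whatever the real caller does with the key.

	/* Before: buffer sized at compile time from a worst-case value size. */
	static void old_style(struct bch_fs *c, struct btree *b, struct bkey_packed *src)
	{
		BKEY_PADDED(k) tmp;			/* BKEY_EXTENT_VAL_U64s_MAX worth of padding */

		bch2_bkey_unpack(b, &tmp.k, src);	/* key embedded in the struct: &tmp.k */
		use_key(&tmp.k);			/* hypothetical consumer */
	}

	/* After: bkey_buf grows through the mempool only when the key doesn't fit inline. */
	static void new_style(struct bch_fs *c, struct btree *b, struct bkey_packed *src)
	{
		struct bkey_buf tmp;

		bch2_bkey_buf_init(&tmp);		/* tmp.k points at tmp.onstack[] */
		bch2_bkey_buf_unpack(&tmp, c, b, src);	/* switches to c->large_bkey_pool if needed */
		use_key(tmp.k);				/* note: tmp.k is a pointer now, not &tmp.k */
		bch2_bkey_buf_exit(&tmp, c);		/* frees the mempool allocation, if any */
	}

The tmp.k versus &tmp.k change is why so many call sites below are touched: the padded
struct embedded the key directly, while bkey_buf holds a pointer that may refer either to
its inline storage or to a mempool allocation, so every buffer now needs a matching
init/exit pair.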
fs/bcachefs/bcachefs_format.h
@@ -638,8 +638,6 @@ struct bch_reservation {
 #define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
 
-#define BKEY_PADDED(key)	__BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX)
-
 /* * Maximum possible size of an entire extent, key + value: */
 #define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
 
fs/bcachefs/bkey_buf.h (new file, 60 lines)
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BKEY_BUF_H
+#define _BCACHEFS_BKEY_BUF_H
+
+#include "bcachefs.h"
+
+struct bkey_buf {
+	struct bkey_i	*k;
+	u64		onstack[12];
+};
+
+static inline void bch2_bkey_buf_realloc(struct bkey_buf *s,
+					 struct bch_fs *c, unsigned u64s)
+{
+	if (s->k == (void *) s->onstack &&
+	    u64s > ARRAY_SIZE(s->onstack)) {
+		s->k = mempool_alloc(&c->large_bkey_pool, GFP_NOFS);
+		memcpy(s->k, s->onstack, sizeof(s->onstack));
+	}
+}
+
+static inline void bch2_bkey_buf_reassemble(struct bkey_buf *s,
+					    struct bch_fs *c,
+					    struct bkey_s_c k)
+{
+	bch2_bkey_buf_realloc(s, c, k.k->u64s);
+	bkey_reassemble(s->k, k);
+}
+
+static inline void bch2_bkey_buf_copy(struct bkey_buf *s,
+				      struct bch_fs *c,
+				      struct bkey_i *src)
+{
+	bch2_bkey_buf_realloc(s, c, src->k.u64s);
+	bkey_copy(s->k, src);
+}
+
+static inline void bch2_bkey_buf_unpack(struct bkey_buf *s,
+					struct bch_fs *c,
+					struct btree *b,
+					struct bkey_packed *src)
+{
+	bch2_bkey_buf_realloc(s, c, BKEY_U64s +
+			      bkeyp_val_u64s(&b->format, src));
+	bch2_bkey_unpack(b, s->k, src);
+}
+
+static inline void bch2_bkey_buf_init(struct bkey_buf *s)
+{
+	s->k = (void *) s->onstack;
+}
+
+static inline void bch2_bkey_buf_exit(struct bkey_buf *s, struct bch_fs *c)
+{
+	if (s->k != (void *) s->onstack)
+		mempool_free(s->k, &c->large_bkey_pool);
+	s->k = NULL;
+}
+
+#endif /* _BCACHEFS_BKEY_BUF_H */
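A note on the header just added: the 12-u64 onstack array covers a bare struct bkey plus a
small value, and bch2_bkey_buf_realloc() only switches to the preallocated large_bkey_pool
the first time a caller asks for more than that; it copies just sizeof(onstack) bytes, so
it is meant to be called before the buffer is filled, which is how the reassemble, copy and
unpack helpers above use it. A rough sketch of the lifecycle the converted callers below
follow (the iterator plumbing is illustrative, process_key() is a hypothetical consumer,
and the in-tree bcachefs headers are assumed):

	static int copy_out_key(struct bch_fs *c, struct btree_iter *iter)
	{
		struct bkey_buf sk;
		struct bkey_s_c k;
		int ret = 0;

		bch2_bkey_buf_init(&sk);	/* no allocation yet: sk.k points at sk.onstack */

		k = bch2_btree_iter_peek(iter);
		ret = bkey_err(k);
		if (ret)
			goto out;

		/* allocates from c->large_bkey_pool if k.k->u64s > ARRAY_SIZE(sk.onstack) */
		bch2_bkey_buf_reassemble(&sk, c, k);

		/* sk.k is now a stable copy, still valid after btree locks are dropped */
		process_key(sk.k);		/* hypothetical consumer */
	out:
		bch2_bkey_buf_exit(&sk, c);	/* returns the mempool entry, if one was taken */
		return ret;
	}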
fs/bcachefs/bkey_on_stack.h (deleted, 43 lines)
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BKEY_ON_STACK_H
-#define _BCACHEFS_BKEY_ON_STACK_H
-
-#include "bcachefs.h"
-
-struct bkey_on_stack {
-	struct bkey_i	*k;
-	u64		onstack[12];
-};
-
-static inline void bkey_on_stack_realloc(struct bkey_on_stack *s,
-					 struct bch_fs *c, unsigned u64s)
-{
-	if (s->k == (void *) s->onstack &&
-	    u64s > ARRAY_SIZE(s->onstack)) {
-		s->k = mempool_alloc(&c->large_bkey_pool, GFP_NOFS);
-		memcpy(s->k, s->onstack, sizeof(s->onstack));
-	}
-}
-
-static inline void bkey_on_stack_reassemble(struct bkey_on_stack *s,
-					    struct bch_fs *c,
-					    struct bkey_s_c k)
-{
-	bkey_on_stack_realloc(s, c, k.k->u64s);
-	bkey_reassemble(s->k, k);
-}
-
-static inline void bkey_on_stack_init(struct bkey_on_stack *s)
-{
-	s->k = (void *) s->onstack;
-}
-
-static inline void bkey_on_stack_exit(struct bkey_on_stack *s,
-				      struct bch_fs *c)
-{
-	if (s->k != (void *) s->onstack)
-		mempool_free(s->k, &c->large_bkey_pool);
-	s->k = NULL;
-}
-
-#endif /* _BCACHEFS_BKEY_ON_STACK_H */
fs/bcachefs/bkey_sort.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "bkey_sort.h"
 #include "bset.h"
 #include "extents.h"
@@ -187,11 +187,11 @@ bch2_sort_repack_merge(struct bch_fs *c,
			bool filter_whiteouts)
 {
	struct bkey_packed *out = vstruct_last(dst), *k_packed;
-	struct bkey_on_stack k;
+	struct bkey_buf k;
	struct btree_nr_keys nr;
 
	memset(&nr, 0, sizeof(nr));
-	bkey_on_stack_init(&k);
+	bch2_bkey_buf_init(&k);
 
	while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
		if (filter_whiteouts && bkey_whiteout(k_packed))
@@ -204,7 +204,7 @@ bch2_sort_repack_merge(struct bch_fs *c,
		 * node; we have to make a copy of the entire key before calling
		 * normalize
		 */
-		bkey_on_stack_realloc(&k, c, k_packed->u64s + BKEY_U64s);
+		bch2_bkey_buf_realloc(&k, c, k_packed->u64s + BKEY_U64s);
		bch2_bkey_unpack(src, k.k, k_packed);
 
		if (filter_whiteouts &&
@@ -215,7 +215,7 @@ bch2_sort_repack_merge(struct bch_fs *c,
	}
 
	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
-	bkey_on_stack_exit(&k, c);
+	bch2_bkey_buf_exit(&k, c);
	return nr;
 }
 
@@ -315,11 +315,11 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
	struct bkey l_unpacked, r_unpacked;
	struct bkey_s l, r;
	struct btree_nr_keys nr;
-	struct bkey_on_stack split;
+	struct bkey_buf split;
	unsigned i;
 
	memset(&nr, 0, sizeof(nr));
-	bkey_on_stack_init(&split);
+	bch2_bkey_buf_init(&split);
 
	sort_iter_sort(iter, extent_sort_fix_overlapping_cmp);
	for (i = 0; i < iter->used;) {
@@ -379,7 +379,7 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
			/*
			 * r wins, but it overlaps in the middle of l - split l:
			 */
-			bkey_on_stack_reassemble(&split, c, l.s_c);
+			bch2_bkey_buf_reassemble(&split, c, l.s_c);
			bch2_cut_back(bkey_start_pos(r.k), split.k);
 
			bch2_cut_front_s(r.k->p, l);
@@ -398,7 +398,7 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
 
	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
 
-	bkey_on_stack_exit(&split, c);
+	bch2_bkey_buf_exit(&split, c);
	return nr;
 }
 
fs/bcachefs/btree_cache.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "bkey_buf.h"
 #include "btree_cache.h"
 #include "btree_io.h"
 #include "btree_iter.h"
@@ -899,10 +900,12 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
	struct btree *parent;
	struct btree_node_iter node_iter;
	struct bkey_packed *k;
-	BKEY_PADDED(k) tmp;
+	struct bkey_buf tmp;
	struct btree *ret = NULL;
	unsigned level = b->c.level;
 
+	bch2_bkey_buf_init(&tmp);
+
	parent = btree_iter_node(iter, level + 1);
	if (!parent)
		return NULL;
@@ -936,9 +939,9 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
	if (!k)
		goto out;
 
-	bch2_bkey_unpack(parent, &tmp.k, k);
+	bch2_bkey_buf_unpack(&tmp, c, parent, k);
 
-	ret = bch2_btree_node_get(c, iter, &tmp.k, level,
+	ret = bch2_btree_node_get(c, iter, tmp.k, level,
				  SIX_LOCK_intent, _THIS_IP_);
 
	if (PTR_ERR_OR_ZERO(ret) == -EINTR && !trans->nounlock) {
@@ -958,7 +961,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
		if (sib == btree_prev_sib)
			btree_node_unlock(iter, level);
 
-		ret = bch2_btree_node_get(c, iter, &tmp.k, level,
+		ret = bch2_btree_node_get(c, iter, tmp.k, level,
					  SIX_LOCK_intent, _THIS_IP_);
 
		/*
@@ -999,6 +1002,8 @@ out:
 
	bch2_btree_trans_verify_locks(trans);
 
+	bch2_bkey_buf_exit(&tmp, c);
+
	return ret;
 }
 
fs/bcachefs/btree_gc.c
@@ -8,7 +8,7 @@
 #include "alloc_background.h"
 #include "alloc_foreground.h"
 #include "bkey_methods.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_locking.h"
 #include "btree_update_interior.h"
 #include "btree_io.h"
@@ -267,10 +267,12 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bpos next_node_start = b->data->min_key;
+	struct bkey_buf tmp;
	u8 max_stale = 0;
	int ret = 0;
 
	bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
+	bch2_bkey_buf_init(&tmp);
 
	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_debugcheck(c, b, k);
@@ -284,10 +286,9 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 
		if (b->c.level) {
			struct btree *child;
-			BKEY_PADDED(k) tmp;
 
-			bkey_reassemble(&tmp.k, k);
-			k = bkey_i_to_s_c(&tmp.k);
+			bch2_bkey_buf_reassemble(&tmp, c, k);
+			k = bkey_i_to_s_c(tmp.k);
 
			bch2_btree_and_journal_iter_advance(&iter);
 
@@ -299,7 +300,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
				break;
 
			if (b->c.level > target_depth) {
-				child = bch2_btree_node_get_noiter(c, &tmp.k,
+				child = bch2_btree_node_get_noiter(c, tmp.k,
						b->c.btree_id, b->c.level - 1);
				ret = PTR_ERR_OR_ZERO(child);
				if (ret)
@@ -317,6 +318,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
		}
	}
 
+	bch2_bkey_buf_exit(&tmp, c);
	return ret;
 }
 
@@ -929,10 +931,10 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	int ret = 0;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
 
	iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
@@ -941,7 +943,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = bkey_err(k))) {
		if (gc_btree_gens_key(c, k)) {
-			bkey_on_stack_reassemble(&sk, c, k);
+			bch2_bkey_buf_reassemble(&sk, c, k);
			bch2_extent_normalize(c, bkey_i_to_s(sk.k));
 
			bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
@@ -961,7 +963,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
	}
 
	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
 
	return ret;
 }
@@ -1073,7 +1075,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
	}
 
	if (bch2_keylist_realloc(&keylist, NULL, 0,
-			(BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
+			BKEY_BTREE_PTR_U64s_MAX * nr_old_nodes)) {
		trace_btree_gc_coalesce_fail(c,
				BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
		return;
fs/bcachefs/btree_io.c
@@ -1320,12 +1320,13 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
					struct btree_write_bio *wbio)
 {
	struct btree *b		= wbio->wbio.bio.bi_private;
-	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+	struct bkey_buf k;
	struct bch_extent_ptr *ptr;
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;
 
+	bch2_bkey_buf_init(&k);
	bch2_trans_init(&trans, c, 0, 0);
 
	iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
@@ -1344,21 +1345,22 @@ retry:
 
	BUG_ON(!btree_node_hashed(b));
 
-	bkey_copy(&tmp.k, &b->key);
+	bch2_bkey_buf_copy(&k, c, &b->key);
 
-	bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
+	bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
 
-	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&tmp.k)))
+	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
		goto err;
 
-	ret = bch2_btree_node_update_key(c, iter, b, &tmp.k);
+	ret = bch2_btree_node_update_key(c, iter, b, k.k);
	if (ret == -EINTR)
		goto retry;
	if (ret)
		goto err;
 out:
	bch2_trans_exit(&trans);
+	bch2_bkey_buf_exit(&k, c);
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
@@ -1476,7 +1478,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
-	BKEY_PADDED(key) k;
+	struct bkey_buf k;
	struct bch_extent_ptr *ptr;
	struct sort_iter sort_iter;
	struct nonce nonce;
@@ -1487,6 +1489,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
	bool validate_before_checksum = false;
	void *data;
 
+	bch2_bkey_buf_init(&k);
+
	if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
		return;
 
@@ -1696,15 +1700,16 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
	 * just make all btree node writes FUA to keep things sane.
	 */
 
-	bkey_copy(&k.key, &b->key);
+	bch2_bkey_buf_copy(&k, c, &b->key);
 
-	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
+	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(k.k)), ptr)
		ptr->offset += b->written;
 
	b->written += sectors_to_write;
 
	/* XXX: submitting IO with btree locks held: */
-	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, &k.key);
+	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k);
+	bch2_bkey_buf_exit(&k, c);
	return;
 err:
	set_btree_node_noevict(b);
fs/bcachefs/btree_iter.c
@@ -2,6 +2,7 @@
 
 #include "bcachefs.h"
 #include "bkey_methods.h"
+#include "bkey_buf.h"
 #include "btree_cache.h"
 #include "btree_iter.h"
 #include "btree_key_cache.h"
@@ -1048,27 +1049,31 @@ static void btree_iter_prefetch(struct btree_iter *iter)
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
-	BKEY_PADDED(k) tmp;
+	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(iter, iter->level);
 
+	bch2_bkey_buf_init(&tmp);
+
	while (nr) {
		if (!bch2_btree_node_relock(iter, iter->level))
-			return;
+			break;
 
		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;
 
-		bch2_bkey_unpack(l->b, &tmp.k, k);
-		bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1);
+		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
+		bch2_btree_node_prefetch(c, iter, tmp.k, iter->level - 1);
	}
 
	if (!was_locked)
		btree_node_unlock(iter, iter->level);
+
+	bch2_bkey_buf_exit(&tmp, c);
 }
 
 static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
@@ -1100,30 +1105,34 @@ static __always_inline int btree_iter_down(struct btree_iter *iter,
	struct btree *b;
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(iter, level);
-	BKEY_PADDED(k) tmp;
+	struct bkey_buf tmp;
	int ret;
 
	EBUG_ON(!btree_node_locked(iter, iter->level));
 
-	bch2_bkey_unpack(l->b, &tmp.k,
+	bch2_bkey_buf_init(&tmp);
+	bch2_bkey_buf_unpack(&tmp, c, l->b,
			 bch2_btree_node_iter_peek(&l->iter, l->b));
 
-	b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type, trace_ip);
-	if (unlikely(IS_ERR(b)))
-		return PTR_ERR(b);
+	b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
+	ret = PTR_ERR_OR_ZERO(b);
+	if (unlikely(ret))
+		goto err;
 
	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);
 
-	if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 &&
-	    unlikely(b != btree_node_mem_ptr(&tmp.k)))
+	if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
+	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(iter, level + 1, b);
 
	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);
 
	iter->level = level;
 
	return 0;
+err:
+	bch2_bkey_buf_exit(&tmp, c);
+	return ret;
 }
 
 static void btree_iter_up(struct btree_iter *iter)
fs/bcachefs/btree_types.h
@@ -57,7 +57,7 @@ struct btree_write {
 
 struct btree_alloc {
	struct open_buckets	ob;
-	BKEY_PADDED(k);
+	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
 };
 
 struct btree_bkey_cached_common {
fs/bcachefs/btree_update_interior.c
@@ -195,7 +195,7 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
 {
	struct write_point *wp;
	struct btree *b;
-	BKEY_PADDED(k) tmp;
+	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
	struct open_buckets ob = { .nr = 0 };
	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
	unsigned nr_reserve;
fs/bcachefs/ec.c
@@ -4,7 +4,7 @@
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "bset.h"
 #include "btree_gc.h"
 #include "btree_update.h"
@@ -783,10 +783,10 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_s_extent e;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	int ret = 0, dev, idx;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
 
	/* XXX this doesn't support the reflink btree */
@@ -813,7 +813,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
 
		dev = s->key.v.ptrs[idx].dev;
 
-		bkey_on_stack_reassemble(&sk, c, k);
+		bch2_bkey_buf_reassemble(&sk, c, k);
		e = bkey_i_to_s_extent(sk.k);
 
		bch2_bkey_drop_ptrs(e.s, ptr, ptr->dev != dev);
@@ -834,7 +834,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
	}
 
	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
 
	return ret;
 }
fs/bcachefs/extent_update.c
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
-#include "bkey_on_stack.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "buckets.h"
fs/bcachefs/fs-io.c
@@ -3,7 +3,7 @@
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "clock.h"
@@ -774,7 +774,7 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
		       struct readpages_iter *readpages_iter)
 {
	struct bch_fs *c = trans->c;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	int ret = 0;
@@ -782,7 +782,7 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
	rbio->c = c;
	rbio->start_time = local_clock();
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
 retry:
	while (1) {
		struct bkey_s_c k;
@@ -800,7 +800,7 @@ retry:
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;
 
-		bkey_on_stack_reassemble(&sk, c, k);
+		bch2_bkey_buf_reassemble(&sk, c, k);
 
		ret = bch2_read_indirect_extent(trans,
					&offset_into_extent, &sk);
@@ -845,7 +845,7 @@ retry:
		bio_endio(&rbio->bio);
	}
 
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
 }
 
 void bch2_readahead(struct readahead_control *ractl)
@@ -2431,7 +2431,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 {
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
-	struct bkey_on_stack copy;
+	struct bkey_buf copy;
	struct btree_trans trans;
	struct btree_iter *src, *dst;
	loff_t shift, new_size;
@@ -2441,7 +2441,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;
 
-	bkey_on_stack_init(&copy);
+	bch2_bkey_buf_init(&copy);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
 
	/*
@@ -2529,7 +2529,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
			break;
 reassemble:
-		bkey_on_stack_reassemble(&copy, c, k);
+		bch2_bkey_buf_reassemble(&copy, c, k);
 
		if (insert &&
		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
@@ -2606,7 +2606,7 @@ bkey_err:
	}
 err:
	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&copy, c);
+	bch2_bkey_buf_exit(&copy, c);
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);
	return ret;
fs/bcachefs/fs.c
@@ -3,7 +3,7 @@
 
 #include "bcachefs.h"
 #include "acl.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "chardev.h"
@@ -899,7 +899,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
-	struct bkey_on_stack cur, prev;
+	struct bkey_buf cur, prev;
	struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
	unsigned offset_into_extent, sectors;
	bool have_extent = false;
@@ -912,8 +912,8 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
	if (start + len < start)
		return -EINVAL;
 
-	bkey_on_stack_init(&cur);
-	bkey_on_stack_init(&prev);
+	bch2_bkey_buf_init(&cur);
+	bch2_bkey_buf_init(&prev);
	bch2_trans_init(&trans, c, 0, 0);
 
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
@@ -932,7 +932,7 @@ retry:
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;
 
-		bkey_on_stack_reassemble(&cur, c, k);
+		bch2_bkey_buf_reassemble(&cur, c, k);
 
		ret = bch2_read_indirect_extent(&trans,
					&offset_into_extent, &cur);
@@ -940,7 +940,7 @@ retry:
			break;
 
		k = bkey_i_to_s_c(cur.k);
-		bkey_on_stack_realloc(&prev, c, k.k->u64s);
+		bch2_bkey_buf_realloc(&prev, c, k.k->u64s);
 
		sectors = min(sectors, k.k->size - offset_into_extent);
 
@@ -974,8 +974,8 @@ retry:
					     FIEMAP_EXTENT_LAST);
 
	ret = bch2_trans_exit(&trans) ?: ret;
-	bkey_on_stack_exit(&cur, c);
-	bkey_on_stack_exit(&prev, c);
+	bch2_bkey_buf_exit(&cur, c);
+	bch2_bkey_buf_exit(&prev, c);
	return ret < 0 ? ret : 0;
 }
 
fs/bcachefs/fsck.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "dirent.h"
 #include "error.h"
@@ -464,11 +464,11 @@ static int check_extents(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
-	struct bkey_on_stack prev;
+	struct bkey_buf prev;
	u64 i_sectors;
	int ret = 0;
 
-	bkey_on_stack_init(&prev);
+	bch2_bkey_buf_init(&prev);
	prev.k->k = KEY(0, 0, 0);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
 
@@ -500,7 +500,7 @@ retry:
				goto err;
			}
		}
-		bkey_on_stack_reassemble(&prev, c, k);
+		bch2_bkey_buf_reassemble(&prev, c, k);
 
		ret = walk_inode(&trans, &w, k.k->p.inode);
		if (ret)
@@ -569,7 +569,7 @@ err:
 fsck_err:
	if (ret == -EINTR)
		goto retry;
-	bkey_on_stack_exit(&prev, c);
+	bch2_bkey_buf_exit(&prev, c);
	return bch2_trans_exit(&trans) ?: ret;
 }
 
fs/bcachefs/io.c
@@ -9,7 +9,7 @@
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "bset.h"
 #include "btree_update.h"
 #include "buckets.h"
@@ -425,14 +425,14 @@ int bch2_fpunch(struct bch_fs *c, u64 inum, u64 start, u64 end,
 int bch2_write_index_default(struct bch_write_op *op)
 {
	struct bch_fs *c = op->c;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
 
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
@@ -444,7 +444,7 @@ int bch2_write_index_default(struct bch_write_op *op)
 
		k = bch2_keylist_front(keys);
 
-		bkey_on_stack_realloc(&sk, c, k->k.u64s);
+		bch2_bkey_buf_realloc(&sk, c, k->k.u64s);
		bkey_copy(sk.k, k);
		bch2_cut_front(iter->pos, sk.k);
 
@@ -461,7 +461,7 @@ int bch2_write_index_default(struct bch_write_op *op)
	} while (!bch2_keylist_empty(keys));
 
	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
 
	return ret;
 }
@@ -1620,14 +1620,14 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
 {
	struct btree_trans trans;
	struct btree_iter *iter;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	struct bkey_s_c k;
	int ret;
 
	flags &= ~BCH_READ_LAST_FRAGMENT;
	flags |= BCH_READ_MUST_CLONE;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
 
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
@@ -1639,7 +1639,7 @@ retry:
	if (bkey_err(k))
		goto err;
 
-	bkey_on_stack_reassemble(&sk, c, k);
+	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);
	bch2_trans_unlock(&trans);
 
@@ -1660,7 +1660,7 @@ retry:
 out:
	bch2_rbio_done(rbio);
	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
	return;
 err:
	rbio->bio.bi_status = BLK_STS_IOERR;
@@ -1673,14 +1673,14 @@ static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
 {
	struct btree_trans trans;
	struct btree_iter *iter;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	struct bkey_s_c k;
	int ret;
 
	flags &= ~BCH_READ_LAST_FRAGMENT;
	flags |= BCH_READ_MUST_CLONE;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
 retry:
	bch2_trans_begin(&trans);
@@ -1690,7 +1690,7 @@ retry:
			   BTREE_ITER_SLOTS, k, ret) {
		unsigned bytes, sectors, offset_into_extent;
 
-		bkey_on_stack_reassemble(&sk, c, k);
+		bch2_bkey_buf_reassemble(&sk, c, k);
 
		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
@@ -1739,7 +1739,7 @@ err:
	rbio->bio.bi_status = BLK_STS_IOERR;
 out:
	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
	bch2_rbio_done(rbio);
 }
 
@@ -1810,17 +1810,6 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
	if ((ret = bkey_err(k)))
		goto out;
 
-	/*
-	 * going to be temporarily appending another checksum entry:
-	 */
-	new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
-				 BKEY_EXTENT_U64s_MAX * 8);
-	if ((ret = PTR_ERR_OR_ZERO(new)))
-		goto out;
-
-	bkey_reassemble(new, k);
-	k = bkey_i_to_s_c(new);
-
	if (bversion_cmp(k.k->version, rbio->version) ||
	    !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
		goto out;
@@ -1839,6 +1828,16 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
		goto out;
	}
 
+	/*
+	 * going to be temporarily appending another checksum entry:
+	 */
+	new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
+				 sizeof(struct bch_extent_crc128));
+	if ((ret = PTR_ERR_OR_ZERO(new)))
+		goto out;
+
+	bkey_reassemble(new, k);
+
	if (!bch2_bkey_narrow_crcs(new, new_crc))
		goto out;
 
@@ -2005,7 +2004,7 @@ static void bch2_read_endio(struct bio *bio)
 
 int __bch2_read_indirect_extent(struct btree_trans *trans,
				unsigned *offset_into_extent,
-				struct bkey_on_stack *orig_k)
+				struct bkey_buf *orig_k)
 {
	struct btree_iter *iter;
	struct bkey_s_c k;
@@ -2032,7 +2031,7 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
	}
 
	*offset_into_extent = iter->pos.offset - bkey_start_offset(k.k);
-	bkey_on_stack_reassemble(orig_k, trans->c, k);
+	bch2_bkey_buf_reassemble(orig_k, trans->c, k);
 err:
	bch2_trans_iter_put(trans, iter);
	return ret;
@@ -2304,7 +2303,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
 {
	struct btree_trans trans;
	struct btree_iter *iter;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	struct bkey_s_c k;
	unsigned flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE|
@@ -2318,7 +2317,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
	rbio->c = c;
	rbio->start_time = local_clock();
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
 retry:
	bch2_trans_begin(&trans);
@@ -2341,7 +2340,7 @@ retry:
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;
 
-		bkey_on_stack_reassemble(&sk, c, k);
+		bch2_bkey_buf_reassemble(&sk, c, k);
 
		ret = bch2_read_indirect_extent(&trans,
					&offset_into_extent, &sk);
@@ -2378,7 +2377,7 @@ retry:
	}
 out:
	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
	return;
 err:
	if (ret == -EINTR)
fs/bcachefs/io.h
@@ -3,7 +3,7 @@
 #define _BCACHEFS_IO_H
 
 #include "checksum.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "io_types.h"
 
 #define to_wbio(_bio)			\
@@ -118,11 +118,11 @@ struct cache_promote_op;
 struct extent_ptr_decoded;
 
 int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
-				struct bkey_on_stack *);
+				struct bkey_buf *);
 
 static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    unsigned *offset_into_extent,
-					    struct bkey_on_stack *k)
+					    struct bkey_buf *k)
 {
	return k->k->k.type == KEY_TYPE_reflink_p
		? __bch2_read_indirect_extent(trans, offset_into_extent, k)
fs/bcachefs/journal.c
@@ -1097,7 +1097,7 @@ int bch2_fs_journal_init(struct journal *j)
 
	/* Btree roots: */
	j->entry_u64s_reserved +=
-		BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
+		BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX);
 
	atomic64_set(&j->reservations.counter,
		     ((union journal_res_state)
fs/bcachefs/journal_io.c
@@ -989,6 +989,8 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w,
 done:
	rcu_read_unlock();
 
+	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
+
	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
 }
 
fs/bcachefs/journal_types.h
@@ -20,7 +20,7 @@
 struct journal_buf {
	struct jset		*data;
 
-	BKEY_PADDED(key);
+	__BKEY_PADDED(key, BCH_REPLICAS_MAX);
 
	struct closure_waitlist	wait;
 
fs/bcachefs/migrate.c
@@ -4,7 +4,7 @@
 */
 
 #include "bcachefs.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "buckets.h"
@@ -41,10 +41,10 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	int ret = 0;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
 
	iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
@@ -57,7 +57,7 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
			continue;
		}
 
-		bkey_on_stack_reassemble(&sk, c, k);
+		bch2_bkey_buf_reassemble(&sk, c, k);
 
		ret = drop_dev_ptrs(c, bkey_i_to_s(sk.k),
				    dev_idx, flags, false);
@@ -90,7 +90,7 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
	}
 
	ret = bch2_trans_exit(&trans) ?: ret;
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
 
	BUG_ON(ret == -EINTR);
 
@@ -109,6 +109,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
	struct btree_iter *iter;
	struct closure cl;
	struct btree *b;
+	struct bkey_buf k;
	unsigned id;
	int ret;
 
@@ -116,28 +117,28 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
	if (flags & BCH_FORCE_IF_METADATA_LOST)
		return -EINVAL;
 
+	bch2_bkey_buf_init(&k);
	bch2_trans_init(&trans, c, 0, 0);
	closure_init_stack(&cl);
 
	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&trans, iter, id, POS_MIN,
				    BTREE_ITER_PREFETCH, b) {
-			__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
 retry:
			if (!bch2_bkey_has_device(bkey_i_to_s_c(&b->key),
						  dev_idx))
				continue;
 
-			bkey_copy(&tmp.k, &b->key);
+			bch2_bkey_buf_copy(&k, c, &b->key);
 
-			ret = drop_dev_ptrs(c, bkey_i_to_s(&tmp.k),
+			ret = drop_dev_ptrs(c, bkey_i_to_s(k.k),
					    dev_idx, flags, true);
			if (ret) {
				bch_err(c, "Cannot drop device without losing data");
				goto err;
			}
 
-			ret = bch2_btree_node_update_key(c, iter, b, &tmp.k);
+			ret = bch2_btree_node_update_key(c, iter, b, k.k);
			if (ret == -EINTR) {
				b = bch2_btree_iter_peek_node(iter);
				goto retry;
@@ -157,6 +158,7 @@ retry:
	ret = 0;
 err:
	ret = bch2_trans_exit(&trans) ?: ret;
+	bch2_bkey_buf_exit(&k, c);
 
	BUG_ON(ret == -EINTR);
 
fs/bcachefs/move.c
@@ -2,7 +2,7 @@
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_gc.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
@@ -60,8 +60,13 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
+	struct bkey_buf _new, _insert;
	int ret = 0;
 
+	bch2_bkey_buf_init(&_new);
+	bch2_bkey_buf_init(&_insert);
+	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
+
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
 
	iter = bch2_trans_get_iter(&trans, m->btree_id,
@@ -72,7 +77,6 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
		struct bkey_s_c k;
		struct bkey_i *insert;
		struct bkey_i_extent *new;
-		BKEY_PADDED(k) _new, _insert;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		bool did_work = false;
@@ -92,11 +96,11 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
		    !bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
			goto nomatch;
 
-		bkey_reassemble(&_insert.k, k);
-		insert = &_insert.k;
+		bkey_reassemble(_insert.k, k);
+		insert = _insert.k;
 
-		bkey_copy(&_new.k, bch2_keylist_front(keys));
-		new = bkey_i_to_extent(&_new.k);
+		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
+		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter->pos, &new->k_i);
 
		bch2_cut_front(iter->pos, insert);
@@ -192,6 +196,8 @@ nomatch:
	}
 out:
	bch2_trans_exit(&trans);
+	bch2_bkey_buf_exit(&_insert, c);
+	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(ret == -EINTR);
	return ret;
 }
@@ -511,7 +517,7 @@ static int __bch2_move_data(struct bch_fs *c,
 {
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
@@ -520,7 +526,7 @@ static int __bch2_move_data(struct bch_fs *c,
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
 
	stats->data_type = BCH_DATA_user;
@@ -600,7 +606,7 @@ peek:
		}
 
		/* unlock before doing IO: */
-		bkey_on_stack_reassemble(&sk, c, k);
+		bch2_bkey_buf_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);
		bch2_trans_unlock(&trans);
 
@@ -634,7 +640,7 @@ next_nondata:
	}
 out:
	ret = bch2_trans_exit(&trans) ?: ret;
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
 
	return ret;
 }
fs/bcachefs/recovery.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "bkey_buf.h"
 #include "alloc_background.h"
 #include "btree_gc.h"
 #include "btree_update.h"
@@ -224,28 +225,29 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b
 
		if (b->c.level) {
			struct btree *child;
-			BKEY_PADDED(k) tmp;
+			struct bkey_buf tmp;
 
-			bkey_reassemble(&tmp.k, k);
-			k = bkey_i_to_s_c(&tmp.k);
+			bch2_bkey_buf_init(&tmp);
+			bch2_bkey_buf_reassemble(&tmp, c, k);
+			k = bkey_i_to_s_c(tmp.k);
 
			bch2_btree_and_journal_iter_advance(&iter);
 
			if (b->c.level > 0) {
-				child = bch2_btree_node_get_noiter(c, &tmp.k,
-							b->c.btree_id, b->c.level - 1);
-				ret = PTR_ERR_OR_ZERO(child);
-				if (ret)
-					break;
+				child = bch2_btree_node_get_noiter(c, tmp.k,
							b->c.btree_id, b->c.level - 1);
+				bch2_bkey_buf_exit(&tmp, c);
 
-				ret = (node_fn ? node_fn(c, b) : 0) ?:
-					bch2_btree_and_journal_walk_recurse(c, child,
-						journal_keys, btree_id, node_fn, key_fn);
-				six_unlock_read(&child->c.lock);
+				ret = PTR_ERR_OR_ZERO(child);
+				if (ret)
+					break;
 
-				if (ret)
-					break;
-			}
+				ret = (node_fn ? node_fn(c, b) : 0) ?:
+					bch2_btree_and_journal_walk_recurse(c, child,
						journal_keys, btree_id, node_fn, key_fn);
+				six_unlock_read(&child->c.lock);
+
+				if (ret)
+					break;
+			}
		} else {
			bch2_btree_and_journal_iter_advance(&iter);
		}
fs/bcachefs/reflink.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "extents.h"
 #include "inode.h"
@@ -198,8 +198,7 @@ s64 bch2_remap_range(struct bch_fs *c,
	struct btree_trans trans;
	struct btree_iter *dst_iter, *src_iter;
	struct bkey_s_c src_k;
-	BKEY_PADDED(k) new_dst;
-	struct bkey_on_stack new_src;
+	struct bkey_buf new_dst, new_src;
	struct bpos dst_end = dst_start, src_end = src_start;
	struct bpos dst_want, src_want;
	u64 src_done, dst_done;
@@ -216,7 +215,8 @@ s64 bch2_remap_range(struct bch_fs *c,
	dst_end.offset += remap_sectors;
	src_end.offset += remap_sectors;
 
-	bkey_on_stack_init(&new_src);
+	bch2_bkey_buf_init(&new_dst);
+	bch2_bkey_buf_init(&new_src);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096);
 
	src_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, src_start,
@@ -257,7 +257,7 @@ s64 bch2_remap_range(struct bch_fs *c,
			break;
 
		if (src_k.k->type != KEY_TYPE_reflink_p) {
-			bkey_on_stack_reassemble(&new_src, c, src_k);
+			bch2_bkey_buf_reassemble(&new_src, c, src_k);
			src_k = bkey_i_to_s_c(new_src.k);
 
			bch2_cut_front(src_iter->pos, new_src.k);
@@ -275,7 +275,7 @@ s64 bch2_remap_range(struct bch_fs *c,
			struct bkey_s_c_reflink_p src_p =
				bkey_s_c_to_reflink_p(src_k);
			struct bkey_i_reflink_p *dst_p =
-				bkey_reflink_p_init(&new_dst.k);
+				bkey_reflink_p_init(new_dst.k);
 
			u64 offset = le64_to_cpu(src_p.v->idx) +
				(src_iter->pos.offset -
@@ -286,12 +286,12 @@ s64 bch2_remap_range(struct bch_fs *c,
			BUG();
		}
 
-		new_dst.k.k.p = dst_iter->pos;
-		bch2_key_resize(&new_dst.k.k,
+		new_dst.k->k.p = dst_iter->pos;
+		bch2_key_resize(&new_dst.k->k,
				min(src_k.k->p.offset - src_iter->pos.offset,
				    dst_end.offset - dst_iter->pos.offset));
 
-		ret = bch2_extent_update(&trans, dst_iter, &new_dst.k,
+		ret = bch2_extent_update(&trans, dst_iter, new_dst.k,
					 NULL, journal_seq,
					 new_i_size, i_sectors_delta);
		if (ret)
@@ -333,7 +333,8 @@ err:
	} while (ret2 == -EINTR);
 
	ret = bch2_trans_exit(&trans) ?: ret;
-	bkey_on_stack_exit(&new_src, c);
+	bch2_bkey_buf_exit(&new_src, c);
+	bch2_bkey_buf_exit(&new_dst, c);
 
	percpu_ref_put(&c->writes);