/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_H
#define _BCACHEFS_BTREE_UPDATE_H

#include "btree_iter.h"
#include "journal.h"

struct bch_fs;
struct btree;

void bch2_btree_node_prep_for_write(struct btree_trans *,
				    struct btree_path *, struct btree *);
bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_path *,
				struct btree *, struct btree_node_iter *,
				struct bkey_i *);

int bch2_btree_node_flush0(struct journal *, struct journal_entry_pin *, u64);
int bch2_btree_node_flush1(struct journal *, struct journal_entry_pin *, u64);
void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);

void bch2_btree_insert_key_leaf(struct btree_trans *, struct btree_path *,
				struct bkey_i *, u64);

enum btree_insert_flags {
	/* First bits for bch_watermark: */
	__BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,
	__BTREE_INSERT_NOCHECK_RW,
	__BTREE_INSERT_LAZY_RW,
	__BTREE_INSERT_JOURNAL_REPLAY,
	__BTREE_INSERT_JOURNAL_RECLAIM,
	__BTREE_INSERT_NOWAIT,
	__BTREE_INSERT_GC_LOCK_HELD,
	__BCH_HASH_SET_MUST_CREATE,
	__BCH_HASH_SET_MUST_REPLACE,
};

/* Don't check for -ENOSPC: */
#define BTREE_INSERT_NOFAIL		BIT(__BTREE_INSERT_NOFAIL)

#define BTREE_INSERT_NOCHECK_RW		BIT(__BTREE_INSERT_NOCHECK_RW)
#define BTREE_INSERT_LAZY_RW		BIT(__BTREE_INSERT_LAZY_RW)

/* Insert is for journal replay - don't get journal reservations: */
#define BTREE_INSERT_JOURNAL_REPLAY	BIT(__BTREE_INSERT_JOURNAL_REPLAY)

/* Insert is being called from the journal reclaim path: */
#define BTREE_INSERT_JOURNAL_RECLAIM	BIT(__BTREE_INSERT_JOURNAL_RECLAIM)

/* Don't block on allocation failure (for new btree nodes): */
#define BTREE_INSERT_NOWAIT		BIT(__BTREE_INSERT_NOWAIT)
#define BTREE_INSERT_GC_LOCK_HELD	BIT(__BTREE_INSERT_GC_LOCK_HELD)

#define BCH_HASH_SET_MUST_CREATE	BIT(__BCH_HASH_SET_MUST_CREATE)
#define BCH_HASH_SET_MUST_REPLACE	BIT(__BCH_HASH_SET_MUST_REPLACE)

int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
				unsigned, unsigned);
int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
int bch2_btree_delete_at_buffered(struct btree_trans *, enum btree_id, struct bpos);

int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
				struct bkey_i *, enum btree_update_flags);

int __bch2_btree_insert(struct btree_trans *, enum btree_id, struct bkey_i *,
			enum btree_update_flags);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
		      struct disk_reservation *, u64 *, int flags);

int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
				  struct bpos, struct bpos, unsigned, u64 *);
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
			    struct bpos, struct bpos, unsigned, u64 *);

int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);

int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
				     struct bpos, struct bpos);

/*
 * For use when splitting extents in existing snapshots:
 *
 * If @old_pos is an interior snapshot node, iterate over descendant snapshot
 * nodes: for every descendant snapshot in which @old_pos is overwritten and
 * not visible, emit a whiteout at @new_pos.
 */
static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
						 enum btree_id btree,
						 struct bpos old_pos,
						 struct bpos new_pos)
{
	if (!btree_type_has_snapshots(btree) ||
	    bkey_eq(old_pos, new_pos))
		return 0;

	return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos);
}
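
/*
 * Illustrative sketch (hypothetical caller, not taken from real code):
 * typically chained ahead of the extent update itself, e.g.:
 *
 *	ret =   bch2_insert_snapshot_whiteouts(trans, BTREE_ID_extents,
 *					       old_pos, new_pos) ?:
 *		bch2_trans_update(trans, &iter, new_k, 0);
 */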

int bch2_trans_update_extent(struct btree_trans *, struct btree_iter *,
			     struct bkey_i *, enum btree_update_flags);

int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
			     enum btree_id, struct bpos);

int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
				   struct bkey_i *, enum btree_update_flags);
int __must_check bch2_trans_update_seq(struct btree_trans *, u64, struct btree_iter *,
				       struct bkey_i *, enum btree_update_flags);
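/*
 * Updates via the btree write buffer don't take a btree path; they're only
 * suitable when the update doesn't depend on reading the existing key.
 */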
int __must_check bch2_trans_update_buffered(struct btree_trans *,
					    enum btree_id, struct bkey_i *);

void bch2_trans_commit_hook(struct btree_trans *,
			    struct btree_trans_commit_hook *);
int __bch2_trans_commit(struct btree_trans *, unsigned);

int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
int bch2_journal_log_msg(struct bch_fs *, const char *, ...);

/**
 * bch2_trans_commit - insert keys at given iterator positions
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
 * -EROFS: filesystem read only
 * -EIO: journal or btree node IO error
 */
static inline int bch2_trans_commit(struct btree_trans *trans,
				    struct disk_reservation *disk_res,
				    u64 *journal_seq,
				    unsigned flags)
{
	trans->disk_res		= disk_res;
	trans->journal_seq	= journal_seq;

	return __bch2_trans_commit(trans, flags);
}
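
/*
 * Illustrative sketch (hypothetical iterator and key, not from a real caller):
 * queue an update with bch2_trans_update(), then commit it:
 *
 *	ret =   bch2_trans_update(trans, &iter, &new_key, 0) ?:
 *		bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
 */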

#define commit_do(_trans, _disk_res, _journal_seq, _flags, _do)	\
	lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_flags)))

#define nested_commit_do(_trans, _disk_res, _journal_seq, _flags, _do)	\
	nested_lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_flags)))

#define bch2_trans_do(_c, _disk_res, _journal_seq, _flags, _do)	\
({									\
	struct btree_trans trans;					\
	int _ret;							\
									\
	bch2_trans_init(&trans, (_c), 0, 0);				\
	_ret = commit_do(&trans, _disk_res, _journal_seq, _flags, _do);	\
	bch2_trans_exit(&trans);					\
									\
	_ret;								\
})

#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans trans;					\
	int _ret;							\
									\
	bch2_trans_init(&trans, (_c), 0, 0);				\
	_ret = (_do);							\
	bch2_trans_exit(&trans);					\
									\
	_ret;								\
})
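
/*
 * Illustrative usage (hypothetical helper names): the _do expression refers to
 * the "trans" local declared inside the macro:
 *
 *	ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
 *			    my_update_fn(&trans, arg));
 *
 *	ret = bch2_trans_run(c, my_read_fn(&trans, arg));
 */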

#define trans_for_each_update(_trans, _i)				\
	for ((_i) = (_trans)->updates;					\
	     (_i) < (_trans)->updates + (_trans)->nr_updates;		\
	     (_i)++)

#define trans_for_each_wb_update(_trans, _i)				\
	for ((_i) = (_trans)->wb_updates;				\
	     (_i) < (_trans)->wb_updates + (_trans)->nr_wb_updates;	\
	     (_i)++)

static inline void bch2_trans_reset_updates(struct btree_trans *trans)
{
	struct btree_insert_entry *i;

	trans_for_each_update(trans, i)
		bch2_path_put(trans, i->path, true);

	trans->extra_journal_res	= 0;
	trans->nr_updates		= 0;
	trans->nr_wb_updates		= 0;
	trans->wb_updates		= NULL;
	trans->hooks			= NULL;
	trans->extra_journal_entries.nr	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset((void *) trans->fs_usage_deltas +
		       offsetof(struct replicas_delta_list, memset_start), 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}
}

static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k,
							    unsigned type, unsigned min_bytes)
{
	unsigned bytes = max_t(unsigned, min_bytes, bkey_bytes(k.k));
	struct bkey_i *mut;

	if (type && k.k->type != type)
		return ERR_PTR(-ENOENT);

	mut = bch2_trans_kmalloc_nomemzero(trans, bytes);
	if (!IS_ERR(mut)) {
		bkey_reassemble(mut, k);

		if (unlikely(bytes > bkey_bytes(k.k))) {
			memset((void *) mut + bkey_bytes(k.k), 0,
			       bytes - bkey_bytes(k.k));
			mut->k.u64s = DIV_ROUND_UP(bytes, sizeof(u64));
		}
	}
	return mut;
}

static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k)
{
	return __bch2_bkey_make_mut_noupdate(trans, k, 0, 0);
}

#define bch2_bkey_make_mut_noupdate_typed(_trans, _k, _type)		\
	bkey_i_to_##_type(__bch2_bkey_make_mut_noupdate(_trans, _k,	\
				KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))

static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
						  struct bkey_s_c *k, unsigned flags,
						  unsigned type, unsigned min_bytes)
{
	struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes);
	int ret;

	if (IS_ERR(mut))
		return mut;

	ret = bch2_trans_update(trans, iter, mut, flags);
	if (ret)
		return ERR_PTR(ret);

	*k = bkey_i_to_s_c(mut);
	return mut;
}

static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
						struct bkey_s_c *k, unsigned flags)
{
	return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0);
}

#define bch2_bkey_make_mut_typed(_trans, _iter, _k, _flags, _type)	\
	bkey_i_to_##_type(__bch2_bkey_make_mut(_trans, _iter, _k, _flags,\
				KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))

static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
					 struct btree_iter *iter,
					 unsigned btree_id, struct bpos pos,
					 unsigned flags, unsigned type, unsigned min_bytes)
{
	struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
				btree_id, pos, flags|BTREE_ITER_INTENT, type);
	struct bkey_i *ret = unlikely(IS_ERR(k.k))
		? ERR_CAST(k.k)
		: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
	if (unlikely(IS_ERR(ret)))
		bch2_trans_iter_exit(trans, iter);
	return ret;
}

static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
					 struct btree_iter *iter,
					 unsigned btree_id, struct bpos pos,
					 unsigned flags)
{
	return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0);
}

static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
					 struct btree_iter *iter,
					 unsigned btree_id, struct bpos pos,
					 unsigned flags, unsigned type, unsigned min_bytes)
{
	struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
			btree_id, pos, flags|BTREE_ITER_INTENT, type, min_bytes);
	int ret;

	if (IS_ERR(mut))
		return mut;

	ret = bch2_trans_update(trans, iter, mut, flags);
	if (ret) {
		bch2_trans_iter_exit(trans, iter);
		return ERR_PTR(ret);
	}

	return mut;
}

static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans,
						       struct btree_iter *iter,
						       unsigned btree_id, struct bpos pos,
						       unsigned flags, unsigned min_bytes)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes);
}

static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned flags)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0);
}

#define bch2_bkey_get_mut_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _iter,		\
			_btree_id, _pos, _flags,			\
			KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
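
/*
 * Illustrative sketch (hypothetical caller; BTREE_ID_alloc/alloc_v4 chosen
 * only as an example): look up a key, get a mutable copy already queued as an
 * update, then modify it in place:
 *
 *	struct bkey_i_alloc_v4 *a =
 *		bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_alloc, pos, 0,
 *					alloc_v4);
 *	if (IS_ERR(a))
 *		return PTR_ERR(a);
 *	a->v.gen++;
 */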

static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter,
					       unsigned flags, unsigned type, unsigned val_size)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_size);
	int ret;

	if (IS_ERR(k))
		return k;

	bkey_init(&k->k);
	k->k.p = iter->pos;
	k->k.type = type;
	set_bkey_val_bytes(&k->k, val_size);

	ret = bch2_trans_update(trans, iter, k, flags);
	if (unlikely(ret))
		return ERR_PTR(ret);
	return k;
}

#define bch2_bkey_alloc(_trans, _iter, _flags, _type)			\
	bkey_i_to_##_type(__bch2_bkey_alloc(_trans, _iter, _flags,	\
				KEY_TYPE_##_type, sizeof(struct bch_##_type)))
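
/*
 * Illustrative sketch (hypothetical caller; the subvolume type is only an
 * example): allocate a zeroed value of the given type at the iterator's
 * position and queue it as an update in one step:
 *
 *	struct bkey_i_subvolume *new_subvol =
 *		bch2_bkey_alloc(trans, &iter, 0, subvolume);
 *	if (IS_ERR(new_subvol))
 *		return PTR_ERR(new_subvol);
 *	new_subvol->v.snapshot = cpu_to_le32(snapshot_id);
 */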

#endif /* _BCACHEFS_BTREE_UPDATE_H */