/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_CACHE_H
#define _BCACHEFS_BTREE_CACHE_H

#include "bcachefs.h"
#include "btree_types.h"
#include "bkey_methods.h"

extern const char * const bch2_btree_node_flags[];

struct btree_iter;

void bch2_recalc_btree_reserve(struct bch_fs *);
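
/*
 * Cached btree nodes are indexed by an rhashtable, keyed off
 * btree_ptr_hash_val() (defined below):
 */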
void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
				unsigned, enum btree_id);
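
/*
 * The cannibalize lock lets a thread that failed to allocate memory for a
 * new btree node free and reuse an already cached node instead.  Only one
 * thread may cannibalize at a time; passing a closure lets the caller wait
 * for the lock instead of failing immediately:
 */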
void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
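
/* Allocate the in-memory data structures for a new btree node: */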
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
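
/*
 * Find a btree node in the cache, reading it in from disk if necessary;
 * on success, the node is returned locked with the requested six lock
 * type:
 */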
struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
				  const struct bkey_i *, unsigned,
				  enum six_lock_type, unsigned long);

struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
					 enum btree_id, unsigned, bool);

int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
			     const struct bkey_i *, enum btree_id, unsigned);

void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);

void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
void bch2_fs_btree_cache_init_early(struct btree_cache *);
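
/*
 * Hash table key for a cached btree node: the first pointer for v1 btree
 * pointers, the node's sequence number for v2.  Zero means the node isn't
 * hashed (see btree_node_hashed() below):
 */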
static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
{
	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
		return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
	case KEY_TYPE_btree_ptr_v2:
		/*
		 * The cast/deref is only necessary to avoid sparse endianness
		 * warnings:
		 */
		return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
	default:
		return 0;
	}
}
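
/*
 * btree_ptr_v2 keys have an in-memory-only mem_ptr field that can cache a
 * pointer to the node's in-memory representation, avoiding a hash table
 * lookup:
 */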
static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
{
	return k->k.type == KEY_TYPE_btree_ptr_v2
		? (void *)(unsigned long)bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
		: NULL;
}

/* is btree node in hash table? */
static inline bool btree_node_hashed(struct btree *b)
{
	return b->hash_val != 0;
}
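
/*
 * Iterate over every btree node in the cache.  This walks an rhashtable
 * with the _rcu iterators, so the caller must hold rcu_read_lock() across
 * the whole walk.  A minimal, illustrative sketch (variable names are not
 * from this header):
 *
 *	struct bucket_table *tbl;
 *	struct rhash_head *pos;
 *	struct btree *b;
 *	unsigned i;
 *
 *	rcu_read_lock();
 *	for_each_cached_btree(b, c, tbl, i, pos)
 *		pr_debug("cached node %px", b);
 *	rcu_read_unlock();
 */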
#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos)		\
	for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl,	\
					  &(_c)->btree_cache.table),	\
	     _iter = 0;	_iter < (_tbl)->size; _iter++)			\
		rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
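
/* Btree node size in various units: bytes, max u64s of key data, pages, blocks: */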
static inline size_t btree_bytes(struct bch_fs *c)
{
	return c->opts.btree_node_size;
}

static inline size_t btree_max_u64s(struct bch_fs *c)
{
	return (btree_bytes(c) - sizeof(struct btree_node)) / sizeof(u64);
}

static inline size_t btree_pages(struct bch_fs *c)
{
	return btree_bytes(c) / PAGE_SIZE;
}

static inline unsigned btree_blocks(struct bch_fs *c)
{
	return btree_sectors(c) >> c->block_bits;
}
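
/*
 * Nodes are split when more than ~2/3 full, and become candidates for
 * merging with a sibling when less than ~1/3 full; the hysteresis adds 25%
 * to the merge threshold so nodes don't ping-pong between merging and
 * splitting:
 */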
#define BTREE_SPLIT_THRESHOLD(c) (btree_max_u64s(c) * 2 / 3)

#define BTREE_FOREGROUND_MERGE_THRESHOLD(c)	(btree_max_u64s(c) * 1 / 3)
#define BTREE_FOREGROUND_MERGE_HYSTERESIS(c)			\
	(BTREE_FOREGROUND_MERGE_THRESHOLD(c) +			\
	 (BTREE_FOREGROUND_MERGE_THRESHOLD(c) >> 2))
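
/*
 * Btree IDs below BTREE_ID_NR are the statically known btrees; roots for
 * IDs past that live in btree_roots_extra (presumably for btrees from a
 * newer on-disk format that this version doesn't know about):
 */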
static inline unsigned btree_id_nr_alive(struct bch_fs *c)
{
	return BTREE_ID_NR + c->btree_roots_extra.nr;
}
static inline struct btree_root *bch2_btree_id_root(struct bch_fs *c, unsigned id)
{
	if (likely(id < BTREE_ID_NR)) {
		return &c->btree_roots_known[id];
	} else {
		unsigned idx = id - BTREE_ID_NR;

		EBUG_ON(idx >= c->btree_roots_extra.nr);
		return &c->btree_roots_extra.data[idx];
	}
}
static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
{
	return bch2_btree_id_root(c, b->c.btree_id)->b;
}
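
/* Helpers for formatting btree/node info, used when printing error and debug messages: */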
const char *bch2_btree_id_str(enum btree_id);
void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *);

#endif /* _BCACHEFS_BTREE_CACHE_H */