2017-03-17 06:18:50 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#undef TRACE_SYSTEM
|
|
|
|
#define TRACE_SYSTEM bcachefs
|
|
|
|
|
|
|
|
#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
|
|
|
|
|
|
|
|
#include <linux/tracepoint.h>
|
|
|
|
|
2022-08-10 16:42:55 +00:00
|
|
|
/*
 * Helpers for embedding a struct bpos (btree position) in a trace entry:
 * TRACE_BPOS_entries() declares the inode/offset/snapshot fields under a
 * common prefix, and TRACE_BPOS_assign() copies them from a struct bpos.
 */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* Event class for tracepoints whose only payload is a btree position. */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	/* inode:offset:snapshot */
	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
|
|
|
|
|
2023-11-26 22:02:06 +00:00
|
|
|
/*
 * Event class: filesystem (identified by dev_t) plus a caller-preformatted
 * string; the string is copied into the ring buffer via __string()/__assign_str().
 */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
|
|
|
|
|
2023-11-26 22:02:06 +00:00
|
|
|
/*
 * Event class: btree transaction context (fs dev, transaction fn name,
 * caller ip) plus a caller-preformatted string.
 */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
|
|
|
|
|
2024-01-04 23:59:17 +00:00
|
|
|
/* Same as trans_str, but without recording the caller instruction pointer. */
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
|
|
|
|
|
2023-12-02 08:36:27 +00:00
|
|
|
/*
 * Event class for btree-node events taken outside a transaction context:
 * records fs dev, node level, btree id and the node's key position.
 */
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
|
|
|
|
|
2023-12-02 08:36:27 +00:00
|
|
|
/*
 * Event class for btree-node events inside a transaction: like
 * btree_node_nofs but additionally records the transaction fn name.
 */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* Event class with no payload beyond identifying the filesystem. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
|
|
|
|
|
2023-12-02 08:36:27 +00:00
|
|
|
/* Event class: filesystem plus the name of the btree transaction's fn. */
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/*
 * Event class for block-IO events: device, start sector, length in
 * 512-byte sectors, and the rwbs flags string from blk_fill_rwbs().
 */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		/* bi_bdev may be NULL (bio not yet mapped to a device) */
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
|
|
|
|
|
2024-05-31 02:35:09 +00:00
|
|
|
/* fs.c: */
|
|
|
|
/* fs.c: sync_fs() entry — records the superblock's device and the wait flag. */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	wait			)
	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);
|
|
|
|
|
|
|
|
/* fs-io.c: */
|
|
|
|
/*
 * fs-io.c: fsync() entry — records device, inode, parent-directory inode
 * (via the dentry) and the datasync flag.
 */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(	ino_t,	parent			)
		__field(	int,	datasync		)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);
|
|
|
|
|
2022-04-03 19:13:20 +00:00
|
|
|
/* super-io.c: */
|
|
|
|
/* super-io.c: superblock write — records device and the caller's ip. */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* io.c: */
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/* io.c: read being promoted (rewritten to a faster device tier). */
DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
|
|
|
|
|
2023-09-12 00:44:33 +00:00
|
|
|
/* Read not promoted; records why as a bch2_err_str() string. */
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* io.c: read path bio events, all using the generic bio event class. */
DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
|
|
|
|
|
|
|
|
/* Journal */
|
|
|
|
|
|
|
|
/* Journal events. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Journal write submission, traced as a plain bio event. */
DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
|
|
|
|
|
2020-11-20 00:54:40 +00:00
|
|
|
/*
 * Journal reclaim pass starting: records whether it runs in the caller's
 * context (direct) or was kicked, the reclaim targets, and the current
 * dirty/total counts for the btree cache and btree key cache.
 */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
|
|
|
|
|
|
|
|
/* Journal reclaim pass done: how many entries were flushed. */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* bset.c: */
|
|
|
|
|
|
|
|
/* bset.c: failed to pack a bpos into the packed key format. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/* Btree cache: */
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/* Btree cache shrinker scan: requested, freeable, and actual counts. */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
|
|
|
|
|
2023-12-02 08:36:27 +00:00
|
|
|
/* Btree cache node reaped by the shrinker. */
DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* Btree cache cannibalize-lock lifecycle events. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
|
|
|
|
|
|
|
|
/* Btree */
|
|
|
|
|
|
|
|
/* Btree node read from disk. */
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/* Btree node write: node type plus bytes/sectors written. */
TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type , __entry->bytes, __entry->sectors)
);
|
|
|
|
|
|
|
|
/* Btree node allocated / freed. */
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
|
|
|
|
|
|
|
|
/*
 * Failed to get a btree node reserve: transaction fn, caller ip, how many
 * nodes were required, and the error as a bch2_err_str() string.
 */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/* Btree node topology-update events, all on the btree_node class. */
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/*
 * Failed to relock a btree path at @level: records the path identity
 * (btree, pos, path index), the lock counts held by this transaction and
 * on the node overall, and the iterator vs node lock sequence numbers so
 * the mismatch that caused the failure is visible.
 */
TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__field(u8,			path_idx)
		TRACE_BPOS_entries(pos)
		/* node: either "%px" of the node or an error string */
		__array(char,			node, 24	)
		__field(u8,			self_read_count	)
		__field(u8,			self_intent_count)
		__field(u8,			read_count	)
		__field(u8,			intent_count	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);

		/* locks this transaction itself holds on the node */
		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];

		if (IS_ERR(b)) {
			/* NOTE(review): read_count/intent_count are not set
			 * on this branch — verify the entry is pre-zeroed */
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		} else {
			/* total locks held on the node by anyone */
			c = six_lock_counts(&path->l[level].b->c.lock);
			__entry->read_count	= c.n[SIX_LOCK_read];
			__entry->intent_count	= c.n[SIX_LOCK_intent];
			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
		}
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/*
 * Failed to upgrade a btree path's lock at @level (e.g. read -> intent):
 * records path identity, whether the node was locked, this transaction's
 * lock counts vs the node's total counts, and lock sequence numbers.
 */
TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__field(u8,			path_idx)
		TRACE_BPOS_entries(pos)
		__field(u8,			locked	)
		__field(u8,			self_read_count	)
		__field(u8,			self_intent_count)
		__field(u8,			read_count	)
		__field(u8,			intent_count	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked			= btree_node_locked(path, level);

		/* NOTE(review): trailing ',' is the comma operator chaining
		 * into the next statement — works, but ';' would be clearer */
		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count		= c.n[SIX_LOCK_read];
		__entry->intent_count		= c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* Garbage collection */
|
|
|
|
|
2022-04-17 22:06:31 +00:00
|
|
|
/* Garbage-collection-of-generations pass start/end. */
DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
|
|
|
|
|
|
|
|
/* Allocator */
|
|
|
|
|
2024-05-04 20:46:29 +00:00
|
|
|
/* Allocator: bucket allocation success/failure, caller formats the detail. */
DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
|
|
|
|
|
2022-04-17 22:06:31 +00:00
|
|
|
TRACE_EVENT(discard_buckets,
|
|
|
|
TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
|
2022-07-18 02:31:21 +00:00
|
|
|
u64 need_journal_commit, u64 discarded, const char *err),
|
|
|
|
TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
|
2022-04-17 22:06:31 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(dev_t, dev )
|
|
|
|
__field(u64, seen )
|
|
|
|
__field(u64, open )
|
|
|
|
__field(u64, need_journal_commit )
|
|
|
|
__field(u64, discarded )
|
2022-07-18 02:31:21 +00:00
|
|
|
__array(char, err, 16 )
|
2022-04-17 22:06:31 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = c->dev;
|
|
|
|
__entry->seen = seen;
|
|
|
|
__entry->open = open;
|
|
|
|
__entry->need_journal_commit = need_journal_commit;
|
|
|
|
__entry->discarded = discarded;
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->err, err, sizeof(__entry->err));
|
2022-04-17 22:06:31 +00:00
|
|
|
),
|
|
|
|
|
2022-07-18 02:31:21 +00:00
|
|
|
TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
|
2022-04-17 22:06:31 +00:00
|
|
|
MAJOR(__entry->dev), MINOR(__entry->dev),
|
|
|
|
__entry->seen,
|
|
|
|
__entry->open,
|
|
|
|
__entry->need_journal_commit,
|
|
|
|
__entry->discarded,
|
2022-07-18 02:31:21 +00:00
|
|
|
__entry->err)
|
2022-04-17 22:06:31 +00:00
|
|
|
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
/* Cached-data bucket invalidated: device index, bucket, cached sectors. */
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* Moving IO */
|
|
|
|
|
2023-04-20 19:24:07 +00:00
|
|
|
/*
 * Moving IO: bucket selected for evacuation. The bucket's bpos encodes
 * the member device in .inode and the bucket number in .offset.
 */
TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);
|
|
|
|
|
2023-11-26 22:02:06 +00:00
|
|
|
/* Data-move extent lifecycle events; caller formats the extent as a string. */
DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
|
|
|
|
|
|
|
|
TRACE_EVENT(move_data,
|
2023-10-23 20:21:54 +00:00
|
|
|
TP_PROTO(struct bch_fs *c,
|
|
|
|
struct bch_move_stats *stats),
|
|
|
|
TP_ARGS(c, stats),
|
2017-03-17 06:18:50 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-10-23 20:21:54 +00:00
|
|
|
__field(dev_t, dev )
|
2017-03-17 06:18:50 +00:00
|
|
|
__field(u64, keys_moved )
|
2023-10-23 20:21:54 +00:00
|
|
|
__field(u64, keys_raced )
|
|
|
|
__field(u64, sectors_seen )
|
|
|
|
__field(u64, sectors_moved )
|
|
|
|
__field(u64, sectors_raced )
|
2017-03-17 06:18:50 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2023-10-23 20:21:54 +00:00
|
|
|
__entry->dev = c->dev;
|
|
|
|
__entry->keys_moved = atomic64_read(&stats->keys_moved);
|
|
|
|
__entry->keys_raced = atomic64_read(&stats->keys_raced);
|
|
|
|
__entry->sectors_seen = atomic64_read(&stats->sectors_seen);
|
|
|
|
__entry->sectors_moved = atomic64_read(&stats->sectors_moved);
|
|
|
|
__entry->sectors_raced = atomic64_read(&stats->sectors_raced);
|
2017-03-17 06:18:50 +00:00
|
|
|
),
|
|
|
|
|
2023-10-23 20:21:54 +00:00
|
|
|
TP_printk("%d,%d keys moved %llu raced %llu"
|
|
|
|
"sectors seen %llu moved %llu raced %llu",
|
2021-05-27 23:15:44 +00:00
|
|
|
MAJOR(__entry->dev), MINOR(__entry->dev),
|
2023-10-23 20:21:54 +00:00
|
|
|
__entry->keys_moved,
|
|
|
|
__entry->keys_raced,
|
|
|
|
__entry->sectors_seen,
|
|
|
|
__entry->sectors_moved,
|
|
|
|
__entry->sectors_raced)
|
2017-03-17 06:18:50 +00:00
|
|
|
);
|
|
|
|
|
2022-03-18 04:42:09 +00:00
|
|
|
TRACE_EVENT(evacuate_bucket,
|
|
|
|
TP_PROTO(struct bch_fs *c, struct bpos *bucket,
|
|
|
|
unsigned sectors, unsigned bucket_size,
|
2022-12-05 15:24:19 +00:00
|
|
|
u64 fragmentation, int ret),
|
|
|
|
TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),
|
2022-03-18 04:42:09 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(dev_t, dev )
|
|
|
|
__field(u64, member )
|
|
|
|
__field(u64, bucket )
|
|
|
|
__field(u32, sectors )
|
|
|
|
__field(u32, bucket_size )
|
2022-12-05 15:24:19 +00:00
|
|
|
__field(u64, fragmentation )
|
2022-03-18 04:42:09 +00:00
|
|
|
__field(int, ret )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = c->dev;
|
|
|
|
__entry->member = bucket->inode;
|
|
|
|
__entry->bucket = bucket->offset;
|
|
|
|
__entry->sectors = sectors;
|
|
|
|
__entry->bucket_size = bucket_size;
|
2022-12-05 15:24:19 +00:00
|
|
|
__entry->fragmentation = fragmentation;
|
2022-03-18 04:42:09 +00:00
|
|
|
__entry->ret = ret;
|
|
|
|
),
|
|
|
|
|
2022-12-05 15:24:19 +00:00
|
|
|
TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
|
2022-03-18 04:42:09 +00:00
|
|
|
MAJOR(__entry->dev), MINOR(__entry->dev),
|
|
|
|
__entry->member, __entry->bucket,
|
|
|
|
__entry->sectors, __entry->bucket_size,
|
2022-12-05 15:24:19 +00:00
|
|
|
__entry->fragmentation, __entry->ret)
|
2022-03-18 04:42:09 +00:00
|
|
|
);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
TRACE_EVENT(copygc,
|
2020-07-11 20:28:54 +00:00
|
|
|
TP_PROTO(struct bch_fs *c,
|
2017-03-17 06:18:50 +00:00
|
|
|
u64 sectors_moved, u64 sectors_not_moved,
|
|
|
|
u64 buckets_moved, u64 buckets_not_moved),
|
2020-07-11 20:28:54 +00:00
|
|
|
TP_ARGS(c,
|
2017-03-17 06:18:50 +00:00
|
|
|
sectors_moved, sectors_not_moved,
|
|
|
|
buckets_moved, buckets_not_moved),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2021-05-27 23:15:44 +00:00
|
|
|
__field(dev_t, dev )
|
2017-03-17 06:18:50 +00:00
|
|
|
__field(u64, sectors_moved )
|
|
|
|
__field(u64, sectors_not_moved )
|
|
|
|
__field(u64, buckets_moved )
|
|
|
|
__field(u64, buckets_not_moved )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2021-05-27 23:15:44 +00:00
|
|
|
__entry->dev = c->dev;
|
2017-03-17 06:18:50 +00:00
|
|
|
__entry->sectors_moved = sectors_moved;
|
|
|
|
__entry->sectors_not_moved = sectors_not_moved;
|
|
|
|
__entry->buckets_moved = buckets_moved;
|
|
|
|
__entry->buckets_not_moved = buckets_moved;
|
|
|
|
),
|
|
|
|
|
2021-05-27 23:15:44 +00:00
|
|
|
TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
|
|
|
|
MAJOR(__entry->dev), MINOR(__entry->dev),
|
|
|
|
__entry->sectors_moved, __entry->sectors_not_moved,
|
|
|
|
__entry->buckets_moved, __entry->buckets_not_moved)
|
2017-03-17 06:18:50 +00:00
|
|
|
);
|
|
|
|
|
2021-05-26 05:03:35 +00:00
|
|
|
TRACE_EVENT(copygc_wait,
|
|
|
|
TP_PROTO(struct bch_fs *c,
|
|
|
|
u64 wait_amount, u64 until),
|
|
|
|
TP_ARGS(c, wait_amount, until),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2021-05-27 23:15:44 +00:00
|
|
|
__field(dev_t, dev )
|
2021-05-26 05:03:35 +00:00
|
|
|
__field(u64, wait_amount )
|
|
|
|
__field(u64, until )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2021-05-27 23:15:44 +00:00
|
|
|
__entry->dev = c->dev;
|
2021-05-26 05:03:35 +00:00
|
|
|
__entry->wait_amount = wait_amount;
|
|
|
|
__entry->until = until;
|
|
|
|
),
|
|
|
|
|
2021-05-27 23:15:44 +00:00
|
|
|
TP_printk("%d,%u waiting for %llu sectors until %llu",
|
|
|
|
MAJOR(__entry->dev), MINOR(__entry->dev),
|
|
|
|
__entry->wait_amount, __entry->until)
|
2021-05-26 05:03:35 +00:00
|
|
|
);
|
|
|
|
|
2022-08-18 21:00:12 +00:00
|
|
|
/* btree transactions: */
|
|
|
|
|
2022-04-17 22:06:31 +00:00
|
|
|
DECLARE_EVENT_CLASS(transaction_event,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip),
|
2019-04-23 04:10:08 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2022-08-27 16:11:18 +00:00
|
|
|
__array(char, trans_fn, 32 )
|
2021-06-04 19:18:10 +00:00
|
|
|
__field(unsigned long, caller_ip )
|
2019-04-23 04:10:08 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
2021-06-04 19:18:10 +00:00
|
|
|
__entry->caller_ip = caller_ip;
|
2019-04-23 04:10:08 +00:00
|
|
|
),
|
|
|
|
|
2022-01-04 05:33:52 +00:00
|
|
|
TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
|
|
|
|
);
|
|
|
|
|
2022-04-17 22:06:31 +00:00
|
|
|
DEFINE_EVENT(transaction_event, transaction_commit,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-04-17 22:06:31 +00:00
|
|
|
unsigned long caller_ip),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip)
|
2022-04-17 22:06:31 +00:00
|
|
|
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
DEFINE_EVENT(transaction_event, trans_restart_injected,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-07-18 00:22:30 +00:00
|
|
|
unsigned long caller_ip),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip)
|
2022-07-18 00:22:30 +00:00
|
|
|
);
|
|
|
|
|
2023-03-30 20:04:02 +00:00
|
|
|
TRACE_EVENT(trans_restart_split_race,
|
2023-02-20 21:41:03 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2023-03-30 20:04:02 +00:00
|
|
|
unsigned long caller_ip,
|
|
|
|
struct btree *b),
|
|
|
|
TP_ARGS(trans, caller_ip, b),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__array(char, trans_fn, 32 )
|
|
|
|
__field(unsigned long, caller_ip )
|
|
|
|
__field(u8, level )
|
|
|
|
__field(u16, written )
|
|
|
|
__field(u16, blocks )
|
|
|
|
__field(u16, u64s_remaining )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
|
|
|
__entry->caller_ip = caller_ip;
|
|
|
|
__entry->level = b->c.level;
|
|
|
|
__entry->written = b->written;
|
|
|
|
__entry->blocks = btree_blocks(trans->c);
|
2024-01-16 18:29:59 +00:00
|
|
|
__entry->u64s_remaining = bch2_btree_keys_u64s_remaining(b);
|
2023-03-30 20:04:02 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
|
|
|
|
__entry->trans_fn, (void *) __entry->caller_ip,
|
|
|
|
__entry->level,
|
|
|
|
__entry->written, __entry->blocks,
|
|
|
|
__entry->u64s_remaining)
|
2023-02-20 21:41:03 +00:00
|
|
|
);
|
|
|
|
|
2024-08-10 18:31:17 +00:00
|
|
|
TRACE_EVENT(trans_blocked_journal_reclaim,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip),
|
2024-08-10 18:31:17 +00:00
|
|
|
TP_ARGS(trans, caller_ip),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__array(char, trans_fn, 32 )
|
|
|
|
__field(unsigned long, caller_ip )
|
|
|
|
|
|
|
|
__field(unsigned long, key_cache_nr_keys )
|
|
|
|
__field(unsigned long, key_cache_nr_dirty )
|
|
|
|
__field(long, must_wait )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
|
|
|
__entry->caller_ip = caller_ip;
|
|
|
|
__entry->key_cache_nr_keys = atomic_long_read(&trans->c->btree_key_cache.nr_keys);
|
|
|
|
__entry->key_cache_nr_dirty = atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
|
|
|
|
__entry->must_wait = __bch2_btree_key_cache_must_wait(trans->c);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
|
|
|
|
__entry->trans_fn, (void *) __entry->caller_ip,
|
|
|
|
__entry->key_cache_nr_keys,
|
|
|
|
__entry->key_cache_nr_dirty,
|
|
|
|
__entry->must_wait)
|
2019-04-23 04:10:08 +00:00
|
|
|
);
|
|
|
|
|
2022-08-27 16:23:38 +00:00
|
|
|
TRACE_EVENT(trans_restart_journal_preres_get,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-08-27 16:23:38 +00:00
|
|
|
unsigned long caller_ip,
|
|
|
|
unsigned flags),
|
|
|
|
TP_ARGS(trans, caller_ip, flags),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__array(char, trans_fn, 32 )
|
|
|
|
__field(unsigned long, caller_ip )
|
|
|
|
__field(unsigned, flags )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
2022-08-27 16:23:38 +00:00
|
|
|
__entry->caller_ip = caller_ip;
|
|
|
|
__entry->flags = flags;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%s %pS %x", __entry->trans_fn,
|
|
|
|
(void *) __entry->caller_ip,
|
|
|
|
__entry->flags)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2022-04-17 22:06:31 +00:00
|
|
|
DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2022-04-17 22:06:31 +00:00
|
|
|
DEFINE_EVENT(transaction_event, trans_traverse_all,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2022-04-17 22:06:31 +00:00
|
|
|
DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2023-11-26 22:02:06 +00:00
|
|
|
DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2023-11-26 22:02:06 +00:00
|
|
|
unsigned long caller_ip,
|
|
|
|
const char *paths),
|
|
|
|
TP_ARGS(trans, caller_ip, paths)
|
2022-07-05 21:27:44 +00:00
|
|
|
);
|
|
|
|
|
2021-06-04 19:18:10 +00:00
|
|
|
DECLARE_EVENT_CLASS(transaction_restart_iter,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path),
|
2021-06-04 19:18:10 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2022-08-27 16:11:18 +00:00
|
|
|
__array(char, trans_fn, 32 )
|
2022-02-16 05:42:34 +00:00
|
|
|
__field(unsigned long, caller_ip )
|
2021-06-04 19:18:10 +00:00
|
|
|
__field(u8, btree_id )
|
2022-08-10 16:42:55 +00:00
|
|
|
TRACE_BPOS_entries(pos)
|
2021-06-04 19:18:10 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
2022-02-16 05:42:34 +00:00
|
|
|
__entry->caller_ip = caller_ip;
|
2022-08-10 16:42:55 +00:00
|
|
|
__entry->btree_id = path->btree_id;
|
|
|
|
TRACE_BPOS_assign(pos, path->pos)
|
2021-06-04 19:18:10 +00:00
|
|
|
),
|
|
|
|
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_printk("%s %pS btree %s pos %llu:%llu:%u",
|
2022-01-04 05:33:52 +00:00
|
|
|
__entry->trans_fn,
|
2022-02-16 05:42:34 +00:00
|
|
|
(void *) __entry->caller_ip,
|
2023-10-20 02:49:08 +00:00
|
|
|
bch2_btree_id_str(__entry->btree_id),
|
2021-06-04 19:18:10 +00:00
|
|
|
__entry->pos_inode,
|
|
|
|
__entry->pos_offset,
|
|
|
|
__entry->pos_snapshot)
|
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2022-09-17 18:36:24 +00:00
|
|
|
TRACE_EVENT(trans_restart_upgrade,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip,
|
2022-09-17 18:36:24 +00:00
|
|
|
struct btree_path *path,
|
|
|
|
unsigned old_locks_want,
|
2023-10-27 19:23:46 +00:00
|
|
|
unsigned new_locks_want,
|
|
|
|
struct get_locks_fail *f),
|
|
|
|
TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),
|
2021-06-04 19:18:10 +00:00
|
|
|
|
2022-09-17 18:36:24 +00:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__array(char, trans_fn, 32 )
|
|
|
|
__field(unsigned long, caller_ip )
|
|
|
|
__field(u8, btree_id )
|
|
|
|
__field(u8, old_locks_want )
|
|
|
|
__field(u8, new_locks_want )
|
2023-10-27 19:23:46 +00:00
|
|
|
__field(u8, level )
|
|
|
|
__field(u32, path_seq )
|
|
|
|
__field(u32, node_seq )
|
2022-09-17 18:36:24 +00:00
|
|
|
TRACE_BPOS_entries(pos)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
2022-09-17 18:36:24 +00:00
|
|
|
__entry->caller_ip = caller_ip;
|
|
|
|
__entry->btree_id = path->btree_id;
|
|
|
|
__entry->old_locks_want = old_locks_want;
|
|
|
|
__entry->new_locks_want = new_locks_want;
|
2023-10-27 19:23:46 +00:00
|
|
|
__entry->level = f->l;
|
|
|
|
__entry->path_seq = path->l[f->l].lock_seq;
|
|
|
|
__entry->node_seq = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
|
2022-09-17 18:36:24 +00:00
|
|
|
TRACE_BPOS_assign(pos, path->pos)
|
|
|
|
),
|
|
|
|
|
2023-12-10 21:12:24 +00:00
|
|
|
TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
|
2022-09-17 18:36:24 +00:00
|
|
|
__entry->trans_fn,
|
|
|
|
(void *) __entry->caller_ip,
|
2023-10-20 02:49:08 +00:00
|
|
|
bch2_btree_id_str(__entry->btree_id),
|
2022-09-17 18:36:24 +00:00
|
|
|
__entry->pos_inode,
|
|
|
|
__entry->pos_offset,
|
|
|
|
__entry->pos_snapshot,
|
|
|
|
__entry->old_locks_want,
|
2023-10-27 19:23:46 +00:00
|
|
|
__entry->new_locks_want,
|
|
|
|
__entry->level,
|
|
|
|
__entry->path_seq,
|
2023-12-10 21:12:24 +00:00
|
|
|
__entry->node_seq)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2024-01-16 01:40:06 +00:00
|
|
|
DEFINE_EVENT(trans_str, trans_restart_relock,
|
|
|
|
TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
|
|
|
|
TP_ARGS(trans, caller_ip, str)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2022-01-09 03:59:58 +00:00
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-01-09 03:59:58 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2022-01-09 03:59:58 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-01-09 03:59:58 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2022-01-09 03:59:58 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-01-09 03:59:58 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2022-01-09 03:59:58 +00:00
|
|
|
);
|
|
|
|
|
2022-08-27 16:48:36 +00:00
|
|
|
DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-08-07 17:43:32 +00:00
|
|
|
unsigned long caller_ip),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip)
|
2022-08-07 17:43:32 +00:00
|
|
|
);
|
|
|
|
|
2022-01-09 03:59:58 +00:00
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-01-09 03:59:58 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2022-01-09 03:59:58 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-01-09 03:59:58 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2022-01-09 03:59:58 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-01-09 03:59:58 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2022-01-09 03:59:58 +00:00
|
|
|
);
|
|
|
|
|
2021-06-04 19:18:10 +00:00
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2021-06-04 19:18:10 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2021-06-04 19:18:10 +00:00
|
|
|
);
|
|
|
|
|
2022-02-18 05:47:45 +00:00
|
|
|
DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-02-18 05:47:45 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, caller_ip, path)
|
2022-02-18 05:47:45 +00:00
|
|
|
);
|
|
|
|
|
2024-01-04 23:59:17 +00:00
|
|
|
DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2023-05-26 20:59:07 +00:00
|
|
|
const char *cycle),
|
2024-01-04 23:59:17 +00:00
|
|
|
TP_ARGS(trans, cycle)
|
2019-04-23 04:10:08 +00:00
|
|
|
);
|
|
|
|
|
bcachefs: Deadlock cycle detector
We've outgrown our own deadlock avoidance strategy.
The btree iterator API provides an interface where the user doesn't need
to concern themselves with lock ordering - different btree iterators can
be traversed in any order. Without special care, this will lead to
deadlocks.
Our previous strategy was to define a lock ordering internally, and
whenever we attempt to take a lock and trylock() fails, we'd check if
the current btree transaction is holding any locks that cause a lock
ordering violation. If so, we'd issue a transaction restart, and then
bch2_trans_begin() would re-traverse all previously used iterators, but
in the correct order.
That approach had some issues, though.
- Sometimes we'd issue transaction restarts unnecessarily, when no
deadlock would have actually occurred. Lock ordering restarts have
become our primary cause of transaction restarts, on some workloads
totalling 20% of actual transaction commits.
- To avoid deadlock or livelock, we'd often have to take intent locks
when we only wanted a read lock: with the lock ordering approach, it
is actually illegal to hold _any_ read lock while blocking on an intent
lock, and this has been causing us unnecessary lock contention.
- It was getting fragile - the various lock ordering rules are not
trivial, and we'd been seeing occasional livelock issues related to
this machinery.
So, since bcachefs is already a relational database masquerading as a
filesystem, we're stealing the next traditional database technique and
switching to a cycle detector for avoiding deadlocks.
When we block taking a btree lock, after adding ourself to the waitlist
but before sleeping, we do a DFS of btree transactions waiting on other
btree transactions, starting with the current transaction and walking
our held locks, and transactions blocking on our held locks.
If we find a cycle, we emit a transaction restart. Occasionally (e.g.
the btree split path) we can not allow the lock() operation to fail, so
if necessary we'll tell another transaction that it has to fail.
Result: trans_restart_would_deadlock events are reduced by a factor of
10 to 100, and we'll be able to delete a whole bunch of grotty, fragile
code.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
2022-08-22 17:23:47 +00:00
|
|
|
DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
|
|
|
|
TP_PROTO(struct btree_trans *trans,
|
|
|
|
unsigned long caller_ip),
|
|
|
|
TP_ARGS(trans, caller_ip)
|
|
|
|
);
|
|
|
|
|
2021-09-08 01:25:32 +00:00
|
|
|
TRACE_EVENT(trans_restart_would_deadlock_write,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans),
|
|
|
|
TP_ARGS(trans),
|
2021-09-08 01:25:32 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2022-08-27 16:11:18 +00:00
|
|
|
__array(char, trans_fn, 32 )
|
2021-09-08 01:25:32 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
2021-09-08 01:25:32 +00:00
|
|
|
),
|
|
|
|
|
2022-01-04 05:33:52 +00:00
|
|
|
TP_printk("%s", __entry->trans_fn)
|
2021-09-08 01:25:32 +00:00
|
|
|
);
|
|
|
|
|
2019-05-15 14:54:43 +00:00
|
|
|
TRACE_EVENT(trans_restart_mem_realloced,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-01-04 05:33:52 +00:00
|
|
|
unsigned long caller_ip,
|
2021-04-15 16:50:09 +00:00
|
|
|
unsigned long bytes),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip, bytes),
|
2019-05-15 14:54:43 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2022-08-27 16:11:18 +00:00
|
|
|
__array(char, trans_fn, 32 )
|
2021-04-15 16:50:09 +00:00
|
|
|
__field(unsigned long, caller_ip )
|
|
|
|
__field(unsigned long, bytes )
|
2019-05-15 14:54:43 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
2021-04-15 16:50:09 +00:00
|
|
|
__entry->caller_ip = caller_ip;
|
|
|
|
__entry->bytes = bytes;
|
2019-05-15 14:54:43 +00:00
|
|
|
),
|
|
|
|
|
2022-01-04 05:33:52 +00:00
|
|
|
TP_printk("%s %pS bytes %lu",
|
|
|
|
__entry->trans_fn,
|
2021-04-15 16:50:09 +00:00
|
|
|
(void *) __entry->caller_ip,
|
|
|
|
__entry->bytes)
|
2019-04-23 04:10:08 +00:00
|
|
|
);
|
|
|
|
|
2022-03-18 01:35:51 +00:00
|
|
|
TRACE_EVENT(trans_restart_key_cache_key_realloced,
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_PROTO(struct btree_trans *trans,
|
2022-02-27 16:34:21 +00:00
|
|
|
unsigned long caller_ip,
|
2022-08-10 16:42:55 +00:00
|
|
|
struct btree_path *path,
|
2022-03-18 01:35:51 +00:00
|
|
|
unsigned old_u64s,
|
|
|
|
unsigned new_u64s),
|
2022-08-10 16:42:55 +00:00
|
|
|
TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
|
2022-03-18 01:35:51 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2022-08-27 16:11:18 +00:00
|
|
|
__array(char, trans_fn, 32 )
|
2022-03-18 01:35:51 +00:00
|
|
|
__field(unsigned long, caller_ip )
|
|
|
|
__field(enum btree_id, btree_id )
|
2022-08-10 16:42:55 +00:00
|
|
|
TRACE_BPOS_entries(pos)
|
2022-03-18 01:35:51 +00:00
|
|
|
__field(u32, old_u64s )
|
|
|
|
__field(u32, new_u64s )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-10-22 19:59:53 +00:00
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
2022-08-10 16:42:55 +00:00
|
|
|
__entry->caller_ip = caller_ip;
|
|
|
|
|
|
|
|
__entry->btree_id = path->btree_id;
|
|
|
|
TRACE_BPOS_assign(pos, path->pos);
|
2022-03-18 01:35:51 +00:00
|
|
|
__entry->old_u64s = old_u64s;
|
|
|
|
__entry->new_u64s = new_u64s;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
|
|
|
|
__entry->trans_fn,
|
|
|
|
(void *) __entry->caller_ip,
|
2023-10-20 02:49:08 +00:00
|
|
|
bch2_btree_id_str(__entry->btree_id),
|
2022-08-07 17:43:32 +00:00
|
|
|
__entry->pos_inode,
|
|
|
|
__entry->pos_offset,
|
2022-08-10 16:42:55 +00:00
|
|
|
__entry->pos_snapshot,
|
|
|
|
__entry->old_u64s,
|
|
|
|
__entry->new_u64s)
|
2022-08-07 17:43:32 +00:00
|
|
|
);
|
|
|
|
|
2023-10-27 19:23:46 +00:00
|
|
|
TRACE_EVENT(path_downgrade,
|
|
|
|
TP_PROTO(struct btree_trans *trans,
|
|
|
|
unsigned long caller_ip,
|
2023-11-13 02:47:15 +00:00
|
|
|
struct btree_path *path,
|
|
|
|
unsigned old_locks_want),
|
|
|
|
TP_ARGS(trans, caller_ip, path, old_locks_want),
|
2023-10-27 19:23:46 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__array(char, trans_fn, 32 )
|
|
|
|
__field(unsigned long, caller_ip )
|
2023-11-13 02:47:15 +00:00
|
|
|
__field(unsigned, old_locks_want )
|
|
|
|
__field(unsigned, new_locks_want )
|
|
|
|
__field(unsigned, btree )
|
|
|
|
TRACE_BPOS_entries(pos)
|
2023-10-27 19:23:46 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
|
|
|
__entry->caller_ip = caller_ip;
|
2023-11-13 02:47:15 +00:00
|
|
|
__entry->old_locks_want = old_locks_want;
|
|
|
|
__entry->new_locks_want = path->locks_want;
|
|
|
|
__entry->btree = path->btree_id;
|
|
|
|
TRACE_BPOS_assign(pos, path->pos);
|
2023-10-27 19:23:46 +00:00
|
|
|
),
|
|
|
|
|
2023-11-13 02:47:15 +00:00
|
|
|
TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
|
2023-10-27 19:23:46 +00:00
|
|
|
__entry->trans_fn,
|
2023-11-13 02:47:15 +00:00
|
|
|
(void *) __entry->caller_ip,
|
|
|
|
__entry->old_locks_want,
|
|
|
|
__entry->new_locks_want,
|
|
|
|
bch2_btree_id_str(__entry->btree),
|
|
|
|
__entry->pos_inode,
|
|
|
|
__entry->pos_offset,
|
|
|
|
__entry->pos_snapshot)
|
2023-10-27 19:23:46 +00:00
|
|
|
);
|
|
|
|
|
bcachefs: Btree write buffer
This adds a new method of doing btree updates - a straight write buffer,
implemented as a flat fixed size array.
This is only useful when we don't need to read from the btree in order
to do the update, and when reading is infrequent - perfect for the LRU
btree.
This will make LRU btree updates fast enough that we'll be able to use
it for persistently indexing buckets by fragmentation, which will be a
massive boost to copygc performance.
Changes:
- A new btree_insert_type enum, for btree_insert_entries. Specifies
btree, btree key cache, or btree write buffer.
- bch2_trans_update_buffered(): updates via the btree write buffer
don't need a btree path, so we need a new update path.
- Transaction commit path changes:
The update to the btree write buffer both mutates global state, and can
fail if there isn't currently room. Therefore we do all write buffer
updates in the transaction all at once, and also if it fails we have
to revert filesystem usage counter changes.
If there isn't room we flush the write buffer in the transaction
commit error path and retry.
- A new persistent option, for specifying the number of entries in the
write buffer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2023-01-04 05:00:50 +00:00
|
|
|
/*
 * Transaction restart caused by a btree write buffer flush; records the
 * transaction fn and caller ip via the shared transaction_event class.
 */
DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
|
|
|
|
|
|
|
|
/*
 * Btree write buffer flush statistics: @nr of @size entries processed,
 * @skipped entries not applied, @fast entries applied via the fast path.
 * Note: @trans is accepted but not recorded in the trace entry.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
|
|
|
|
|
2023-11-03 02:31:16 +00:00
|
|
|
/*
 * Synchronous write buffer flush: records which transaction (fn) requested
 * it and from where (caller ip).
 */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32	)
		__field(unsigned long,	caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
|
|
|
|
|
bcachefs: Btree write buffer
This adds a new method of doing btree updates - a straight write buffer,
implemented as a flat fixed size array.
This is only useful when we don't need to read from the btree in order
to do the update, and when reading is infrequent - perfect for the LRU
btree.
This will make LRU btree updates fast enough that we'll be able to use
it for persistently indexing buckets by fragmentation, which will be a
massive boost to copygc performance.
Changes:
- A new btree_insert_type enum, for btree_insert_entries. Specifies
btree, btree key cache, or btree write buffer.
- bch2_trans_update_buffered(): updates via the btree write buffer
don't need a btree path, so we need a new update path.
- Transaction commit path changes:
The update to the btree write buffer both mutates global, and can
fail if there isn't currently room. Therefore we do all write buffer
updates in the transaction all at once, and also if it fails we have
to revert filesystem usage counter changes.
If there isn't room we flush the write buffer in the transaction
commit error path and retry.
- A new persistent option, for specifying the number of entries in the
write buffer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2023-01-04 05:00:50 +00:00
|
|
|
/*
 * Write buffer flush slowpath: @slowpath of @total entries could not be
 * applied via the fast path.  @trans is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
|
|
|
|
|
2023-11-26 22:02:06 +00:00
|
|
|
/* Rebalance processed an extent; details are in the preformatted string. */
DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
|
|
|
|
|
2023-11-26 22:02:06 +00:00
|
|
|
/* A data update (move/rewrite) was performed; details in the string. */
DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
|
|
|
|
|
2024-02-22 03:10:09 +00:00
|
|
|
/*
 * A private bcachefs error code was downcast to a standard errno before
 * being returned outside the filesystem; records both codes (as strings)
 * and the symbolized ip where the downcast happened.
 */
TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32	)
		__array(char,		std_err, 32	)
		__array(char,		ip, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		/* %ps resolves the raw address to a symbol name at assign time */
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
|
|
|
|
|
2022-08-10 23:57:46 +00:00
|
|
|
#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
|
|
|
|
|
|
|
|
/*
 * A btree update was queued on a path: records the path index, btree, pos,
 * whether it overwrites an existing update, and its slot in trans->updates.
 */
TRACE_EVENT(update_by_path,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
		 struct btree_insert_entry *i, bool overwrite),
	TP_ARGS(trans, path, i, overwrite),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	path_idx	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u8,			overwrite	)
		__field(btree_path_idx_t,	update_idx	)
		__field(btree_path_idx_t,	nr_updates	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->path_idx	= path - trans->paths;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->overwrite	= overwrite;
		__entry->update_idx	= i - trans->updates;
		__entry->nr_updates	= trans->nr_updates;
	),

	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
		  __entry->trans_fn,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->overwrite,
		  __entry->update_idx,
		  __entry->nr_updates)
);
|
|
|
|
|
|
|
|
/*
 * A btree node (or key cache) lock was taken: records which node (raw
 * pointer, %px), its btree/level, and the six-lock sequence number.
 */
TRACE_EVENT(btree_path_lock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_bkey_cached_common *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32	)
		__field(unsigned long,	caller_ip	)
		__field(u8,		btree_id	)
		__field(u8,		level		)
		__array(char,		node, 24	)
		__field(u32,		lock_seq	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= b->btree_id;
		__entry->level			= b->level;

		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->lock_seq		= six_lock_seq(&b->lock);
	),

	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->level,
		  __entry->node,
		  __entry->lock_seq)
);
|
|
|
|
|
|
|
|
/*
 * Common class for low-level per-path events: path index, refcount,
 * btree id and position.
 */
DECLARE_EVENT_CLASS(btree_path_ev,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16,		idx		)
		__field(u8,		ref		)
		__field(u8,		btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
		  __entry->idx, __entry->ref,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
|
|
|
|
|
|
|
|
/* Low-level path get (refcount increment). */
DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
|
|
|
|
|
|
|
|
/* Low-level path put (refcount decrement). */
DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
|
|
|
|
|
|
|
|
/* Path marked should_be_locked. */
DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
|
|
|
|
|
|
|
|
/*
 * A new btree path was allocated: records its index, initial locks_want,
 * btree id and position.
 */
TRACE_EVENT(btree_path_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->locks_want	= path->locks_want;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
		  __entry->idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
|
|
|
|
|
|
|
|
/*
 * A path was gotten (reused) and possibly repositioned: records old and
 * new positions along with ref/preserve/locks_want state.
 */
TRACE_EVENT(btree_path_get,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		__entry->locks_want	= path->locks_want;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot)
);
|
|
|
|
|
|
|
|
/*
 * Common class for path duplication events: source path state and the
 * index of the newly created copy.
 */
DECLARE_EVENT_CLASS(btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			new_idx		)
		__field(u8,			btree_id	)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->new_idx	= new - trans->paths;
		__entry->btree_id	= path->btree_id;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->new_idx)
);
|
|
|
|
|
|
|
|
/* Path cloned. */
DEFINE_EVENT(btree_path_clone, btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);
|
|
|
|
|
|
|
|
/* Path position saved to a duplicate before repositioning. */
DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);
|
|
|
|
|
|
|
|
DECLARE_EVENT_CLASS(btree_path_traverse,
|
|
|
|
TP_PROTO(struct btree_trans *trans,
|
|
|
|
struct btree_path *path),
|
|
|
|
TP_ARGS(trans, path),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__array(char, trans_fn, 32 )
|
|
|
|
__field(btree_path_idx_t, idx )
|
|
|
|
__field(u8, ref )
|
|
|
|
__field(u8, preserve )
|
|
|
|
__field(u8, should_be_locked )
|
|
|
|
__field(u8, btree_id )
|
|
|
|
__field(u8, level )
|
|
|
|
TRACE_BPOS_entries(pos)
|
|
|
|
__field(u8, locks_want )
|
|
|
|
__field(u8, nodes_locked )
|
|
|
|
__array(char, node0, 24 )
|
|
|
|
__array(char, node1, 24 )
|
|
|
|
__array(char, node2, 24 )
|
|
|
|
__array(char, node3, 24 )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
|
|
|
|
|
|
|
|
__entry->idx = path - trans->paths;
|
|
|
|
__entry->ref = path->ref;
|
|
|
|
__entry->preserve = path->preserve;
|
|
|
|
__entry->btree_id = path->btree_id;
|
|
|
|
__entry->level = path->level;
|
|
|
|
TRACE_BPOS_assign(pos, path->pos);
|
|
|
|
|
|
|
|
__entry->locks_want = path->locks_want;
|
|
|
|
__entry->nodes_locked = path->nodes_locked;
|
|
|
|
struct btree *b = path->l[0].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
b = path->l[1].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
b = path->l[2].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
b = path->l[3].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
|
|
|
|
"locks %u %u %u %u node %s %s %s %s",
|
|
|
|
__entry->trans_fn,
|
|
|
|
__entry->idx,
|
|
|
|
__entry->ref,
|
|
|
|
__entry->preserve,
|
|
|
|
bch2_btree_id_str(__entry->btree_id),
|
|
|
|
__entry->pos_inode,
|
|
|
|
__entry->pos_offset,
|
|
|
|
__entry->pos_snapshot,
|
|
|
|
__entry->level,
|
|
|
|
__entry->locks_want,
|
|
|
|
(__entry->nodes_locked >> 6) & 3,
|
|
|
|
(__entry->nodes_locked >> 4) & 3,
|
|
|
|
(__entry->nodes_locked >> 2) & 3,
|
|
|
|
(__entry->nodes_locked >> 0) & 3,
|
|
|
|
__entry->node3,
|
|
|
|
__entry->node2,
|
|
|
|
__entry->node1,
|
|
|
|
__entry->node0)
|
|
|
|
);
|
|
|
|
|
|
|
|
/* Path traversal beginning. */
DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path)
);
|
|
|
|
|
|
|
|
/* Path traversal complete. */
DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
|
|
|
|
|
|
|
|
TRACE_EVENT(btree_path_set_pos,
|
|
|
|
TP_PROTO(struct btree_trans *trans,
|
|
|
|
struct btree_path *path,
|
|
|
|
struct bpos *new_pos),
|
|
|
|
TP_ARGS(trans, path, new_pos),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(btree_path_idx_t, idx )
|
|
|
|
__field(u8, ref )
|
|
|
|
__field(u8, preserve )
|
|
|
|
__field(u8, btree_id )
|
|
|
|
TRACE_BPOS_entries(old_pos)
|
|
|
|
TRACE_BPOS_entries(new_pos)
|
|
|
|
__field(u8, locks_want )
|
|
|
|
__field(u8, nodes_locked )
|
|
|
|
__array(char, node0, 24 )
|
|
|
|
__array(char, node1, 24 )
|
|
|
|
__array(char, node2, 24 )
|
|
|
|
__array(char, node3, 24 )
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->idx = path - trans->paths;
|
|
|
|
__entry->ref = path->ref;
|
|
|
|
__entry->preserve = path->preserve;
|
|
|
|
__entry->btree_id = path->btree_id;
|
|
|
|
TRACE_BPOS_assign(old_pos, path->pos);
|
|
|
|
TRACE_BPOS_assign(new_pos, *new_pos);
|
|
|
|
|
|
|
|
__entry->nodes_locked = path->nodes_locked;
|
|
|
|
struct btree *b = path->l[0].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
b = path->l[1].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
b = path->l[2].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
b = path->l[3].b;
|
|
|
|
if (IS_ERR(b))
|
|
|
|
strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
|
|
|
|
else
|
|
|
|
scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
|
|
|
|
"locks %u %u %u %u node %s %s %s %s",
|
|
|
|
__entry->idx,
|
|
|
|
__entry->ref,
|
|
|
|
__entry->preserve,
|
|
|
|
bch2_btree_id_str(__entry->btree_id),
|
|
|
|
__entry->old_pos_inode,
|
|
|
|
__entry->old_pos_offset,
|
|
|
|
__entry->old_pos_snapshot,
|
|
|
|
__entry->new_pos_inode,
|
|
|
|
__entry->new_pos_offset,
|
|
|
|
__entry->new_pos_snapshot,
|
|
|
|
(__entry->nodes_locked >> 6) & 3,
|
|
|
|
(__entry->nodes_locked >> 4) & 3,
|
|
|
|
(__entry->nodes_locked >> 2) & 3,
|
|
|
|
(__entry->nodes_locked >> 0) & 3,
|
|
|
|
__entry->node3,
|
|
|
|
__entry->node2,
|
|
|
|
__entry->node1,
|
|
|
|
__entry->node0)
|
|
|
|
);
|
|
|
|
|
|
|
|
/*
 * A path was freed: records preserve/should_be_locked flags and, if a
 * duplicate path exists, its index and whether it holds the node lock.
 * NOTE(review): dup index is stored as s8 (-1 = none), which would
 * truncate indices > 127 — confirm max path count fits.
 */
TRACE_EVENT(btree_path_free,
	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
	TP_ARGS(trans, path, dup),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked)
		__field(s8,			dup		)
		__field(u8,			dup_locked	)
	),

	TP_fast_assign(
		__entry->idx			= path;
		__entry->preserve		= trans->paths[path].preserve;
		__entry->should_be_locked	= trans->paths[path].should_be_locked;
		__entry->dup			= dup ? dup - trans->paths  : -1;
		__entry->dup_locked		= dup ? btree_node_locked(dup, dup->level) : 0;
	),

	TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
		  __entry->preserve ? 'P' : ' ',
		  __entry->should_be_locked ? 'S' : ' ',
		  __entry->dup,
		  __entry->dup_locked)
);
|
|
|
|
|
|
|
|
/* A leftover path was freed at transaction begin; records only its index. */
TRACE_EVENT(btree_path_free_trans_begin,
	TP_PROTO(btree_path_idx_t path),
	TP_ARGS(path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
	),

	TP_fast_assign(
		__entry->idx			= path;
	),

	TP_printk(" path %3u", __entry->idx)
);
|
|
|
|
|
|
|
|
#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
|
|
|
|
#ifndef _TRACE_BCACHEFS_H
/*
 * CONFIG_BCACHEFS_PATH_TRACEPOINTS disabled: provide no-op inline stubs
 * with the same signatures as the generated trace_*() calls, so callers
 * compile unchanged.  Guarded by _TRACE_BCACHEFS_H so the stubs are only
 * emitted on the first inclusion pass (not on TRACE_HEADER_MULTI_READ
 * re-reads).
 */
static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}

#endif
|
|
|
|
#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
|
|
|
|
|
|
|
|
#define _TRACE_BCACHEFS_H
|
2017-03-17 06:18:50 +00:00
|
|
|
#endif /* _TRACE_BCACHEFS_H */
|
|
|
|
|
|
|
|
/* This part must be outside protection */
|
|
|
|
#undef TRACE_INCLUDE_PATH
|
|
|
|
#define TRACE_INCLUDE_PATH ../../fs/bcachefs
|
|
|
|
|
|
|
|
#undef TRACE_INCLUDE_FILE
|
|
|
|
#define TRACE_INCLUDE_FILE trace
|
|
|
|
|
|
|
|
#include <trace/define_trace.h>
|