/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

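/*
 * Sector <-> bucket conversions: buckets are fixed-size runs of sectors, so
 * these are plain divide/remainder by ca->mi.bucket_size. For example, with an
 * (illustrative) bucket_size of 1024 sectors, sector 3000 lands in bucket 2 at
 * offset 952.
 */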
static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
						 u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in-memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1))
 *		cpu_relax();
 * but, it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

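/*
 * Illustrative locking sketch (assumed usage, not a call site copied from this
 * file): take the per-bucket byte lock around a read-modify-write of the
 * in-memory bucket state, then drop it:
 *
 *	bucket_lock(g);
 *	... update fields of *g ...
 *	bucket_unlock(g);
 */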
static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->buckets_gc,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	struct bucket_array *buckets = gc_bucket_array(ca);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	BUG_ON(b < gens->first_bucket || b >= gens->nbuckets);
	return gens->b + b;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
					 const struct bch_extent_ptr *ptr)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

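/*
 * ptr_disk_sectors() scales a count of live (uncompressed) sectors down to the
 * sectors actually consumed on disk when the extent is compressed. With
 * illustrative numbers: 8 live sectors of an extent compressed from 16 down to
 * 4 sectors take DIV_ROUND_UP(8 * 4, 16) = 2 sectors on disk.
 */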
static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

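/*
 * Bucket generation numbers are 8 bits and wrap around; comparisons must be
 * done with signed 8-bit arithmetic. E.g. gen_cmp(1, 254) == 3, i.e. gen 1 is
 * newer than gen 254 across the wrap.
 */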
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	u8 ret;

	rcu_read_lock();
	ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
	rcu_read_unlock();

	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_init(struct bch_dev *);
void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *);

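/*
 * Buckets held in reserve for a given watermark: each case falls through to
 * the ones below it, so a watermark's reserve is its own increment plus every
 * increment listed after it - e.g. BCH_WATERMARK_stripe reserves
 * 2 * (ca->mi.nbuckets >> 6) + 2 * ca->nr_btree_reserve buckets.
 */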
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
		break;
	}

	return reserved;
}

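/*
 * dev_buckets_free() counts only buckets that are already free;
 * __dev_buckets_available() also counts cached, need_gc_gens and need_discard
 * buckets, which can be reclaimed and reused. Both subtract open buckets and
 * the watermark reserve.
 */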
static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
{
	return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
			   struct bch_fs *, struct bch_fs_usage_online *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

/* key/bucket marking: */

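/*
 * Returns the percpu set of filesystem usage counters to update: the gc copy
 * when called from gc, otherwise the set indexed by the low bits of the
 * journal sequence number (journal_seq & JOURNAL_BUF_MASK).
 */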
static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	percpu_rwsem_assert_held(&c->mark_lock);
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}

int bch2_update_replicas_list(struct btree_trans *,
			      struct bch_replicas_entry_v1 *, s64);
int bch2_update_cached_sectors_list(struct btree_trans *, unsigned, s64);
int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);

void bch2_fs_usage_initialize(struct bch_fs *);

int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			      size_t, enum bch_data_type, unsigned,
			      struct gc_pos, unsigned);

int bch2_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
		    struct bkey_s_c, struct bkey_s, unsigned);
int bch2_mark_extent(struct btree_trans *, enum btree_id, unsigned,
		     struct bkey_s_c, struct bkey_s, unsigned);
int bch2_mark_stripe(struct btree_trans *, enum btree_id, unsigned,
		     struct bkey_s_c, struct bkey_s, unsigned);
int bch2_mark_reservation(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_s, unsigned);

int bch2_trans_mark_extent(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, unsigned);
int bch2_trans_mark_stripe(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, unsigned);
int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, unsigned);

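/*
 * Run a trigger for an overwrite-then-insert update: _fn is called once for
 * the key being overwritten (with BTREE_TRIGGER_INSERT cleared) and, if that
 * succeeds, once for the key being inserted (with BTREE_TRIGGER_OVERWRITE
 * cleared). Keys of type 0 (KEY_TYPE_deleted) are skipped.
 */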
#define mem_trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_OVERWRITE);\
	ret;											\
})

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)	\
	mem_trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)

void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
				    size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

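/*
 * Returns true if bucket b on this device contains superblock data: bucket 0
 * (which holds the superblock layout) is always reserved, as is any bucket
 * overlapping one of the superblocks described by the layout.
 */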
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)

int __bch2_disk_reservation_add(struct bch_fs *,
				struct disk_reservation *,
				u64, int);

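/*
 * Fast path for taking a disk reservation: try to satisfy it from this CPU's
 * percpu sectors_available pool with a cmpxchg loop, and fall back to
 * __bch2_disk_reservation_add() when the percpu pool can't cover the request.
 * Userspace builds (!__KERNEL__) always take the fallback path.
 */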
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
#ifdef __KERNEL__
	u64 old, new;

	do {
		old = this_cpu_read(c->pcpu->sectors_available);
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

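/*
 * avail_factor() scales raw capacity down by RESERVE_FACTOR to leave headroom:
 * with RESERVE_FACTOR = 6 it returns r * 64 / 65, i.e. roughly 1.5% of the
 * space is held back.
 */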
#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */