2017-03-17 06:18:50 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
|
|
|
* Code for manipulating bucket marks for garbage collection.
|
|
|
|
*
|
|
|
|
* Copyright 2014 Datera, Inc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "bcachefs.h"
|
2018-10-06 04:46:55 +00:00
|
|
|
#include "alloc_background.h"
|
2022-03-18 00:51:27 +00:00
|
|
|
#include "backpointers.h"
|
2018-11-05 07:31:48 +00:00
|
|
|
#include "bset.h"
|
2017-03-17 06:18:50 +00:00
|
|
|
#include "btree_gc.h"
|
2018-11-05 07:31:48 +00:00
|
|
|
#include "btree_update.h"
|
2017-03-17 06:18:50 +00:00
|
|
|
#include "buckets.h"
|
2022-01-05 03:32:09 +00:00
|
|
|
#include "buckets_waiting_for_journal.h"
|
2018-11-01 19:13:19 +00:00
|
|
|
#include "ec.h"
|
2017-03-17 06:18:50 +00:00
|
|
|
#include "error.h"
|
2021-10-30 01:14:23 +00:00
|
|
|
#include "inode.h"
|
2017-03-17 06:18:50 +00:00
|
|
|
#include "movinggc.h"
|
2021-10-19 16:27:47 +00:00
|
|
|
#include "recovery.h"
|
2021-05-23 06:31:33 +00:00
|
|
|
#include "reflink.h"
|
2019-01-21 20:32:13 +00:00
|
|
|
#include "replicas.h"
|
2021-03-16 04:42:25 +00:00
|
|
|
#include "subvolume.h"
|
2017-03-17 06:18:50 +00:00
|
|
|
#include "trace.h"
|
|
|
|
|
|
|
|
#include <linux/preempt.h>
|
|
|
|
|
2020-07-09 22:31:51 +00:00
|
|
|
static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
|
|
|
|
enum bch_data_type data_type,
|
|
|
|
s64 sectors)
|
|
|
|
{
|
|
|
|
switch (data_type) {
|
|
|
|
case BCH_DATA_btree:
|
|
|
|
fs_usage->btree += sectors;
|
|
|
|
break;
|
|
|
|
case BCH_DATA_user:
|
|
|
|
case BCH_DATA_parity:
|
|
|
|
fs_usage->data += sectors;
|
|
|
|
break;
|
|
|
|
case BCH_DATA_cached:
|
|
|
|
fs_usage->cached += sectors;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-25 01:25:40 +00:00
|
|
|
/*
 * Recompute the derived summary counters in c->usage_base from the
 * primary counters (persistent_reserved, replicas entries, per-device
 * usage), under the mark_lock write side so no marking runs concurrently.
 */
void bch2_fs_usage_initialize(struct bch_fs *c)
{
	struct bch_fs_usage *usage;
	struct bch_dev *ca;
	unsigned i;

	percpu_down_write(&c->mark_lock);
	usage = c->usage_base;

	/* Fold all pending percpu accumulators into usage_base first: */
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->reserved += usage->persistent_reserved[i];

	/* Derive btree/data/cached totals from the per-replicas counters: */
	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
	}

	/* Superblock and journal buckets are accounted as "hidden" space: */
	for_each_member_device(ca, c, i) {
		struct bch_dev_usage dev = bch2_dev_usage_read(ca);

		usage->hidden += (dev.d[BCH_DATA_sb].buckets +
				  dev.d[BCH_DATA_journal].buckets) *
			ca->mi.bucket_size;
	}

	percpu_up_write(&c->mark_lock);
}
|
|
|
|
|
2021-01-22 02:52:06 +00:00
|
|
|
static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
|
|
|
|
unsigned journal_seq,
|
|
|
|
bool gc)
|
|
|
|
{
|
2021-11-15 20:02:13 +00:00
|
|
|
BUG_ON(!gc && !journal_seq);
|
|
|
|
|
2021-01-22 02:52:06 +00:00
|
|
|
return this_cpu_ptr(gc
|
|
|
|
? ca->usage_gc
|
|
|
|
: ca->usage[journal_seq & JOURNAL_BUF_MASK]);
|
|
|
|
}
|
|
|
|
|
2022-10-21 18:01:19 +00:00
|
|
|
/*
 * Read a consistent snapshot of device usage into @usage: base counters
 * plus all not-yet-folded percpu accumulators, retried via the usage_lock
 * seqcount if a writer raced with us.
 */
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	struct bch_fs *c = ca->fs;
	unsigned seq, i, u64s = dev_usage_u64s();

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(usage, ca->usage_base, u64s * sizeof(u64));
		/* add in deltas not yet folded into usage_base: */
		for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
			acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));
}
|
|
|
|
|
2019-02-11 00:34:47 +00:00
|
|
|
/*
 * Return the percpu filesystem-usage accumulator to update — the GC copy
 * when @gc is set, otherwise the one indexed by @journal_seq's journal
 * buffer. Caller must hold mark_lock (asserted).
 */
static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	percpu_rwsem_assert_held(&c->mark_lock);
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}
|
|
|
|
|
|
|
|
/*
 * Read a single usage counter: the base value @v (a pointer into
 * c->usage_base) plus the same-offset entry of every percpu accumulator,
 * sampled consistently via the usage_lock seqcount.
 * Caller must hold mark_lock.
 */
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	/* offset of the counter within the usage struct, in u64 units: */
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned i, seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v;

		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Allocate and return a consistent snapshot of filesystem usage,
 * including the variable-length per-replicas counters.
 * Returns NULL on allocation failure; caller frees with kfree().
 *
 * The allocation is sized from replicas.nr read outside mark_lock, so we
 * re-check under the lock and retry if the replicas table grew.
 */
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
	struct bch_fs_usage_online *ret;
	unsigned nr_replicas = READ_ONCE(c->replicas.nr);
	unsigned seq, i;
retry:
	ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
	if (unlikely(!ret))
		return NULL;

	percpu_down_read(&c->mark_lock);

	/* replicas table may have grown since we sized the allocation: */
	if (nr_replicas != c->replicas.nr) {
		nr_replicas = c->replicas.nr;
		percpu_up_read(&c->mark_lock);
		kfree(ret);
		goto retry;
	}

	ret->online_reserved = percpu_u64_get(c->online_reserved);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		unsafe_memcpy(&ret->u, c->usage_base,
			      __fs_usage_u64s(nr_replicas) * sizeof(u64),
			      "embedded variable length struct");
		/* add in deltas not yet folded into usage_base: */
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
					__fs_usage_u64s(nr_replicas));
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}
|
|
|
|
|
2019-02-11 00:34:47 +00:00
|
|
|
/*
 * Fold percpu usage accumulator @idx into usage_base and zero it, for
 * both the filesystem-wide counters and every member device's counters.
 * Runs under the usage_lock seqcount write side (with preemption off)
 * so concurrent readers retry instead of seeing a torn fold.
 */
void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
	struct bch_dev *ca;
	unsigned i, u64s = fs_usage_u64s(c);

	BUG_ON(idx >= ARRAY_SIZE(c->usage));

	preempt_disable();
	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	/* same fold for each device's usage accumulator: */
	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL) {
		u64s = dev_usage_u64s();

		acc_u64s_percpu((u64 *) ca->usage_base,
				(u64 __percpu *) ca->usage[idx], u64s);
		percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
	}
	rcu_read_unlock();

	write_seqcount_end(&c->usage_lock);
	preempt_enable();
}
|
|
|
|
|
|
|
|
/*
 * Pretty-print a usage snapshot (as returned by bch2_fs_usage_read())
 * into @out: top-level counters, per-replication-degree reserves, then
 * one line per replicas entry.
 */
void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs *c,
			   struct bch_fs_usage_online *fs_usage)
{
	unsigned i;

	prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);

	prt_printf(out, "hidden:\t\t\t\t%llu\n",
	       fs_usage->u.hidden);
	prt_printf(out, "data:\t\t\t\t%llu\n",
	       fs_usage->u.data);
	prt_printf(out, "cached:\t\t\t\t%llu\n",
	       fs_usage->u.cached);
	prt_printf(out, "reserved:\t\t\t%llu\n",
	       fs_usage->u.reserved);
	prt_printf(out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->u.nr_inodes);
	prt_printf(out, "online reserved:\t\t%llu\n",
	       fs_usage->online_reserved);

	/* reserves broken down by replication degree (1-based for display): */
	for (i = 0;
	     i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
	     i++) {
		prt_printf(out, "%u replicas:\n", i + 1);
		prt_printf(out, "\treserved:\t\t%llu\n",
		       fs_usage->u.persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		prt_printf(out, "\t");
		bch2_replicas_entry_to_text(out, e);
		prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
	}
}
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/*
 * Pad a reservation: add 1/2^RESERVE_FACTOR of @r (rounded up to the
 * next multiple of 2^RESERVE_FACTOR first) on top of @r itself.
 */
static u64 reserve_factor(u64 r)
{
	u64 pad = round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR;

	return r + pad;
}
|
|
|
|
|
2019-02-11 00:34:47 +00:00
|
|
|
/*
 * Total sectors in use from a usage snapshot: hidden + btree + data plus
 * padded reservations, clamped to capacity so rounding in
 * reserve_factor() can't report more used than exists.
 */
u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
	return min(fs_usage->u.hidden +
		   fs_usage->u.btree +
		   fs_usage->u.data +
		   reserve_factor(fs_usage->u.reserved +
				  fs_usage->online_reserved),
		   c->capacity);
}
|
|
|
|
|
2019-02-14 23:38:52 +00:00
|
|
|
/*
 * Build the abbreviated usage summary (capacity/used/free/nr_inodes),
 * e.g. for statfs-style reporting. Caller holds mark_lock (see
 * bch2_fs_usage_read_short()).
 */
static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	/* hidden (sb/journal) space isn't usable, so exclude it from capacity: */
	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->hidden);

	data		= bch2_fs_usage_read_one(c, &c->usage_base->data) +
		bch2_fs_usage_read_one(c, &c->usage_base->btree);
	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

	return ret;
}
|
|
|
|
|
2018-11-27 13:23:22 +00:00
|
|
|
/*
 * Locked wrapper around __bch2_fs_usage_read_short(): takes mark_lock
 * for read for the duration of the snapshot.
 */
struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}
|
|
|
|
|
2022-04-01 05:29:59 +00:00
|
|
|
/*
 * Initialize a device's base usage: all usable buckets (those past
 * first_bucket) start out free.
 */
void bch2_dev_usage_init(struct bch_dev *ca)
{
	ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
}
|
|
|
|
|
2021-01-22 01:51:51 +00:00
|
|
|
static inline int bucket_sectors_fragmented(struct bch_dev *ca,
|
2022-02-11 00:09:40 +00:00
|
|
|
struct bch_alloc_v4 a)
|
2017-03-17 06:18:50 +00:00
|
|
|
{
|
2022-02-11 00:09:40 +00:00
|
|
|
return a.dirty_sectors
|
|
|
|
? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
|
2021-01-22 01:51:51 +00:00
|
|
|
: 0;
|
2017-03-17 06:18:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Apply the delta between a bucket's old and new alloc state to the
 * device (and, for hidden data types, filesystem) usage accumulators
 * selected by @journal_seq/@gc. Caller holds mark_lock (checked in
 * fs_usage_ptr()).
 */
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
				  struct bch_alloc_v4 old,
				  struct bch_alloc_v4 new,
				  u64 journal_seq, bool gc)
{
	struct bch_fs_usage *fs_usage;
	struct bch_dev_usage *u;

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);

	/* sb/journal buckets count as hidden space at the fs level: */
	if (data_type_is_hidden(old.data_type))
		fs_usage->hidden -= ca->mi.bucket_size;
	if (data_type_is_hidden(new.data_type))
		fs_usage->hidden += ca->mi.bucket_size;

	u = dev_usage_ptr(ca, journal_seq, gc);

	/* bucket moves from its old data type's bucket count to the new one: */
	u->d[old.data_type].buckets--;
	u->d[new.data_type].buckets++;

	/* erasure-coded bucket count tracks whether the bucket has a stripe: */
	u->buckets_ec -= (int) !!old.stripe;
	u->buckets_ec += (int) !!new.stripe;

	u->d[old.data_type].sectors -= old.dirty_sectors;
	u->d[new.data_type].sectors += new.dirty_sectors;

	/* cached sectors are accounted under BCH_DATA_cached regardless of type: */
	u->d[BCH_DATA_cached].sectors += new.cached_sectors;
	u->d[BCH_DATA_cached].sectors -= old.cached_sectors;

	u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
	u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);

	preempt_enable();
}
|
|
|
|
|
2022-02-11 00:09:40 +00:00
|
|
|
/*
 * In-memory (struct bucket) variant of bch2_dev_usage_update(): convert
 * the old/new bucket states to bch_alloc_v4 (copying only the fields the
 * usage update reads) and delegate.
 */
static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
				    struct bucket old, struct bucket new,
				    u64 journal_seq, bool gc)
{
	struct bch_alloc_v4 old_a = {
		.gen		= old.gen,
		.data_type	= old.data_type,
		.dirty_sectors	= old.dirty_sectors,
		.cached_sectors	= old.cached_sectors,
		.stripe		= old.stripe,
	};
	struct bch_alloc_v4 new_a = {
		.gen		= new.gen,
		.data_type	= new.data_type,
		.dirty_sectors	= new.dirty_sectors,
		.cached_sectors	= new.cached_sectors,
		.stripe		= new.stripe,
	};

	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
}
|
|
|
|
|
2021-06-11 01:44:27 +00:00
|
|
|
/*
 * Add @sectors to the counter for replicas entry @r in @fs_usage, plus
 * the matching top-level counter. Returns -1 if @r has no entry in the
 * CPU replicas table (callers must have marked it first), 0 on success.
 */
static inline int __update_replicas(struct bch_fs *c,
				    struct bch_fs_usage *fs_usage,
				    struct bch_replicas_entry *r,
				    s64 sectors)
{
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx] += sectors;
	return 0;
}
|
|
|
|
|
2021-11-28 20:13:54 +00:00
|
|
|
/*
 * Add @sectors to the usage counter for replicas entry @r. If the entry
 * is missing from the CPU replicas table, report it via fsck_err and —
 * if fsck_err says to proceed — mark the entry (dropping and retaking
 * mark_lock around the update) and retry the lookup. @k is only used
 * for the error message.
 *
 * Returns 0 on success, negative on error.
 */
static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
				  struct bch_replicas_entry *r, s64 sectors,
				  unsigned journal_seq, bool gc)
{
	struct bch_fs_usage __percpu *fs_usage;
	int idx, ret = 0;
	struct printbuf buf = PRINTBUF;

	percpu_down_read(&c->mark_lock);
	/* we may be in atomic context — printbuf must not sleep to grow: */
	buf.atomic++;

	idx = bch2_replicas_entry_idx(c, r);
	if (idx < 0 &&
	    fsck_err(c, "no replicas entry\n"
		     " while marking %s",
		     (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		/* bch2_mark_replicas() may sleep — drop mark_lock around it: */
		percpu_up_read(&c->mark_lock);
		ret = bch2_mark_replicas(c, r);
		percpu_down_read(&c->mark_lock);

		if (ret)
			goto err;
		idx = bch2_replicas_entry_idx(c, r);
	}
	if (idx < 0) {
		ret = -1;
		goto err;
	}

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx] += sectors;
	preempt_enable();
err:
fsck_err:
	percpu_up_read(&c->mark_lock);
	printbuf_exit(&buf);
	return ret;
}
|
|
|
|
|
2021-04-21 22:08:39 +00:00
|
|
|
/*
 * Convenience wrapper: update the cached-data replicas counter for
 * device @dev by @sectors. @k is passed through for error reporting.
 */
static inline int update_cached_sectors(struct bch_fs *c,
					struct bkey_s_c k,
					unsigned dev, s64 sectors,
					unsigned journal_seq, bool gc)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
}
|
|
|
|
|
2023-05-28 23:23:35 +00:00
|
|
|
/*
 * Ensure trans->fs_usage_deltas has room for @more bytes, growing it
 * with krealloc() under @gfp. If that allocation fails, fall back to a
 * fixed-size (REPLICAS_DELTA_LIST_MAX) buffer from the mempool, copying
 * the old contents over — the fallback only works if the requested size
 * fits in the mempool element, hence the -ENOMEM check first.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
				     gfp_t gfp)
{
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	/* double on growth; 128 bytes of payload to start: */
	unsigned new_size = d ? (d->size + more) * 2 : 128;
	unsigned alloc_size = sizeof(*d) + new_size;

	WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);

	if (!d || d->used + more > d->size) {
		d = krealloc(d, alloc_size, gfp|__GFP_ZERO);

		if (unlikely(!d)) {
			if (alloc_size > REPLICAS_DELTA_LIST_MAX)
				return -ENOMEM;

			d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
			if (!d)
				return -ENOMEM;

			memset(d, 0, REPLICAS_DELTA_LIST_MAX);

			/* krealloc failed, so the old buffer is still valid — copy it: */
			if (trans->fs_usage_deltas)
				memcpy(d, trans->fs_usage_deltas,
				       trans->fs_usage_deltas->size + sizeof(*d));

			new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
			kfree(trans->fs_usage_deltas);
		}

		d->size			= new_size;
		trans->fs_usage_deltas	= d;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Grow the transaction's replicas delta list, dropping btree locks if
 * needed to satisfy the allocation (via allocate_dropping_locks_errcode).
 */
static int replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
	return allocate_dropping_locks_errcode(trans,
				__replicas_deltas_realloc(trans, more, _gfp));
}
|
|
|
|
|
2023-05-28 23:23:35 +00:00
|
|
|
/*
 * Transactional counterpart of update_replicas(): instead of updating
 * percpu counters directly, append a (delta, replicas entry) record to
 * the transaction's delta list, to be applied at commit time.
 *
 * Returns 0 on success or the error from growing the delta list.
 */
static inline int update_replicas_list(struct btree_trans *trans,
				       struct bch_replicas_entry *r,
				       s64 sectors)
{
	struct replicas_delta_list *d;
	struct replicas_delta *n;
	unsigned b;
	int ret;

	if (!sectors)
		return 0;

	/* entry size plus 8 bytes — presumably the delta field/padding; keeps
	 * d->used aligned. TODO confirm against struct replicas_delta layout. */
	b = replicas_entry_bytes(r) + 8;
	ret = replicas_deltas_realloc(trans, b);
	if (ret)
		return ret;

	d = trans->fs_usage_deltas;
	n = (void *) d->d + d->used;
	n->delta = sectors;
	memcpy((void *) n + offsetof(struct replicas_delta, r),
	       r, replicas_entry_bytes(r));
	bch2_replicas_entry_sort(&n->r);
	d->used += b;
	return 0;
}
|
|
|
|
|
2023-05-28 23:23:35 +00:00
|
|
|
/*
 * Transactional counterpart of update_cached_sectors(): queue a cached-
 * data delta for device @dev on the transaction's delta list.
 */
static inline int update_cached_sectors_list(struct btree_trans *trans,
					     unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	return update_replicas_list(trans, &r.e, sectors);
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * Trigger for updates to the alloc btree: propagate a bucket's old ->
 * new state change into usage accounting, bucket gens, the
 * buckets-waiting-for-journal table, and kick the allocator paths
 * (freelist waiters, discards, invalidates, gc_gens) that the new state
 * may unblock.
 *
 * Returns 0 on success or a negative error (fatal errors are also
 * reported via bch2_fs_fatal_error()).
 */
int bch2_mark_alloc(struct btree_trans *trans,
		    enum btree_id btree, unsigned level,
		    struct bkey_s_c old, struct bkey_s_c new,
		    unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	u64 journal_seq = trans->journal_res.seq;
	u64 bucket_journal_seq;
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a_convert, new_a_convert;
	const struct bch_alloc_v4 *old_a, *new_a;
	struct bch_dev *ca;
	int ret = 0;

	/*
	 * alloc btree is read in by bch2_alloc_read, not gc:
	 */
	if ((flags & BTREE_TRIGGER_GC) &&
	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
		return 0;

	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
				       "alloc key for invalid device or bucket"))
		return -EIO;

	ca = bch_dev_bkey_exists(c, new.k->p.inode);

	/* normalize both keys to bch_alloc_v4 regardless of on-disk version: */
	old_a = bch2_alloc_to_v4(old, &old_a_convert);
	new_a = bch2_alloc_to_v4(new, &new_a_convert);

	bucket_journal_seq = new_a->journal_seq;

	if ((flags & BTREE_TRIGGER_INSERT) &&
	    data_type_is_empty(old_a->data_type) !=
	    data_type_is_empty(new_a->data_type) &&
	    new.k->type == KEY_TYPE_alloc_v4) {
		struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;

		EBUG_ON(!journal_seq);

		/*
		 * If the btree updates referring to a bucket weren't flushed
		 * before the bucket became empty again, then we don't have
		 * to wait on a journal flush before we can reuse the bucket:
		 */
		v->journal_seq = bucket_journal_seq =
			data_type_is_empty(new_a->data_type) &&
			(journal_seq == v->journal_seq ||
			 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
			? 0 : journal_seq;
	}

	/*
	 * Bucket went from nonempty to empty with updates still pending in
	 * the journal: it can't be reused until that sequence is flushed.
	 */
	if (!data_type_is_empty(old_a->data_type) &&
	    data_type_is_empty(new_a->data_type) &&
	    bucket_journal_seq) {
		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
				c->journal.flushed_seq_ondisk,
				new.k->p.inode, new.k->p.offset,
				bucket_journal_seq);
		if (ret) {
			bch2_fs_fatal_error(c,
				"error setting bucket_needs_journal_commit: %i", ret);
			return ret;
		}
	}

	percpu_down_read(&c->mark_lock);
	if (!gc && new_a->gen != old_a->gen)
		*bucket_gen(ca, new.k->p.offset) = new_a->gen;

	bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);

	/* keep gc's in-memory bucket state in sync with the new alloc key: */
	if (gc) {
		struct bucket *g = gc_bucket(ca, new.k->p.offset);

		bucket_lock(g);

		g->gen_valid		= 1;
		g->gen			= new_a->gen;
		g->data_type		= new_a->data_type;
		g->stripe		= new_a->stripe;
		g->stripe_redundancy	= new_a->stripe_redundancy;
		g->dirty_sectors	= new_a->dirty_sectors;
		g->cached_sectors	= new_a->cached_sectors;

		bucket_unlock(g);
	}
	percpu_up_read(&c->mark_lock);

	/*
	 * need to know if we're getting called from the invalidate path or
	 * not:
	 */

	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
	    old_a->cached_sectors) {
		ret = update_cached_sectors(c, new, ca->dev_idx,
					    -((s64) old_a->cached_sectors),
					    journal_seq, gc);
		if (ret) {
			bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
					    __func__);
			return ret;
		}
	}

	/* bucket is now free and journal-safe — wake allocator waiters: */
	if (new_a->data_type == BCH_DATA_free &&
	    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
		closure_wake_up(&c->freelist_wait);

	if (new_a->data_type == BCH_DATA_need_discard &&
	    (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
		bch2_do_discards(c);

	if (old_a->data_type != BCH_DATA_cached &&
	    new_a->data_type == BCH_DATA_cached &&
	    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
		bch2_do_invalidates(c);

	if (new_a->data_type == BCH_DATA_need_gc_gens)
		bch2_do_gc_gens(c);

	return 0;
}
|
|
|
|
|
2022-04-02 22:00:04 +00:00
|
|
|
/*
 * Mark @sectors of superblock or journal metadata in bucket @b of @ca,
 * during gc (BTREE_TRIGGER_GC is required). Updates the gc bucket state
 * and, on success, the device usage counters.
 *
 * Returns 0 on success, -EIO if the bucket already holds a different
 * data type or the sector count would overflow the bucket.
 */
int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			      size_t b, enum bch_data_type data_type,
			      unsigned sectors, struct gc_pos pos,
			      unsigned flags)
{
	struct bucket old, new, *g;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));
	BUG_ON(data_type != BCH_DATA_sb &&
	       data_type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	percpu_down_read(&c->mark_lock);
	g = gc_bucket(ca, b);

	bucket_lock(g);
	old = *g;

	/* a bucket may hold only one type of data: */
	if (bch2_fs_inconsistent_on(g->data_type &&
			g->data_type != data_type, c,
			"different types of data in same bucket: %s, %s",
			bch2_data_types[g->data_type],
			bch2_data_types[data_type])) {
		ret = -EIO;
		goto err;
	}

	/* widen to u64 so the sum itself can't wrap before the check: */
	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
			ca->dev_idx, b, g->gen,
			bch2_data_types[g->data_type ?: data_type],
			g->dirty_sectors, sectors)) {
		ret = -EIO;
		goto err;
	}

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	new = *g;
err:
	bucket_unlock(g);
	/* only account the change if it was actually applied: */
	if (!ret)
		bch2_dev_usage_update_m(c, ca, old, new, 0, true);
	percpu_up_read(&c->mark_lock);
	return ret;
}
|
|
|
|
|
2023-02-12 00:31:03 +00:00
|
|
|
static int check_bucket_ref(struct btree_trans *trans,
|
2021-10-29 22:43:18 +00:00
|
|
|
struct bkey_s_c k,
|
2020-10-20 02:36:24 +00:00
|
|
|
const struct bch_extent_ptr *ptr,
|
|
|
|
s64 sectors, enum bch_data_type ptr_data_type,
|
2022-02-13 23:15:35 +00:00
|
|
|
u8 b_gen, u8 bucket_data_type,
|
2022-02-14 05:07:38 +00:00
|
|
|
u32 dirty_sectors, u32 cached_sectors)
|
2019-03-11 18:59:58 +00:00
|
|
|
{
|
2023-02-12 00:31:03 +00:00
|
|
|
struct bch_fs *c = trans->c;
|
2022-02-13 23:15:35 +00:00
|
|
|
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
|
|
|
|
size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
|
2020-10-20 02:36:24 +00:00
|
|
|
u16 bucket_sectors = !ptr->cached
|
2020-06-03 22:27:07 +00:00
|
|
|
? dirty_sectors
|
|
|
|
: cached_sectors;
|
2022-02-25 18:18:19 +00:00
|
|
|
struct printbuf buf = PRINTBUF;
|
|
|
|
int ret = 0;
|
2020-06-03 22:27:07 +00:00
|
|
|
|
2022-04-01 05:29:59 +00:00
|
|
|
if (bucket_data_type == BCH_DATA_cached)
|
|
|
|
bucket_data_type = BCH_DATA_user;
|
|
|
|
|
2022-10-09 05:08:51 +00:00
|
|
|
if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
|
|
|
|
(bucket_data_type == BCH_DATA_user && ptr_data_type == BCH_DATA_stripe))
|
|
|
|
bucket_data_type = ptr_data_type = BCH_DATA_stripe;
|
|
|
|
|
2022-02-13 23:15:35 +00:00
|
|
|
if (gen_after(ptr->gen, b_gen)) {
|
2020-06-03 22:27:07 +00:00
|
|
|
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
|
|
|
|
"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
|
|
|
|
"while marking %s",
|
2022-02-13 23:15:35 +00:00
|
|
|
ptr->dev, bucket_nr, b_gen,
|
2020-10-20 02:36:24 +00:00
|
|
|
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
|
|
|
ptr->gen,
|
2022-02-25 18:18:19 +00:00
|
|
|
(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
|
|
|
ret = -EIO;
|
|
|
|
goto err;
|
2020-06-03 22:27:07 +00:00
|
|
|
}
|
|
|
|
|
2022-02-13 23:15:35 +00:00
|
|
|
if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
|
2020-06-03 22:27:07 +00:00
|
|
|
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
|
|
|
|
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
|
|
|
|
"while marking %s",
|
2022-02-13 23:15:35 +00:00
|
|
|
ptr->dev, bucket_nr, b_gen,
|
2020-10-20 02:36:24 +00:00
|
|
|
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
|
|
|
ptr->gen,
|
2022-02-25 18:18:19 +00:00
|
|
|
(printbuf_reset(&buf),
|
|
|
|
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
|
|
|
ret = -EIO;
|
|
|
|
goto err;
|
2020-06-03 22:27:07 +00:00
|
|
|
}
|
|
|
|
|
2022-02-13 23:15:35 +00:00
|
|
|
if (b_gen != ptr->gen && !ptr->cached) {
|
2020-06-03 22:27:07 +00:00
|
|
|
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
|
2022-02-13 23:15:35 +00:00
|
|
|
"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
|
2020-06-03 22:27:07 +00:00
|
|
|
"while marking %s",
|
2022-02-13 23:15:35 +00:00
|
|
|
ptr->dev, bucket_nr, b_gen,
|
|
|
|
*bucket_gen(ca, bucket_nr),
|
2020-10-20 02:36:24 +00:00
|
|
|
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
|
|
|
ptr->gen,
|
2022-02-25 18:18:19 +00:00
|
|
|
(printbuf_reset(&buf),
|
|
|
|
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
|
|
|
ret = -EIO;
|
|
|
|
goto err;
|
2020-06-03 22:27:07 +00:00
|
|
|
}
|
|
|
|
|
2022-02-25 18:18:19 +00:00
|
|
|
if (b_gen != ptr->gen) {
|
|
|
|
ret = 1;
|
2023-02-12 00:31:03 +00:00
|
|
|
goto out;
|
2022-02-25 18:18:19 +00:00
|
|
|
}
|
2020-06-03 22:27:07 +00:00
|
|
|
|
2022-04-01 05:29:59 +00:00
|
|
|
if (!data_type_is_empty(bucket_data_type) &&
|
|
|
|
ptr_data_type &&
|
2020-10-20 02:36:24 +00:00
|
|
|
bucket_data_type != ptr_data_type) {
|
2020-06-03 22:27:07 +00:00
|
|
|
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
|
|
|
|
"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
|
|
|
|
"while marking %s",
|
2022-02-13 23:15:35 +00:00
|
|
|
ptr->dev, bucket_nr, b_gen,
|
2020-10-20 02:36:24 +00:00
|
|
|
bch2_data_types[bucket_data_type],
|
2020-06-03 22:27:07 +00:00
|
|
|
bch2_data_types[ptr_data_type],
|
2022-02-25 18:18:19 +00:00
|
|
|
(printbuf_reset(&buf),
|
|
|
|
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
|
|
|
ret = -EIO;
|
|
|
|
goto err;
|
2020-06-03 22:27:07 +00:00
|
|
|
}
|
|
|
|
|
2022-02-14 05:07:38 +00:00
|
|
|
if ((unsigned) (bucket_sectors + sectors) > U32_MAX) {
|
2020-06-03 22:27:07 +00:00
|
|
|
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
|
|
|
|
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
|
|
|
|
"while marking %s",
|
2022-02-13 23:15:35 +00:00
|
|
|
ptr->dev, bucket_nr, b_gen,
|
2020-10-20 02:36:24 +00:00
|
|
|
bch2_data_types[bucket_data_type ?: ptr_data_type],
|
|
|
|
bucket_sectors, sectors,
|
2022-02-25 18:18:19 +00:00
|
|
|
(printbuf_reset(&buf),
|
|
|
|
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
|
|
|
ret = -EIO;
|
|
|
|
goto err;
|
2020-06-03 22:27:07 +00:00
|
|
|
}
|
2023-02-12 00:31:03 +00:00
|
|
|
out:
|
2022-02-25 18:18:19 +00:00
|
|
|
printbuf_exit(&buf);
|
|
|
|
return ret;
|
2023-02-12 00:31:03 +00:00
|
|
|
err:
|
|
|
|
bch2_dump_trans_updates(trans);
|
|
|
|
goto out;
|
2020-10-20 02:36:24 +00:00
|
|
|
}
|
|
|
|
|
2021-10-29 22:43:18 +00:00
|
|
|
static int mark_stripe_bucket(struct btree_trans *trans,
|
|
|
|
struct bkey_s_c k,
|
|
|
|
unsigned ptr_idx,
|
2022-01-05 03:32:09 +00:00
|
|
|
unsigned flags)
|
2020-10-20 02:36:24 +00:00
|
|
|
{
|
2021-10-29 22:43:18 +00:00
|
|
|
struct bch_fs *c = trans->c;
|
2022-01-05 03:32:09 +00:00
|
|
|
u64 journal_seq = trans->journal_res.seq;
|
2020-07-09 22:31:51 +00:00
|
|
|
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
|
|
|
|
unsigned nr_data = s->nr_blocks - s->nr_redundant;
|
|
|
|
bool parity = ptr_idx >= nr_data;
|
2023-03-02 02:47:07 +00:00
|
|
|
enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
|
2021-11-29 21:38:27 +00:00
|
|
|
s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
|
2020-07-09 22:31:51 +00:00
|
|
|
const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
|
2020-10-20 02:36:24 +00:00
|
|
|
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
|
2022-02-14 05:07:38 +00:00
|
|
|
struct bucket old, new, *g;
|
2022-02-25 18:18:19 +00:00
|
|
|
struct printbuf buf = PRINTBUF;
|
2021-11-28 19:31:19 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2021-12-26 03:37:19 +00:00
|
|
|
BUG_ON(!(flags & BTREE_TRIGGER_GC));
|
|
|
|
|
2021-11-29 21:38:27 +00:00
|
|
|
/* * XXX doesn't handle deletion */
|
|
|
|
|
2021-11-28 19:31:19 +00:00
|
|
|
percpu_down_read(&c->mark_lock);
|
2022-02-25 18:18:19 +00:00
|
|
|
buf.atomic++;
|
2021-12-26 03:37:19 +00:00
|
|
|
g = PTR_GC_BUCKET(ca, ptr);
|
2020-10-20 02:36:24 +00:00
|
|
|
|
2022-02-14 05:07:38 +00:00
|
|
|
if (g->dirty_sectors ||
|
2021-11-29 21:38:27 +00:00
|
|
|
(g->stripe && g->stripe != k.k->p.offset)) {
|
2021-01-22 23:01:07 +00:00
|
|
|
bch2_fs_inconsistent(c,
|
|
|
|
"bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
|
2022-02-14 05:07:38 +00:00
|
|
|
ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
|
2022-02-25 18:18:19 +00:00
|
|
|
(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
|
2021-11-28 19:31:19 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
2021-01-22 23:01:07 +00:00
|
|
|
}
|
2020-07-23 03:11:48 +00:00
|
|
|
|
2022-02-14 05:07:38 +00:00
|
|
|
bucket_lock(g);
|
|
|
|
old = *g;
|
2020-10-20 02:36:24 +00:00
|
|
|
|
2023-02-12 00:31:03 +00:00
|
|
|
ret = check_bucket_ref(trans, k, ptr, sectors, data_type,
|
2022-04-02 22:00:04 +00:00
|
|
|
g->gen, g->data_type,
|
|
|
|
g->dirty_sectors, g->cached_sectors);
|
|
|
|
if (ret)
|
2022-02-14 05:07:38 +00:00
|
|
|
goto err;
|
2020-07-09 22:31:51 +00:00
|
|
|
|
2023-03-02 02:47:07 +00:00
|
|
|
g->data_type = data_type;
|
2022-04-02 22:00:04 +00:00
|
|
|
g->dirty_sectors += sectors;
|
2020-10-20 02:36:24 +00:00
|
|
|
|
2021-01-22 23:01:07 +00:00
|
|
|
g->stripe = k.k->p.offset;
|
|
|
|
g->stripe_redundancy = s->nr_redundant;
|
2022-02-14 05:07:38 +00:00
|
|
|
new = *g;
|
2021-11-28 19:31:19 +00:00
|
|
|
err:
|
2022-04-02 22:00:04 +00:00
|
|
|
bucket_unlock(g);
|
|
|
|
if (!ret)
|
|
|
|
bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
|
2021-11-28 19:31:19 +00:00
|
|
|
percpu_up_read(&c->mark_lock);
|
2022-02-25 18:18:19 +00:00
|
|
|
printbuf_exit(&buf);
|
|
|
|
return ret;
|
2020-10-20 02:36:24 +00:00
|
|
|
}
|
|
|
|
|
2021-10-29 22:43:18 +00:00
|
|
|
static int __mark_pointer(struct btree_trans *trans,
|
|
|
|
struct bkey_s_c k,
|
2020-10-20 02:36:24 +00:00
|
|
|
const struct bch_extent_ptr *ptr,
|
|
|
|
s64 sectors, enum bch_data_type ptr_data_type,
|
|
|
|
u8 bucket_gen, u8 *bucket_data_type,
|
2022-02-14 05:07:38 +00:00
|
|
|
u32 *dirty_sectors, u32 *cached_sectors)
|
2020-10-20 02:36:24 +00:00
|
|
|
{
|
2022-02-14 05:07:38 +00:00
|
|
|
u32 *dst_sectors = !ptr->cached
|
2020-10-20 02:36:24 +00:00
|
|
|
? dirty_sectors
|
|
|
|
: cached_sectors;
|
2023-02-12 00:31:03 +00:00
|
|
|
int ret = check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
|
2020-10-20 02:36:24 +00:00
|
|
|
bucket_gen, *bucket_data_type,
|
|
|
|
*dirty_sectors, *cached_sectors);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
*dst_sectors += sectors;
|
2020-06-03 22:27:07 +00:00
|
|
|
*bucket_data_type = *dirty_sectors || *cached_sectors
|
|
|
|
? ptr_data_type : 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-10-29 22:43:18 +00:00
|
|
|
/*
 * GC-only trigger: account one decoded extent pointer against the gc copy
 * of the bucket it points into, then propagate the before/after bucket
 * states to the device usage counters.
 *
 * Returns __mark_pointer()'s result: 0 on success, 1 for a stale cached
 * pointer (caller skips it), negative on inconsistency.
 */
static int bch2_mark_pointer(struct btree_trans *trans,
			     enum btree_id btree_id, unsigned level,
			     struct bkey_s_c k,
			     struct extent_ptr_decoded p,
			     s64 sectors,
			     unsigned flags)
{
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct bucket old, new, *g;
	enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
	u8 bucket_data_type;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	/* mark_lock guards the gc bucket arrays; bucket_lock() the one bucket: */
	percpu_down_read(&c->mark_lock);
	g = PTR_GC_BUCKET(ca, &p.ptr);
	bucket_lock(g);
	/* snapshot for bch2_dev_usage_update_m() below: */
	old = *g;

	/*
	 * data_type is updated via a local so the bucket is left untouched
	 * if __mark_pointer() fails:
	 */
	bucket_data_type = g->data_type;
	ret = __mark_pointer(trans, k, &p.ptr, sectors,
			     data_type, g->gen,
			     &bucket_data_type,
			     &g->dirty_sectors,
			     &g->cached_sectors);
	if (!ret)
		g->data_type = bucket_data_type;

	new = *g;
	bucket_unlock(g);
	if (!ret)
		bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
	percpu_up_read(&c->mark_lock);

	return ret;
}
|
|
|
|
|
2021-10-29 22:43:18 +00:00
|
|
|
static int bch2_mark_stripe_ptr(struct btree_trans *trans,
|
2021-11-28 20:13:54 +00:00
|
|
|
struct bkey_s_c k,
|
2018-11-24 22:09:44 +00:00
|
|
|
struct bch_extent_stripe_ptr p,
|
2019-01-21 20:32:13 +00:00
|
|
|
enum bch_data_type data_type,
|
2021-06-11 01:44:27 +00:00
|
|
|
s64 sectors,
|
2021-10-29 22:43:18 +00:00
|
|
|
unsigned flags)
|
2018-11-01 19:13:19 +00:00
|
|
|
{
|
2021-10-29 22:43:18 +00:00
|
|
|
struct bch_fs *c = trans->c;
|
2020-07-09 22:31:51 +00:00
|
|
|
struct bch_replicas_padded r;
|
2021-12-26 03:37:19 +00:00
|
|
|
struct gc_stripe *m;
|
2018-11-26 01:53:51 +00:00
|
|
|
|
2021-12-26 03:37:19 +00:00
|
|
|
BUG_ON(!(flags & BTREE_TRIGGER_GC));
|
2018-11-26 01:53:51 +00:00
|
|
|
|
2021-12-26 03:37:19 +00:00
|
|
|
m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
|
2021-12-31 01:14:52 +00:00
|
|
|
if (!m) {
|
|
|
|
bch_err(c, "error allocating memory for gc_stripes, idx %llu",
|
|
|
|
(u64) p.idx);
|
2023-03-14 19:35:57 +00:00
|
|
|
return -BCH_ERR_ENOMEM_mark_stripe_ptr;
|
2021-12-31 01:14:52 +00:00
|
|
|
}
|
2018-11-01 19:13:19 +00:00
|
|
|
|
2023-02-19 01:49:37 +00:00
|
|
|
mutex_lock(&c->ec_stripes_heap_lock);
|
2020-07-09 22:31:51 +00:00
|
|
|
|
2021-12-26 03:37:19 +00:00
|
|
|
if (!m || !m->alive) {
|
2023-02-19 01:49:37 +00:00
|
|
|
mutex_unlock(&c->ec_stripes_heap_lock);
|
2021-12-26 03:37:19 +00:00
|
|
|
bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
|
|
|
|
(u64) p.idx);
|
|
|
|
bch2_inconsistent_error(c);
|
|
|
|
return -EIO;
|
2018-11-26 01:53:51 +00:00
|
|
|
}
|
2018-11-01 19:13:19 +00:00
|
|
|
|
2021-12-26 03:37:19 +00:00
|
|
|
m->block_sectors[p.block] += sectors;
|
|
|
|
|
|
|
|
r = m->r;
|
2023-02-19 01:49:37 +00:00
|
|
|
mutex_unlock(&c->ec_stripes_heap_lock);
|
2021-12-26 03:37:19 +00:00
|
|
|
|
|
|
|
r.e.data_type = data_type;
|
|
|
|
update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
|
|
|
|
|
2018-11-24 22:09:44 +00:00
|
|
|
return 0;
|
2018-11-01 19:13:19 +00:00
|
|
|
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * GC-only trigger for extents and btree node pointers: walk every pointer
 * in the key, mark each one against its bucket, account erasure coded
 * pointers against their stripe, and update the replicas table.
 *
 * On BTREE_TRIGGER_OVERWRITE the old key is being removed, so all sector
 * deltas are negated.
 *
 * Returns 0 on success or a negative error; replicas-table failures are
 * treated as fatal filesystem errors.
 */
int bch2_mark_extent(struct btree_trans *trans,
		     enum btree_id btree_id, unsigned level,
		     struct bkey_s_c old, struct bkey_s_c new,
		     unsigned flags)
{
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	/* Btree nodes are always a full node's worth of sectors: */
	s64 sectors = bkey_is_btree_ptr(k.k)
		? btree_sectors(c)
		: k.k->size;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	/* Build the replicas entry as we walk the non-ec, non-cached pointers: */
	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
		if (ret < 0)
			return ret;

		/* ret > 0: stale cached pointer, don't account it: */
		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale) {
				ret = update_cached_sectors(c, k, p.ptr.dev,
						disk_sectors, journal_seq, true);
				if (ret) {
					bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
							    __func__);
					return ret;
				}
			}
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
					disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs) {
		ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
		if (ret) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
			printbuf_exit(&buf);
			return ret;
		}
	}

	return 0;
}
|
2018-07-24 18:54:39 +00:00
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * Trigger for stripe keys, handling both runtime (!gc) and gc paths.
 *
 * Runtime path: keep the in-memory stripes radix tree and the stripes heap
 * in sync with the key (insert / update / delete).
 *
 * GC path: (re)build the gc_stripes entry for this stripe, mark each of
 * its buckets, and account the parity overhead in the replicas table.
 *
 * Returns 0 on success or a negative error.
 */
int bch2_mark_stripe(struct btree_trans *trans,
		     enum btree_id btree_id, unsigned level,
		     struct bkey_s_c old, struct bkey_s_c new,
		     unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;
	unsigned i;
	int ret;

	/* gc only ever marks new keys, never overwrites: */
	BUG_ON(gc && old_s);

	if (!gc) {
		struct stripe *m = genradix_ptr(&c->stripes, idx);

		if (!m) {
			struct printbuf buf1 = PRINTBUF;
			struct printbuf buf2 = PRINTBUF;

			bch2_bkey_val_to_text(&buf1, c, old);
			bch2_bkey_val_to_text(&buf2, c, new);
			bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
					    "old %s\n"
					    "new %s", idx, buf1.buf, buf2.buf);
			printbuf_exit(&buf2);
			printbuf_exit(&buf1);
			bch2_inconsistent_error(c);
			return -1;
		}

		if (!new_s) {
			/* Stripe deleted: drop it from the heap and clear the entry: */
			bch2_stripes_heap_del(c, m, idx);

			memset(m, 0, sizeof(*m));
		} else {
			m->sectors	= le16_to_cpu(new_s->sectors);
			m->algorithm	= new_s->algorithm;
			m->nr_blocks	= new_s->nr_blocks;
			m->nr_redundant	= new_s->nr_redundant;
			m->blocks_nonempty = 0;

			for (i = 0; i < new_s->nr_blocks; i++)
				m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);

			if (!old_s)
				bch2_stripes_heap_insert(c, m, idx);
			else
				bch2_stripes_heap_update(c, m, idx);
		}
	} else {
		struct gc_stripe *m =
			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);

		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				idx);
			return -BCH_ERR_ENOMEM_mark_stripe;
		}
		/*
		 * This will be wrong when we bring back runtime gc: we should
		 * be unmarking the old key and then marking the new key
		 */
		m->alive	= true;
		m->sectors	= le16_to_cpu(new_s->sectors);
		m->nr_blocks	= new_s->nr_blocks;
		m->nr_redundant	= new_s->nr_redundant;

		for (i = 0; i < new_s->nr_blocks; i++)
			m->ptrs[i] = new_s->ptrs[i];

		bch2_bkey_to_replicas(&m->r.e, new);

		/*
		 * gc recalculates this field from stripe ptr
		 * references:
		 */
		memset(m->block_sectors, 0, sizeof(m->block_sectors));

		for (i = 0; i < new_s->nr_blocks; i++) {
			ret = mark_stripe_bucket(trans, new, i, flags);
			if (ret)
				return ret;
		}

		/* Parity blocks are pure overhead - charge them to the replicas entry: */
		ret = update_replicas(c, new, &m->r.e,
				      ((s64) m->sectors * m->nr_redundant),
				      journal_seq, gc);
		if (ret) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, new);
			bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
			printbuf_exit(&buf);
			return ret;
		}
	}

	return 0;
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * Trigger for inode keys: stamp the journal sequence number into the inode
 * on insert, and maintain the filesystem-wide inode count on the gc path.
 *
 * Always returns 0.
 */
int bch2_mark_inode(struct btree_trans *trans,
		    enum btree_id btree_id, unsigned level,
		    struct bkey_s_c old, struct bkey_s_c new,
		    unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bch_fs_usage __percpu *fs_usage;
	u64 journal_seq = trans->journal_res.seq;

	if (flags & BTREE_TRIGGER_INSERT) {
		/*
		 * NOTE: casts away const to write bi_journal_seq into the key
		 * being inserted - only valid because the trigger runs on the
		 * caller's mutable insert key.
		 */
		struct bch_inode_v3 *v = (struct bch_inode_v3 *) new.v;

		BUG_ON(!journal_seq);
		BUG_ON(new.k->type != KEY_TYPE_inode_v3);

		v->bi_journal_seq = cpu_to_le64(journal_seq);
	}

	if (flags & BTREE_TRIGGER_GC) {
		percpu_down_read(&c->mark_lock);
		preempt_disable();

		/* bkey_is_inode() is 0 or 1, so this is a net +1/-1/0: */
		fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
		fs_usage->nr_inodes += bkey_is_inode(new.k);
		fs_usage->nr_inodes -= bkey_is_inode(old.k);

		preempt_enable();
		percpu_up_read(&c->mark_lock);
	}
	return 0;
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * GC-only trigger for reservation keys: account the reserved sectors
 * (scaled by the replication factor) in the filesystem usage counters.
 *
 * Always returns 0.
 */
int bch2_mark_reservation(struct btree_trans *trans,
			  enum btree_id btree_id, unsigned level,
			  struct bkey_s_c old, struct bkey_s_c new,
			  unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
	struct bch_fs_usage __percpu *fs_usage;
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	/* Removing a reservation releases the sectors: */
	if (flags & BTREE_TRIGGER_OVERWRITE)
		sectors = -sectors;
	sectors *= replicas;

	/* preemption must be off while touching the percpu usage counters: */
	percpu_down_read(&c->mark_lock);
	preempt_disable();

	fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
	/* clamp replicas into the persistent_reserved[] index range: */
	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(fs_usage->persistent_reserved));

	fs_usage->reserved				+= sectors;
	fs_usage->persistent_reserved[replicas - 1]	+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);

	return 0;
}
|
|
|
|
|
2022-02-19 07:48:27 +00:00
|
|
|
/*
 * Process one segment of a reflink pointer against the reflink_gc table.
 *
 * @p:		the reflink pointer being marked
 * @start/@end:	the full (padded) index range covered by @p
 * @idx:	in/out: current position in the range; advanced past the
 *		segment handled by this call
 * @r_idx:	index into the reflink_gc table to check against
 *
 * If the range [*idx, next entry) has a matching reflink_gc entry, its
 * refcount is adjusted (+1 on insert, -1 on overwrite). Otherwise the
 * range points at a missing indirect extent; with fsck's approval the
 * missing range is overwritten with a KEY_TYPE_error key.
 */
static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
				 struct bkey_s_c_reflink_p p,
				 u64 start, u64 end,
				 u64 *idx, unsigned flags, size_t r_idx)
{
	struct bch_fs *c = trans->c;
	struct reflink_gc *r;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	u64 next_idx = end;
	s64 ret = 0;
	struct printbuf buf = PRINTBUF;

	if (r_idx >= c->reflink_gc_nr)
		goto not_found;

	r = genradix_ptr(&c->reflink_gc_table, r_idx);
	/* r covers [r->offset - r->size, r->offset): */
	next_idx = min(next_idx, r->offset - r->size);
	if (*idx < next_idx)
		goto not_found;

	BUG_ON((s64) r->refcount + add < 0);

	r->refcount += add;
	*idx = r->offset;
	return 0;
not_found:
	if (fsck_err(c, "pointer to missing indirect extent\n"
		     "  %s\n"
		     "  missing range %llu-%llu",
		     (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
		     *idx, next_idx)) {
		struct bkey_i_error *new;

		/* Replace the missing range with an error key: */
		new = bch2_trans_kmalloc(trans, sizeof(*new));
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			goto err;

		bkey_init(&new->k);
		new->k.type	= KEY_TYPE_error;
		new->k.p		= bkey_start_pos(p.k);
		new->k.p.offset += *idx - start;
		bch2_key_resize(&new->k, next_idx - *idx);
		ret = __bch2_btree_insert(trans, BTREE_ID_extents, &new->k_i,
					  BTREE_TRIGGER_NORUN);
	}

	*idx = next_idx;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * GC-only trigger for reflink pointers: binary search the reflink_gc table
 * for the start of the pointer's (padded) range, then walk the range one
 * gc entry at a time via __bch2_mark_reflink_p().
 *
 * Returns 0 on success or the first error from __bch2_mark_reflink_p().
 */
int bch2_mark_reflink_p(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c old, struct bkey_s_c new,
			unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	struct reflink_gc *ref;
	size_t l, r, m;
	u64 idx = le64_to_cpu(p.v->idx), start = idx;
	u64 end = le64_to_cpu(p.v->idx) + p.k->size;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	/* Newer format versions widen the range by the front/back padding: */
	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
		idx -= le32_to_cpu(p.v->front_pad);
		end += le32_to_cpu(p.v->back_pad);
	}

	/* Binary search for the first gc entry whose offset is > idx: */
	l = 0;
	r = c->reflink_gc_nr;
	while (l < r) {
		m = l + (r - l) / 2;

		ref = genradix_ptr(&c->reflink_gc_table, m);
		if (ref->offset <= idx)
			l = m + 1;
		else
			r = m;
	}

	while (idx < end && !ret)
		ret = __bch2_mark_reflink_p(trans, p, start, end,
					    &idx, flags, l++);

	return ret;
}
|
|
|
|
|
bcachefs: Btree write buffer
This adds a new method of doing btree updates - a straight write buffer,
implemented as a flat fixed size array.
This is only useful when we don't need to read from the btree in order
to do the update, and when reading is infrequent - perfect for the LRU
btree.
This will make LRU btree updates fast enough that we'll be able to use
it for persistently indexing buckets by fragmentation, which will be a
massive boost to copygc performance.
Changes:
- A new btree_insert_type enum, for btree_insert_entries. Specifies
btree, btree key cache, or btree write buffer.
- bch2_trans_update_buffered(): updates via the btree write buffer
don't need a btree path, so we need a new update path.
- Transaction commit path changes:
The update to the btree write buffer both mutates global state and can
fail if there isn't currently room. Therefore we do all write buffer
updates in the transaction all at once, and also if it fails we have
to revert filesystem usage counter changes.
If there isn't room we flush the write buffer in the transaction
commit error path and retry.
- A new persistent option, for specifying the number of entries in the
write buffer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2023-01-04 05:00:50 +00:00
|
|
|
/*
 * Undo the filesystem usage changes previously applied from @deltas
 * (the inverse of bch2_trans_fs_usage_apply()), used when a transaction
 * commit must be unwound.
 */
void bch2_trans_fs_usage_revert(struct btree_trans *trans,
				struct replicas_delta_list *deltas)
{
	struct bch_fs *c = trans->c;
	struct bch_fs_usage *dst;
	struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
	s64 added = 0;
	unsigned i;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	dst = fs_usage_ptr(c, trans->journal_res.seq, false);

	/* revert changes: */
	for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
		/* only real data types count against the disk reservation: */
		switch (d->r.data_type) {
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			added += d->delta;
		}
		/* apply the negated delta; must succeed since apply succeeded: */
		BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
	}

	dst->nr_inodes -= deltas->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		added				-= deltas->persistent_reserved[i];
		dst->reserved			-= deltas->persistent_reserved[i];
		dst->persistent_reserved[i]	-= deltas->persistent_reserved[i];
	}

	/* give back what apply took out of the transaction's reservation: */
	if (added > 0) {
		trans->disk_res->sectors += added;
		this_cpu_add(*c->online_reserved, added);
	}

	preempt_enable();
	percpu_up_read(&c->mark_lock);
}
|
|
|
|
|
2021-11-28 19:08:58 +00:00
|
|
|
int bch2_trans_fs_usage_apply(struct btree_trans *trans,
|
|
|
|
struct replicas_delta_list *deltas)
|
2021-04-04 00:29:05 +00:00
|
|
|
{
|
|
|
|
struct bch_fs *c = trans->c;
|
|
|
|
static int warned_disk_usage = 0;
|
|
|
|
bool warn = false;
|
|
|
|
unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
|
2021-11-28 19:08:58 +00:00
|
|
|
struct replicas_delta *d = deltas->d, *d2;
|
2021-04-04 00:29:05 +00:00
|
|
|
struct replicas_delta *top = (void *) deltas->d + deltas->used;
|
|
|
|
struct bch_fs_usage *dst;
|
|
|
|
s64 added = 0, should_not_have_added;
|
|
|
|
unsigned i;
|
|
|
|
|
2021-11-28 19:31:19 +00:00
|
|
|
percpu_down_read(&c->mark_lock);
|
2021-04-04 00:29:05 +00:00
|
|
|
preempt_disable();
|
|
|
|
dst = fs_usage_ptr(c, trans->journal_res.seq, false);
|
|
|
|
|
|
|
|
for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
|
|
|
|
switch (d->r.data_type) {
|
|
|
|
case BCH_DATA_btree:
|
|
|
|
case BCH_DATA_user:
|
|
|
|
case BCH_DATA_parity:
|
|
|
|
added += d->delta;
|
|
|
|
}
|
|
|
|
|
2021-11-28 19:08:58 +00:00
|
|
|
if (__update_replicas(c, dst, &d->r, d->delta))
|
|
|
|
goto need_mark;
|
2021-04-04 00:29:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dst->nr_inodes += deltas->nr_inodes;
|
|
|
|
|
|
|
|
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
|
|
|
|
added += deltas->persistent_reserved[i];
|
|
|
|
dst->reserved += deltas->persistent_reserved[i];
|
|
|
|
dst->persistent_reserved[i] += deltas->persistent_reserved[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Not allowed to reduce sectors_available except by getting a
|
|
|
|
* reservation:
|
|
|
|
*/
|
|
|
|
should_not_have_added = added - (s64) disk_res_sectors;
|
|
|
|
if (unlikely(should_not_have_added > 0)) {
|
2021-06-11 03:33:27 +00:00
|
|
|
u64 old, new, v = atomic64_read(&c->sectors_available);
|
|
|
|
|
|
|
|
do {
|
|
|
|
old = v;
|
|
|
|
new = max_t(s64, 0, old - should_not_have_added);
|
|
|
|
} while ((v = atomic64_cmpxchg(&c->sectors_available,
|
|
|
|
old, new)) != old);
|
|
|
|
|
2021-04-04 00:29:05 +00:00
|
|
|
added -= should_not_have_added;
|
|
|
|
warn = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (added > 0) {
|
|
|
|
trans->disk_res->sectors -= added;
|
|
|
|
this_cpu_sub(*c->online_reserved, added);
|
|
|
|
}
|
|
|
|
|
|
|
|
preempt_enable();
|
2021-11-28 19:31:19 +00:00
|
|
|
percpu_up_read(&c->mark_lock);
|
2021-04-04 00:29:05 +00:00
|
|
|
|
|
|
|
if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
|
2023-01-25 15:07:52 +00:00
|
|
|
bch2_trans_inconsistent(trans,
|
|
|
|
"disk usage increased %lli more than %u sectors reserved)",
|
|
|
|
should_not_have_added, disk_res_sectors);
|
2021-11-28 19:08:58 +00:00
|
|
|
return 0;
|
|
|
|
need_mark:
|
|
|
|
/* revert changes: */
|
|
|
|
for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
|
|
|
|
BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
|
|
|
|
|
|
|
|
preempt_enable();
|
2021-11-28 19:31:19 +00:00
|
|
|
percpu_up_read(&c->mark_lock);
|
2021-11-28 19:08:58 +00:00
|
|
|
return -1;
|
2021-04-04 00:29:05 +00:00
|
|
|
}
|
|
|
|
|
2019-03-11 18:59:58 +00:00
|
|
|
/* trans_mark: */
|
|
|
|
|
2022-03-18 00:51:27 +00:00
|
|
|
/*
 * bch2_trans_mark_pointer - transactional mark of a single extent pointer.
 *
 * Updates the alloc key for the bucket the pointer points into (gen,
 * data_type, dirty/cached sector counts) and, for non-cached pointers,
 * inserts or deletes the corresponding backpointer entry.
 *
 * Returns 0 on success or a negative error; __mark_pointer()'s return is
 * propagated as-is.
 */
static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
					  enum btree_id btree_id, unsigned level,
					  struct bkey_s_c k, struct extent_ptr_decoded p,
					  unsigned flags)
{
	/* no OVERWRITE flag means this key is being inserted */
	bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	struct bpos bucket;
	struct bch_backpointer bp;
	s64 sectors;
	int ret;

	/* derive bucket position and backpointer payload from the pointer */
	bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
	sectors = bp.bucket_len;
	/* overwrites subtract sectors from the bucket */
	if (!insert)
		sectors = -sectors;

	a = bch2_trans_start_alloc_update(trans, &iter, bucket);
	if (IS_ERR(a))
		return PTR_ERR(a);

	ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
			     a->v.gen, &a->v.data_type,
			     &a->v.dirty_sectors, &a->v.cached_sectors);
	if (ret)
		goto err;

	/* cached pointers don't get backpointers */
	if (!p.ptr.cached) {
		ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
		if (ret)
			goto err;
	}

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * bch2_trans_mark_stripe_ptr - account an erasure-coded pointer against its
 * stripe.
 *
 * Looks up the stripe key the pointer references, adjusts the per-block
 * sector count within the stripe, and queues a replicas-list update for the
 * stripe's replicas entry.
 *
 * Returns 0 on success, -EIO if the pointer doesn't match the stripe, or
 * another negative error from the lookup/update path.
 */
static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
				      struct extent_ptr_decoded p,
				      s64 sectors, enum bch_data_type data_type)
{
	struct btree_iter iter;
	struct bkey_i_stripe *s;
	struct bch_replicas_padded r;
	int ret = 0;

	/* fetch the stripe as a mutable update, typed-checked as 'stripe' */
	s = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_stripes, POS(0, p.ec.idx),
			BTREE_ITER_WITH_UPDATES, stripe);
	ret = PTR_ERR_OR_ZERO(s);
	if (unlikely(ret)) {
		/* a missing stripe is an on-disk inconsistency, not a soft error */
		bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
			"pointer to nonexistent stripe %llu",
			(u64) p.ec.idx);
		goto err;
	}

	if (!bch2_ptr_matches_stripe(&s->v, p)) {
		bch2_trans_inconsistent(trans,
			"stripe pointer doesn't match stripe %llu",
			(u64) p.ec.idx);
		ret = -EIO;
		goto err;
	}

	/* add (or subtract, if sectors < 0) from this block's count */
	stripe_blockcount_set(&s->v, p.ec.block,
		stripe_blockcount_get(&s->v, p.ec.block) +
		sectors);

	bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
	r.e.data_type = data_type;
	ret = update_replicas_list(trans, &r.e, sectors);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * bch2_trans_mark_extent - transactional trigger for extent/btree-ptr keys.
 *
 * For each pointer in the key being inserted (or overwritten), updates the
 * corresponding bucket's alloc info, accounts cached sectors, erasure-coded
 * stripe sectors, and finally the replicas entry for the plain dirty
 * pointers.
 *
 * On OVERWRITE the accounting is applied with negative sector counts.
 */
int bch2_trans_mark_extent(struct btree_trans *trans,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c old, struct bkey_i *new,
			   unsigned flags)
{
	struct bch_fs *c = trans->c;
	/* mark the key being removed on overwrite, the new key on insert */
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		? old
		: bkey_i_to_s_c(new);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	/* btree nodes are fixed-size; extents use the key's size */
	s64 sectors = bkey_is_btree_ptr(k.k)
		? btree_sectors(c)
		: k.k->size;
	s64 dirty_sectors = 0;
	bool stale;
	int ret = 0;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		/* > 0 return from the pointer mark means the pointer is stale */
		ret = bch2_trans_mark_pointer(trans, btree_id, level, k, p, flags);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			/* stale cached pointers hold no sectors to account */
			if (!stale) {
				ret = update_cached_sectors_list(trans, p.ptr.dev,
								 disk_sectors);
				if (ret)
					return ret;
			}
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			ret = bch2_trans_mark_stripe_ptr(trans, p,
					disk_sectors, data_type);
			if (ret)
				return ret;

			/*
			 * EC pointers are accounted via the stripe; the plain
			 * replicas entry becomes best-effort (nr_required = 0).
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		ret = update_replicas_list(trans, &r.e, dirty_sectors);

	return ret;
}
|
|
|
|
|
2021-11-29 21:38:27 +00:00
|
|
|
/*
 * bch2_trans_mark_stripe_bucket - update alloc info for one block of a
 * stripe being created (@deleting == false) or torn down (@deleting == true).
 *
 * Parity blocks (the trailing nr_redundant blocks) carry the stripe's
 * sectors as BCH_DATA_parity; data blocks carry no sectors here (their data
 * is accounted via the extents that point into them).
 *
 * Returns 0 on success, -EIO on detected inconsistency, or a negative error.
 */
static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
					 struct bkey_s_c_stripe s,
					 unsigned idx, bool deleting)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	/* last nr_redundant blocks are parity; others contribute no sectors */
	enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
		? BCH_DATA_parity : 0;
	s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
	int ret = 0;

	if (deleting)
		sectors = -sectors;

	a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
	if (IS_ERR(a))
		return PTR_ERR(a);

	ret = check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
			       a->v.gen, a->v.data_type,
			       a->v.dirty_sectors, a->v.cached_sectors);
	if (ret)
		goto err;

	if (!deleting) {
		/* a bucket may belong to at most one stripe */
		if (bch2_trans_inconsistent_on(a->v.stripe ||
					       a->v.stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_types[a->v.data_type],
				a->v.dirty_sectors,
				a->v.stripe, s.k->p.offset)) {
			ret = -EIO;
			goto err;
		}

		/* parity buckets must start out empty */
		if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_types[a->v.data_type],
				a->v.dirty_sectors,
				s.k->p.offset)) {
			ret = -EIO;
			goto err;
		}

		a->v.stripe		= s.k->p.offset;
		a->v.stripe_redundancy	= s.v->nr_redundant;
		a->v.data_type		= BCH_DATA_stripe;
	} else {
		/* on deletion, the bucket must still reference this stripe */
		if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
					       a->v.stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				s.k->p.offset, a->v.stripe)) {
			ret = -EIO;
			goto err;
		}

		a->v.stripe		= 0;
		a->v.stripe_redundancy	= 0;
		a->v.data_type		= alloc_data_type(a->v, BCH_DATA_user);
	}

	a->v.dirty_sectors += sectors;
	if (data_type)
		a->v.data_type = !deleting ? data_type : 0;

	/*
	 * (Previously there was a redundant "if (ret) goto err;" here; control
	 * falls through to err: unconditionally, so it was dead code.)
	 */
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * bch2_trans_mark_stripe - transactional trigger for stripe keys.
 *
 * Accounts the replicas entries for the new and old stripes, then updates
 * the alloc info of every bucket whose pointer changed between the old and
 * new versions of the stripe.
 *
 * A stripe update never changes nr_blocks/nr_redundant in place (BUG_ON
 * below); only the pointers may change.
 */
int bch2_trans_mark_stripe(struct btree_trans *trans,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c old, struct bkey_i *new,
			   unsigned flags)
{
	const struct bch_stripe *old_s = NULL;
	struct bch_stripe *new_s = NULL;
	struct bch_replicas_padded r;
	unsigned i, nr_blocks;
	int ret = 0;

	if (old.k->type == KEY_TYPE_stripe)
		old_s = bkey_s_c_to_stripe(old).v;
	if (new->k.type == KEY_TYPE_stripe)
		new_s = &bkey_i_to_stripe(new)->v;

	/*
	 * If the pointers aren't changing, we don't need to do anything:
	 */
	if (new_s && old_s &&
	    new_s->nr_blocks	== old_s->nr_blocks &&
	    new_s->nr_redundant	== old_s->nr_redundant &&
	    !memcmp(old_s->ptrs, new_s->ptrs,
		    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
		return 0;

	BUG_ON(new_s && old_s &&
	       (new_s->nr_blocks	!= old_s->nr_blocks ||
		new_s->nr_redundant	!= old_s->nr_redundant));

	nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;

	if (new_s) {
		/* parity overhead: sectors per block times redundancy */
		s64 sectors = le16_to_cpu(new_s->sectors);

		bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
		ret = update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
		if (ret)
			return ret;
	}

	if (old_s) {
		/* subtract the old stripe's parity accounting */
		s64 sectors = -((s64) le16_to_cpu(old_s->sectors));

		bch2_bkey_to_replicas(&r.e, old);
		ret = update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
		if (ret)
			return ret;
	}

	for (i = 0; i < nr_blocks; i++) {
		/* skip blocks whose pointer is unchanged */
		if (new_s && old_s &&
		    !memcmp(&new_s->ptrs[i],
			    &old_s->ptrs[i],
			    sizeof(new_s->ptrs[i])))
			continue;

		if (new_s) {
			ret = bch2_trans_mark_stripe_bucket(trans,
					bkey_i_to_s_c_stripe(new), i, false);
			if (ret)
				break;
		}

		if (old_s) {
			ret = bch2_trans_mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(old), i, true);
			if (ret)
				break;
		}
	}

	return ret;
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
int bch2_trans_mark_inode(struct btree_trans *trans,
|
2022-04-01 01:44:55 +00:00
|
|
|
enum btree_id btree_id, unsigned level,
|
2022-03-13 05:26:52 +00:00
|
|
|
struct bkey_s_c old,
|
|
|
|
struct bkey_i *new,
|
|
|
|
unsigned flags)
|
2021-06-11 01:44:27 +00:00
|
|
|
{
|
2021-12-10 22:04:26 +00:00
|
|
|
int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
|
2021-06-11 01:44:27 +00:00
|
|
|
|
|
|
|
if (nr) {
|
2023-05-28 23:23:35 +00:00
|
|
|
int ret = replicas_deltas_realloc(trans, 0);
|
|
|
|
struct replicas_delta_list *d = trans->fs_usage_deltas;
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2021-06-11 01:44:27 +00:00
|
|
|
d->nr_inodes += nr;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
/*
 * bch2_trans_mark_reservation - transactional trigger for reservation keys.
 *
 * Accumulates (size * nr_replicas) sectors into the persistent_reserved
 * bucket for the key's replication level in this transaction's delta list;
 * negated on overwrite.
 */
int bch2_trans_mark_reservation(struct btree_trans *trans,
				enum btree_id btree_id, unsigned level,
				struct bkey_s_c old,
				struct bkey_i *new,
				unsigned flags)
{
	/* account the key being removed on overwrite, the new key on insert */
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		? old
		: bkey_i_to_s_c(new);
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;
	struct replicas_delta_list *d;
	int ret;

	if (flags & BTREE_TRIGGER_OVERWRITE)
		sectors = -sectors;
	sectors *= replicas;

	ret = replicas_deltas_realloc(trans, 0);
	if (ret)
		return ret;

	d = trans->fs_usage_deltas;
	/* persistent_reserved is indexed by replicas - 1; clamp into range */
	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(d->persistent_reserved));

	d->persistent_reserved[replicas - 1] += sectors;
	return 0;
}
|
|
|
|
|
2019-08-16 13:59:56 +00:00
|
|
|
/*
 * __bch2_trans_mark_reflink_p - adjust the refcount of one indirect extent
 * referenced by a reflink pointer.
 *
 * Looks up the indirect extent at *idx, bumps its refcount (+1 on insert,
 * -1 on overwrite), and on insert widens the reflink pointer's front/back
 * pad so it covers the full indirect extent it landed on.
 *
 * On success *idx is advanced past the indirect extent just processed, so
 * the caller can loop over the whole range the pointer covers.
 */
static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
				struct bkey_s_c_reflink_p p,
				u64 *idx, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i *k;
	__le64 *refcount;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	struct printbuf buf = PRINTBUF;
	int ret;

	k = bch2_bkey_get_mut_noupdate(trans, &iter,
			BTREE_ID_reflink, POS(0, *idx),
			BTREE_ITER_WITH_UPDATES);
	ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		goto err;

	/* NULL means the key at *idx isn't a refcounted (indirect) type */
	refcount = bkey_refcount(k);
	if (!refcount) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"nonexistent indirect extent at %llu while marking\n %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	/* dropping a reference that's already zero would underflow */
	if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"indirect extent refcount underflow at %llu while marking\n %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
		u64 pad;

		/*
		 * Grow front/back pad so the pointer spans whole indirect
		 * extents — the indirect extent we landed on may start before
		 * / end after the range the pointer nominally covers.
		 */
		pad = max_t(s64, le32_to_cpu(v->front_pad),
			    le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
		BUG_ON(pad > U32_MAX);
		v->front_pad = cpu_to_le32(pad);

		pad = max_t(s64, le32_to_cpu(v->back_pad),
			    k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
		BUG_ON(pad > U32_MAX);
		v->back_pad = cpu_to_le32(pad);
	}

	le64_add_cpu(refcount, add);

	bch2_btree_iter_set_pos_to_extent_start(&iter);
	ret = bch2_trans_update(trans, &iter, k, 0);
	if (ret)
		goto err;

	/* advance past this indirect extent */
	*idx = k->k.p.offset;
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
|
|
|
|
|
2022-03-13 05:26:52 +00:00
|
|
|
int bch2_trans_mark_reflink_p(struct btree_trans *trans,
|
2022-04-01 01:44:55 +00:00
|
|
|
enum btree_id btree_id, unsigned level,
|
2022-03-13 05:26:52 +00:00
|
|
|
struct bkey_s_c old,
|
|
|
|
struct bkey_i *new,
|
|
|
|
unsigned flags)
|
2019-08-16 13:59:56 +00:00
|
|
|
{
|
2022-03-13 05:26:52 +00:00
|
|
|
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
|
|
|
|
? old
|
|
|
|
: bkey_i_to_s_c(new);
|
2021-06-11 01:44:27 +00:00
|
|
|
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
|
2021-10-21 19:48:05 +00:00
|
|
|
u64 idx, end_idx;
|
|
|
|
int ret = 0;
|
2019-08-16 13:59:56 +00:00
|
|
|
|
2021-10-14 13:54:47 +00:00
|
|
|
if (flags & BTREE_TRIGGER_INSERT) {
|
|
|
|
struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
|
|
|
|
|
|
|
|
v->front_pad = v->back_pad = 0;
|
|
|
|
}
|
|
|
|
|
2021-10-21 19:48:05 +00:00
|
|
|
idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
|
|
|
|
end_idx = le64_to_cpu(p.v->idx) + p.k->size +
|
|
|
|
le32_to_cpu(p.v->back_pad);
|
2019-08-16 13:59:56 +00:00
|
|
|
|
2021-10-21 19:48:05 +00:00
|
|
|
while (idx < end_idx && !ret)
|
|
|
|
ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
|
2019-08-16 13:59:56 +00:00
|
|
|
|
2021-10-21 19:48:05 +00:00
|
|
|
return ret;
|
2019-08-16 13:59:56 +00:00
|
|
|
}
|
|
|
|
|
2021-01-22 22:56:34 +00:00
|
|
|
/*
 * __bch2_trans_mark_metadata_bucket - set a bucket's alloc info to reflect
 * metadata (superblock/journal) usage.
 *
 * Overwrites the bucket's data_type and dirty_sectors with the given values.
 * A bucket already holding a different data type is flagged to fsck and
 * returns -EIO.
 */
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
					     struct bch_dev *ca, size_t b,
					     enum bch_data_type type,
					     unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	int ret = 0;

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && type && a->v.data_type != type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			iter.pos.inode, iter.pos.offset, a->v.gen,
			bch2_data_types[a->v.data_type],
			bch2_data_types[type],
			bch2_data_types[type]);
		ret = -EIO;
		goto out;
	}

	a->v.data_type		= type;
	a->v.dirty_sectors	= sectors;

	/*
	 * (Previously there was a redundant "if (ret) goto out;" here; control
	 * falls through to out: unconditionally, so it was dead code.)
	 */
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * bch2_trans_mark_metadata_bucket - mark a metadata bucket, committing the
 * update (with retries on transaction restart) via commit_do().
 */
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, size_t b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	return commit_do(trans, NULL, NULL, 0,
			__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
}
|
|
|
|
|
|
|
|
/*
 * bch2_trans_mark_metadata_sectors - mark a sector range [start, end) as
 * metadata of @type, coalescing per-bucket.
 *
 * *bucket / *bucket_sectors carry partial-bucket state across calls so
 * adjacent ranges (e.g. consecutive superblock layout entries) that share a
 * bucket are marked once with the combined sector count; the final partial
 * bucket is left in the accumulators for the caller to flush.
 */
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    struct bch_dev *ca,
					    u64 start, u64 end,
					    enum bch_data_type type,
					    u64 *bucket, unsigned *bucket_sectors)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		/* sectors of this range that fall inside bucket b */
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		/* crossed into a new bucket: flush the accumulated one */
		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket		= b;
		*bucket_sectors	+= sectors;
		start += sectors;
	} while (start < end);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * __bch2_trans_mark_dev_sb - mark all of a device's superblock and journal
 * buckets in the alloc btree.
 *
 * Walks the superblock layout (including the area before BCH_SB_SECTOR for
 * the primary superblock) and every journal bucket, marking each as
 * BCH_DATA_sb / BCH_DATA_journal respectively.
 */
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
				    struct bch_dev *ca)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		/* the primary sb also owns the sectors preceding it */
		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				offset + (1 << layout->sb_max_size_bits),
				BCH_DATA_sb, &bucket, &bucket_sectors);
		if (ret)
			return ret;
	}

	/* flush the final partially-accumulated bucket */
	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size);
		if (ret)
			return ret;
	}

	return 0;
}
|
|
|
|
|
2021-04-15 00:25:33 +00:00
|
|
|
/*
 * bch2_trans_mark_dev_sb - public wrapper: mark a device's superblock and
 * journal buckets inside a fresh btree transaction.
 */
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
	return bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
}
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* Disk reservations: */
|
|
|
|
|
|
|
|
#define SECTORS_CACHE 1024
|
|
|
|
|
2022-11-01 02:28:09 +00:00
|
|
|
/*
 * __bch2_disk_reservation_add - reserve @sectors against free space.
 *
 * Fast path: take from this CPU's cached slice of c->sectors_available,
 * refilling the cache (in SECTORS_CACHE-sized grabs) from the global atomic
 * counter. Slow path (recalculate): under sectors_available_lock, recompute
 * actual free space and either grant the reservation (always granted with
 * BCH_DISK_RESERVATION_NOFAIL) or fail with -BCH_ERR_ENOSPC_disk_reservation.
 */
int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, v, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	/* fast path: the percpu cache already covers the request */
	if (sectors <= pcpu->sectors_available)
		goto out;

	v = atomic64_read(&c->sectors_available);
	do {
		old = v;
		/* grab the request plus a cache refill, capped at what's left */
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			/* global pool too small: recompute under the lock */
			preempt_enable();
			goto recalculate;
		}
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	/* drain all percpu caches so the recomputed total is authoritative */
	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -BCH_ERR_ENOSPC_disk_reservation;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}
|
|
|
|
|
|
|
|
/* Startup/shutdown: */
|
|
|
|
|
2021-12-26 00:55:34 +00:00
|
|
|
static void bucket_gens_free_rcu(struct rcu_head *rcu)
|
|
|
|
{
|
|
|
|
struct bucket_gens *buckets =
|
|
|
|
container_of(rcu, struct bucket_gens, rcu);
|
|
|
|
|
2022-02-07 00:20:36 +00:00
|
|
|
kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
|
2021-12-26 00:55:34 +00:00
|
|
|
}
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/*
 * Allocate (or resize) the in-memory bucket gens array and optional
 * buckets_nouse bitmap for @ca to hold @nbuckets buckets.
 *
 * New arrays are allocated up front; if the device already had arrays
 * (resize == true), the old contents are copied over under the write
 * locks and the old gens array is freed only after an RCU grace period,
 * since ca->bucket_gens is published with rcu_assign_pointer().
 *
 * Returns 0 on success or a negative -BCH_ERR_ENOMEM_* code.
 */
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	unsigned long *buckets_nouse = NULL;
	/* resize: device already has live arrays we must copy and swap out */
	bool resize = ca->bucket_gens != NULL;
	int ret;

	/* gens array: header followed by one u8 gen per bucket */
	if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
				      GFP_KERNEL|__GFP_ZERO))) {
		ret = -BCH_ERR_ENOMEM_bucket_gens;
		goto err;
	}

	/* nouse bitmap only exists when the buckets_nouse option is set */
	if ((c->opts.buckets_nouse &&
	     !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
					 sizeof(unsigned long),
					 GFP_KERNEL|__GFP_ZERO)))) {
		ret = -BCH_ERR_ENOMEM_buckets_nouse;
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets = nbuckets;

	bch2_copygc_stop(c);

	if (resize) {
		/* exclude gc, allocation and mark/usage updates while we copy */
		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		/* when shrinking, copy only as much as fits in the new array */
		size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);

		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       n);
		if (buckets_nouse)
			memcpy(buckets_nouse,
			       ca->buckets_nouse,
			       BITS_TO_LONGS(n) * sizeof(unsigned long));
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	/*
	 * Repoint @bucket_gens at the old array so the common err path
	 * below RCU-frees it; on the error paths above it still points at
	 * the unpublished new array (or NULL), which is freed instead.
	 */
	bucket_gens = old_bucket_gens;

	/* publish the new bitmap; old one ends up in @buckets_nouse */
	swap(ca->buckets_nouse, buckets_nouse);

	/*
	 * From here @nbuckets must match the size of whatever bitmap is in
	 * @buckets_nouse (the old one, after the swap above).
	 * NOTE(review): assumes ca->mi.nbuckets still holds the previous
	 * bucket count at this point — confirm against callers.
	 */
	nbuckets = ca->mi.nbuckets;

	if (resize) {
		percpu_up_write(&c->mark_lock);
		up_write(&ca->bucket_lock);
		up_write(&c->gc_lock);
	}

	ret = 0;
err:
	/* frees the new bitmap on error, the displaced old one on success */
	kvpfree(buckets_nouse,
		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
	if (bucket_gens)
		/* defer via RCU: readers may still hold the old pointer */
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}
|
|
|
|
|
|
|
|
void bch2_dev_buckets_free(struct bch_dev *ca)
|
|
|
|
{
|
|
|
|
unsigned i;
|
|
|
|
|
2018-11-19 06:16:07 +00:00
|
|
|
kvpfree(ca->buckets_nouse,
|
2017-03-17 06:18:50 +00:00
|
|
|
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
|
2022-02-07 00:20:36 +00:00
|
|
|
kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
|
|
|
|
sizeof(struct bucket_gens) + ca->mi.nbuckets);
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2021-01-22 02:52:06 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
|
|
|
|
free_percpu(ca->usage[i]);
|
|
|
|
kfree(ca->usage_base);
|
2017-03-17 06:18:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
|
|
|
|
{
|
2021-01-22 02:52:06 +00:00
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
|
|
|
|
if (!ca->usage_base)
|
2023-03-14 19:35:57 +00:00
|
|
|
return -BCH_ERR_ENOMEM_usage_init;
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2021-01-22 02:52:06 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
|
|
|
|
ca->usage[i] = alloc_percpu(struct bch_dev_usage);
|
|
|
|
if (!ca->usage[i])
|
2023-03-14 19:35:57 +00:00
|
|
|
return -BCH_ERR_ENOMEM_usage_init;
|
2021-01-22 02:52:06 +00:00
|
|
|
}
|
|
|
|
|
2022-10-19 22:31:33 +00:00
|
|
|
return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
|
2017-03-17 06:18:50 +00:00
|
|
|
}
|