// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_ALLOC)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id l_btree_id,
			     unsigned l_level,
			     struct bpos l_pos,
			     struct journal_key *r)
{
	return (cmp_int(l_btree_id, r->btree_id) ?:
		cmp_int(l_level, r->level) ?:
		bkey_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
	return (cmp_int(l->btree_id, r->btree_id) ?:
		cmp_int(l->level, r->level) ?:
		bkey_cmp(l->k->k.p, r->k->k.p));
}
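
/*
 * Note: journal_key_search() below is a binary search over the sorted
 * journal_keys array; it returns the index of the first key >= the search
 * position (id, level, pos), or keys->nr if there is none - the BUG_ON()s at
 * the end assert exactly that invariant.
 */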
static size_t journal_key_search(struct journal_keys *journal_keys,
				 enum btree_id id, unsigned level,
				 struct bpos pos)
{
	size_t l = 0, r = journal_keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < journal_keys->nr &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

	return l;
}

static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
	struct bkey_i *n = iter->keys->d[idx].k;
	struct btree_and_journal_iter *biter =
		container_of(iter, struct btree_and_journal_iter, journal);

	if (iter->idx > idx ||
	    (iter->idx == idx &&
	     biter->last &&
	     bkey_cmp(n->k.p, biter->unpacked.p) <= 0))
		iter->idx++;
}
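
/*
 * Note: bch2_journal_key_insert() either overwrites an existing journal key at
 * the same (btree_id, level, pos) or inserts a new one, growing the sorted
 * array when it is full and fixing up any live iterators afterwards.
 */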
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true
	};
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	unsigned idx = journal_key_search(keys, id, level, k->k.p);

	if (idx < keys->nr &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= keys->size * 2,
			.journal_seq_base	= keys->journal_seq_base,
		};

		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -ENOMEM;
		}

		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;
	}

	array_insert_item(keys->d, keys->nr, idx, n);

	list_for_each_entry(iter, &c->journal_iters, list)
		journal_iter_fix(c, iter, idx);

	return 0;
}
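
/*
 * Note: deletion of a journal key is implemented by inserting a whiteout - a
 * freshly initialized, deleted key at the given position - via
 * bch2_journal_key_insert().
 */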
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i *whiteout =
		kmalloc(sizeof(struct bkey), GFP_KERNEL);
	int ret;

	if (!whiteout) {
		bch_err(c, "%s: error allocating new key", __func__);
		return -ENOMEM;
	}

	bkey_init(&whiteout->k);
	whiteout->k.p = pos;

	ret = bch2_journal_key_insert(c, id, level, whiteout);
	if (ret)
		kfree(whiteout);
	return ret;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->idx - iter->keys->nr
		? iter->keys->d + iter->idx : NULL;

	if (k &&
	    k->btree_id == iter->btree_id &&
	    k->level == iter->level)
		return k->k;

	iter->idx = iter->keys->nr;
	return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->nr)
		iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= journal_key_search(&c->journal_keys, id, level, pos);
	list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	switch (iter->last) {
	case none:
		break;
	case btree:
		bch2_journal_iter_advance_btree(iter);
		break;
	case journal:
		bch2_journal_iter_advance(&iter->journal);
		break;
	}

	iter->last = none;
}
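
/*
 * Note: bch2_btree_and_journal_iter_peek() merges keys from the btree node
 * with keys read from the journal; when both have a key at the same position
 * the btree iterator is advanced past it, so the journal version takes
 * precedence.
 */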
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c ret;

	while (1) {
		struct bkey_s_c btree_k =
			bch2_journal_iter_peek_btree(iter);
		struct bkey_s_c journal_k =
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

		if (btree_k.k && journal_k.k) {
			int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);

			if (!cmp)
				bch2_journal_iter_advance_btree(iter);

			iter->last = cmp < 0 ? btree : journal;
		} else if (btree_k.k) {
			iter->last = btree;
		} else if (journal_k.k) {
			iter->last = journal;
		} else {
			iter->last = none;
			return bkey_s_c_null;
		}

		ret = iter->last == journal ? journal_k : btree_k;

		if (iter->b &&
		    bkey_cmp(ret.k->p, iter->b->data->max_key) > 0) {
			iter->journal.idx = iter->journal.keys->nr;
			iter->last = none;
			return bkey_s_c_null;
		}

		if (!bkey_deleted(ret.k))
			break;

		bch2_btree_and_journal_iter_advance(iter);
	}

	return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
	bch2_btree_and_journal_iter_advance(iter);

	return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
	bch2_journal_iter_init(c, &iter->journal,
			       b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */
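
/*
 * Note: the iterator is passed to btree_and_journal_iter_prefetch() by value,
 * so the prefetch loop below walks a copy and does not advance the caller's
 * iterator.
 */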
static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
					    struct btree_and_journal_iter iter)
{
	unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
	struct bkey_s_c k;
	struct bkey_buf tmp;

	BUG_ON(!b->c.level);

	bch2_bkey_buf_init(&tmp);

	while (i < nr &&
	       (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&tmp, c, k);

		bch2_btree_node_prefetch(c, NULL, tmp.k,
					 b->c.btree_id, b->c.level - 1);

		bch2_btree_and_journal_iter_advance(&iter);
		i++;
	}

	bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
				struct journal_keys *journal_keys,
				enum btree_id btree_id,
				btree_walk_node_fn node_fn,
				btree_walk_key_fn key_fn)
{
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	struct btree *child;
	int ret = 0;

	bch2_bkey_buf_init(&tmp);
	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		ret = key_fn(c, btree_id, b->c.level, k);
		if (ret)
			break;

		if (b->c.level) {
			bch2_bkey_buf_reassemble(&tmp, c, k);

			bch2_btree_and_journal_iter_advance(&iter);

			child = bch2_btree_node_get_noiter(c, tmp.k,
						b->c.btree_id, b->c.level - 1,
						false);

			ret = PTR_ERR_OR_ZERO(child);
			if (ret)
				break;

			btree_and_journal_iter_prefetch(c, b, iter);

			ret = (node_fn ? node_fn(c, b) : 0) ?:
				bch2_btree_and_journal_walk_recurse(c, child,
					journal_keys, btree_id, node_fn, key_fn);
			six_unlock_read(&child->c.lock);

			if (ret)
				break;
		} else {
			bch2_btree_and_journal_iter_advance(&iter);
		}
	}

	bch2_btree_and_journal_iter_exit(&iter);
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
				enum btree_id btree_id,
				btree_walk_node_fn node_fn,
				btree_walk_key_fn key_fn)
{
	struct btree *b = c->btree_roots[btree_id].b;
	int ret = 0;

	if (btree_node_fake(b))
		return 0;

	six_lock_read(&b->c.lock, NULL, NULL);
	ret = (node_fn ? node_fn(c, b) : 0) ?:
		bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
						    node_fn, key_fn) ?:
		key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
	six_unlock_read(&b->c.lock);

	return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return cmp_int(l->btree_id, r->btree_id) ?:
		cmp_int(l->level, r->level) ?:
		bkey_cmp(l->k->k.p, r->k->k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = 0;
}

static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys keys = { NULL };
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	if (list_empty(journal_entries))
		return keys;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		if (!keys.journal_seq_base)
			keys.journal_seq_base = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	keys.size = roundup_pow_of_two(nr_keys);

	keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
	if (!keys.d)
		goto err;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

		for_each_jset_key(k, _n, entry, &i->j)
			keys.d[keys.nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq) -
					keys.journal_seq_base,
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);
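
	/*
	 * Dedup pass: duplicates are adjacent after sorting and, because keys
	 * that compare equal sort oldest first, this keeps only the newest
	 * version of each (btree_id, level, pos):
	 */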
	src = dst = keys.d;
	while (src < keys.d + keys.nr) {
		while (src + 1 < keys.d + keys.nr &&
		       src[0].btree_id == src[1].btree_id &&
		       src[0].level == src[1].level &&
		       !bkey_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys.nr = dst - keys.d;
err:
	return keys;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);
	BUG_ON(seq > j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int __bch2_journal_replay_key(struct btree_trans *trans,
				     enum btree_id id, unsigned level,
				     struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_node_iter(trans, id, k->k.p,
					BTREE_MAX_DEPTH, level,
					BTREE_ITER_INTENT);

	/*
	 * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
	 * extent_handle_overwrites() and extent_update_to_keys() - but we don't
	 * want that here, journal replay is supposed to treat extents like
	 * regular keys:
	 */
	__bch2_btree_iter_set_pos(iter, k->k.p, false);

	ret = bch2_btree_iter_traverse(iter) ?:
		bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
	unsigned commit_flags = BTREE_INSERT_NOFAIL|
		BTREE_INSERT_LAZY_RW;

	if (!k->allocated)
		commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

	return bch2_trans_do(c, NULL, NULL, commit_flags,
			     __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
}

static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, k->k.p,
				   BTREE_ITER_CACHED|
				   BTREE_ITER_CACHED_NOFILL|
				   BTREE_ITER_INTENT);
	ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	return bch2_trans_do(c, NULL, NULL,
			     BTREE_INSERT_NOFAIL|
			     BTREE_INSERT_USE_RESERVE|
			     BTREE_INSERT_LAZY_RW|
			     BTREE_INSERT_JOURNAL_REPLAY,
			     __bch2_alloc_replay_key(&trans, k));
}
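
/*
 * Note: this comparator orders replay so that updates to interior nodes come
 * before leaf updates (level descending), and within a level keys are replayed
 * in journal-sequence order.
 */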
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return cmp_int(r->level, l->level) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->btree_id, r->btree_id) ?:
		bkey_cmp(l->k->k.p, r->k->k.p);
}
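
/*
 * Note: replay proceeds in three passes - alloc btree keys (which only update
 * the btree key cache), then interior node updates, then leaf node updates -
 * and journal reclaim is only started once the interior nodes are consistent.
 */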
static int bch2_journal_replay(struct bch_fs *c,
			       struct journal_keys keys)
{
	struct journal *j = &c->journal;
	struct journal_key *i;
	u64 seq;
	int ret;

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

	if (keys.nr)
		replay_now_at(j, keys.journal_seq_base);

	seq = j->replay_journal_seq;

	/*
	 * First replay updates to the alloc btree - these will only update the
	 * btree key cache:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (!i->level && i->btree_id == BTREE_ID_ALLOC) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_alloc_replay_key(c, i->k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Next replay updates to interior btree nodes:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_journal_replay_key(c, i);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now that the btree is in a consistent state, we can start journal
	 * reclaim (which will be flushing entries from the btree key cache back
	 * to the btree:
	 */
	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
	journal_reclaim_kick(j);

	j->replay_journal_seq = seq;

	/*
	 * Now replay leaf node updates:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level || i->btree_id == BTREE_ID_ALLOC)
			continue;

		replay_now_at(j, keys.journal_seq_base + i->journal_seq);

		ret = bch2_journal_replay_key(c, i);
		if (ret)
			goto err;
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	return bch2_journal_error(j);
err:
	bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
		ret, bch2_btree_ids[i->btree_id], i->level);
	return ret;
}

/* journal replay early: */
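
/*
 * Note: this applies the non-btree journal entry types (btree roots, usage
 * counters, blacklists, clocks) before journal replay proper; entries may come
 * either from the superblock clean section or from the journal itself.
 */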
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case FS_USAGE_RESERVED:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case FS_USAGE_INODES:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case FS_USAGE_KEY_VERSION:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, u->dev);
		unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
		unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
			sizeof(struct jset_entry_dev_usage_type);
		unsigned i;

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);
		ca->usage_base->buckets_unavailable	= le64_to_cpu(u->buckets_unavailable);

		for (i = 0; i < nr_types; i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, clock->time);
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean,
				struct list_head *journal)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry(i, journal, list) {
			if (i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}
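
/*
 * Note: this cross-checks the clean section in the superblock against the
 * last journal entry after a clean shutdown; a sequence-number mismatch
 * discards the clean section, and mismatched btree roots are reported as
 * fixable fsck errors.
 */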
static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		char buf1[200], buf2[200];
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb: l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
			l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
	}
fsck_err:
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	if (le16_to_cpu(c->disk_sb.sb->version) <
	    bcachefs_metadata_version_bkey_renumber)
		bch2_sb_clean_renumber(clean, READ);

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}
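
/*
 * Note: for each btree root recorded as alive, the root node is read here;
 * errors on the alloc btree are treated as ignorable fsck errors and the
 * BCH_COMPAT_FEAT_ALLOC_INFO compat bit is cleared.
 */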
static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_ALLOC &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_ALLOC
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_ALLOC)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_ALLOC
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_ALLOC)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}
int bch2_fs_recovery(struct bch_fs *c)
|
|
|
|
{
|
|
|
|
const char *err = "cannot allocate memory";
|
2019-04-05 01:53:12 +00:00
|
|
|
struct bch_sb_field_clean *clean = NULL;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 14:59:58 +00:00
|
|
|
struct jset *last_journal_entry = NULL;
|
|
|
|
u64 blacklist_seq, journal_seq;
|
2021-01-09 02:20:58 +00:00
|
|
|
bool write_sb = false;
|
2017-03-17 06:18:50 +00:00
|
|
|
int ret;
|
|
|
|
|
2019-04-05 01:53:12 +00:00
|
|
|
if (c->sb.clean)
|
|
|
|
clean = read_superblock_clean(c);
|
|
|
|
ret = PTR_ERR_OR_ZERO(clean);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
if (c->sb.clean)
|
2017-03-17 06:18:50 +00:00
|
|
|
bch_info(c, "recovering from clean shutdown, journal seq %llu",
|
|
|
|
le64_to_cpu(clean->journal_seq));
|
|
|
|
|
2021-02-03 20:31:17 +00:00
|
|
|
if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
|
|
|
|
bch_info(c, "alloc_v2 feature bit not set, fsck required");
|
|
|
|
c->opts.fsck = true;
|
|
|
|
c->opts.fix_errors = FSCK_OPT_YES;
|
|
|
|
c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_alloc_v2;
|
|
|
|
}
|
|
|
|
|
2020-07-20 19:51:05 +00:00
|
|
|
if (!c->replicas.entries ||
|
|
|
|
c->opts.rebuild_replicas) {
|
2019-04-05 01:53:12 +00:00
|
|
|
bch_info(c, "building replicas info");
|
|
|
|
set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
|
|
|
|
}
|
|
|
|
|
2020-11-14 14:59:58 +00:00
|
|
|
ret = bch2_blacklist_table_initialize(c);
|
|
|
|
if (ret) {
|
|
|
|
bch_err(c, "error initializing blacklist table");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2020-03-28 22:26:01 +00:00
|
|
|
if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
|
2020-11-14 14:59:58 +00:00
|
|
|
struct journal_replay *i;
|
2019-04-05 01:53:12 +00:00
|
|
|
|
2020-11-14 14:59:58 +00:00
|
|
|
ret = bch2_journal_read(c, &c->journal_entries,
|
|
|
|
&blacklist_seq, &journal_seq);
|
2017-03-17 06:18:50 +00:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
2020-11-14 14:59:58 +00:00
|
|
|
list_for_each_entry_reverse(i, &c->journal_entries, list)
|
|
|
|
if (!i->ignore) {
|
|
|
|
last_journal_entry = &i->j;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mustfix_fsck_err_on(c->sb.clean &&
|
|
|
|
last_journal_entry &&
|
|
|
|
!journal_entry_empty(last_journal_entry), c,
|
2019-03-11 18:59:58 +00:00
|
|
|
"filesystem marked clean but journal not empty")) {
|
|
|
|
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
|
|
|
|
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
|
|
|
|
c->sb.clean = false;
|
|
|
|
}
|
2019-04-05 01:53:12 +00:00
|
|
|
|
2020-11-14 14:59:58 +00:00
|
|
|
if (!last_journal_entry) {
|
|
|
|
fsck_err_on(!c->sb.clean, c, "no journal entries found");
|
|
|
|
goto use_clean;
|
2019-04-05 01:53:12 +00:00
|
|
|
}
|
|
|
|
|
2020-03-25 20:12:33 +00:00
|
|
|
c->journal_keys = journal_keys_sort(&c->journal_entries);
|
|
|
|
if (!c->journal_keys.d) {
|
2019-04-12 02:39:39 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2020-11-14 14:59:58 +00:00
|
|
|
if (c->sb.clean && last_journal_entry) {
|
|
|
|
ret = verify_superblock_clean(c, &clean,
|
|
|
|
last_journal_entry);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
use_clean:
|
|
|
|
if (!clean) {
|
|
|
|
bch_err(c, "no superblock clean section found");
|
|
|
|
ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
|
2019-03-29 23:13:54 +00:00
|
|
|
goto err;
|
2019-04-05 01:53:12 +00:00
|
|
|
|
2020-11-14 14:59:58 +00:00
|
|
|
}
|
|
|
|
blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
|
2019-04-05 01:53:12 +00:00
|
|
|
}
|
|
|
|
|
2019-12-30 19:37:25 +00:00
|
|
|
if (!c->sb.clean &&
|
|
|
|
!(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
|
|
|
|
bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2020-10-25 01:20:16 +00:00
|
|
|
if (c->opts.reconstruct_alloc) {
|
|
|
|
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
|
|
|
|
drop_alloc_keys(&c->journal_keys);
|
|
|
|
}
|
|
|
|
|
2020-03-25 20:12:33 +00:00
|
|
|
ret = journal_replay_early(c, clean, &c->journal_entries);
|
2019-04-05 01:53:12 +00:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
2020-11-14 14:59:58 +00:00
|
|
|
/*
|
|
|
|
* After an unclean shutdown, skip the next few journal sequence
|
|
|
|
* numbers as they may have been referenced by btree writes that
|
|
|
|
* happened before their corresponding journal writes - those btree
|
|
|
|
* writes need to be ignored, by skipping and blacklisting the next few
|
|
|
|
* journal sequence numbers:
|
|
|
|
*/
|
|
|
|
if (!c->sb.clean)
|
|
|
|
journal_seq += 8;
|
|
|
|
|
|
|
|
if (blacklist_seq != journal_seq) {
|
2019-04-05 01:53:12 +00:00
|
|
|
ret = bch2_journal_seq_blacklist_add(c,
|
2020-11-14 14:59:58 +00:00
|
|
|
blacklist_seq, journal_seq);
|
2019-04-05 01:53:12 +00:00
|
|
|
if (ret) {
|
|
|
|
bch_err(c, "error creating new journal seq blacklist entry");
|
2019-03-29 23:13:54 +00:00
|
|
|
goto err;
|
2019-04-05 01:53:12 +00:00
|
|
|
}
|
2019-06-18 23:37:39 +00:00
|
|
|
}
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2019-04-12 02:39:39 +00:00
|
|
|
ret = bch2_fs_journal_start(&c->journal, journal_seq,
|
2020-03-25 20:12:33 +00:00
|
|
|
&c->journal_entries);
|
2019-03-29 23:13:54 +00:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2019-03-29 23:13:54 +00:00
|
|
|
ret = read_btree_roots(c);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2019-03-11 18:59:58 +00:00
|
|
|
bch_verbose(c, "starting alloc read");
|
2017-03-17 06:18:50 +00:00
|
|
|
err = "error reading allocation information";
|
2020-03-25 20:12:33 +00:00
|
|
|
ret = bch2_alloc_read(c, &c->journal_keys);
|
2017-03-17 06:18:50 +00:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
2019-03-11 18:59:58 +00:00
|
|
|
bch_verbose(c, "alloc read done");
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2019-02-09 21:50:53 +00:00
|
|
|
bch_verbose(c, "starting stripes_read");
|
2019-03-11 18:59:58 +00:00
|
|
|
err = "error reading stripes";
|
2020-03-25 20:12:33 +00:00
|
|
|
ret = bch2_stripes_read(c, &c->journal_keys);
|
2018-11-23 07:50:33 +00:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
2019-02-09 21:50:53 +00:00
|
|
|
bch_verbose(c, "stripes_read done");
|
2018-11-26 01:53:51 +00:00
|
|
|
|
|
|
|
set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
|
2018-11-23 07:50:33 +00:00
|
|
|
|
2019-03-29 23:13:54 +00:00
|
|
|
if (c->opts.fsck ||
|
|
|
|
!(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
|
2021-01-22 02:51:42 +00:00
|
|
|
!(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA)) ||
|
2019-03-29 23:13:54 +00:00
|
|
|
test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
|
2019-04-17 22:21:19 +00:00
|
|
|
bch_info(c, "starting mark and sweep");
|
2019-03-11 18:59:58 +00:00
|
|
|
err = "error in mark and sweep";
|
2021-01-27 01:15:46 +00:00
|
|
|
ret = bch2_gc(c, true);
|
2020-10-17 01:36:26 +00:00
|
|
|
if (ret)
|
2021-01-09 02:20:58 +00:00
|
|
|
goto err;
|
2019-02-06 16:56:51 +00:00
|
|
|
bch_verbose(c, "mark and sweep done");
|
|
|
|
}
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2021-01-14 21:19:23 +00:00
|
|
|
bch2_stripes_heap_start(c);
|
|
|
|
|
2018-11-04 02:00:50 +00:00
|
|
|
clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
|
2019-02-06 16:56:51 +00:00
|
|
|
set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
|
2018-11-04 02:00:50 +00:00
|
|
|
|
2019-01-25 00:09:49 +00:00
|
|
|
/*
|
|
|
|
* Skip past versions that might have been used (as nonces),
|
|
|
|
* but hadn't had their pointers written:
|
|
|
|
*/
|
|
|
|
if (c->sb.encryption_type && !c->sb.clean)
|
|
|
|
atomic64_add(1 << 16, &c->key_version);
|
|
|
|
|
2019-04-17 22:21:19 +00:00
|
|
|
if (c->opts.norecovery)
|
2019-03-29 23:13:54 +00:00
|
|
|
goto out;
|
|
|
|
|
2019-04-17 22:21:19 +00:00
|
|
|
bch_verbose(c, "starting journal replay");
|
2017-03-17 06:18:50 +00:00
|
|
|
err = "journal replay failed";
|
2020-03-25 20:12:33 +00:00
|
|
|
ret = bch2_journal_replay(c, c->journal_keys);
|
2017-03-17 06:18:50 +00:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
bch_verbose(c, "journal replay done");
|
|
|
|
|
2021-01-09 02:20:58 +00:00
|
|
|
if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
|
|
|
|
!c->opts.nochanges) {
|
2019-04-17 22:21:19 +00:00
|
|
|
/*
|
|
|
|
* note that even when the filesystem was clean there might be work
|
|
|
|
* to do here, if we ran gc (because of fsck) which recalculated
|
|
|
|
* oldest_gen:
|
|
|
|
*/
|
|
|
|
bch_verbose(c, "writing allocation info");
|
|
|
|
err = "error writing out alloc info";
|
2020-10-17 01:36:26 +00:00
|
|
|
ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
|
|
|
|
bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
|
2019-04-17 22:21:19 +00:00
|
|
|
if (ret) {
|
|
|
|
bch_err(c, "error writing alloc info");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
bch_verbose(c, "alloc write done");
|
2019-03-11 18:59:58 +00:00
|
|
|
}
|
|
|
|
|
2019-04-17 22:21:19 +00:00
|
|
|
if (!c->sb.clean) {
|
2019-12-29 01:17:06 +00:00
|
|
|
if (!(c->sb.features & (1 << BCH_FEATURE_atomic_nlink))) {
|
2019-04-17 22:21:19 +00:00
|
|
|
bch_info(c, "checking inode link counts");
|
|
|
|
err = "error in recovery";
|
|
|
|
ret = bch2_fsck_inode_nlink(c);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
bch_verbose(c, "check inodes done");
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2019-04-17 22:21:19 +00:00
|
|
|
} else {
|
|
|
|
bch_verbose(c, "checking for deleted inodes");
|
|
|
|
err = "error in recovery";
|
|
|
|
ret = bch2_fsck_walk_inodes_only(c);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
bch_verbose(c, "check inodes done");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (c->opts.fsck) {
|
|
|
|
bch_info(c, "starting fsck");
|
|
|
|
err = "error in fsck";
|
|
|
|
ret = bch2_fsck_full(c);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
bch_verbose(c, "fsck done");
|
|
|
|
}
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2019-03-29 23:13:54 +00:00
|
|
|
if (enabled_qtypes(c)) {
|
2019-04-17 22:21:19 +00:00
|
|
|
bch_verbose(c, "reading quotas");
|
2019-03-29 23:13:54 +00:00
|
|
|
ret = bch2_fs_quota_read(c);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
bch_verbose(c, "quotas done");
|
|
|
|
}
|
|
|
|
|
2018-11-01 19:10:01 +00:00
|
|
|
mutex_lock(&c->sb_lock);
|
|
|
|
if (c->opts.version_upgrade) {
|
|
|
|
if (c->sb.version < bcachefs_metadata_version_new_versioning)
|
|
|
|
c->disk_sb.sb->version_min =
|
|
|
|
le16_to_cpu(bcachefs_metadata_version_min);
|
|
|
|
c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
|
2020-02-26 22:34:27 +00:00
|
|
|
c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
|
2019-03-11 18:59:58 +00:00
|
|
|
write_sb = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!test_bit(BCH_FS_ERROR, &c->flags)) {
|
|
|
|
c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
|
|
|
|
write_sb = true;
|
2018-07-15 01:06:51 +00:00
|
|
|
}
|
|
|
|
|
2019-03-28 13:34:55 +00:00
|
|
|
if (c->opts.fsck &&
|
|
|
|
!test_bit(BCH_FS_ERROR, &c->flags)) {
|
2019-12-29 01:17:06 +00:00
|
|
|
c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
|
2019-03-28 13:34:55 +00:00
|
|
|
SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
|
2019-03-11 18:59:58 +00:00
|
|
|
write_sb = true;
|
2019-03-28 13:34:55 +00:00
|
|
|
}
|
2019-03-11 18:59:58 +00:00
|
|
|
|
|
|
|
if (write_sb)
|
|
|
|
bch2_write_super(c);
|
2018-11-01 19:10:01 +00:00
|
|
|
mutex_unlock(&c->sb_lock);
|
2019-04-05 01:53:12 +00:00
|
|
|
|
|
|
|
if (c->journal_seq_blacklist_table &&
|
|
|
|
c->journal_seq_blacklist_table->nr > 128)
|
|
|
|
queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
|
2017-03-17 06:18:50 +00:00
|
|
|
out:
|
2019-04-17 22:21:19 +00:00
|
|
|
ret = 0;
|
|
|
|
err:
|
|
|
|
fsck_err:
|
2019-09-07 16:42:27 +00:00
|
|
|
set_bit(BCH_FS_FSCK_DONE, &c->flags);
|
2019-04-17 22:21:19 +00:00
|
|
|
bch2_flush_fsck_errs(c);
|
2019-09-07 16:42:27 +00:00
|
|
|
|
2020-03-25 20:12:33 +00:00
|
|
|
if (!c->opts.keep_journal) {
|
|
|
|
bch2_journal_keys_free(&c->journal_keys);
|
|
|
|
bch2_journal_entries_free(&c->journal_entries);
|
|
|
|
}
|
2017-03-17 06:18:50 +00:00
|
|
|
kfree(clean);
|
2019-04-17 22:21:19 +00:00
|
|
|
if (ret)
|
|
|
|
bch_err(c, "Error in recovery: %s (%i)", err, ret);
|
|
|
|
else
|
|
|
|
bch_verbose(c, "ret %i", ret);
|
2017-03-17 06:18:50 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int bch2_fs_initialize(struct bch_fs *c)
|
|
|
|
{
|
|
|
|
struct bch_inode_unpacked root_inode, lostfound_inode;
|
|
|
|
struct bkey_inode_buf packed_inode;
|
|
|
|
struct qstr lostfound = QSTR("lost+found");
|
|
|
|
const char *err = "cannot allocate memory";
|
|
|
|
struct bch_dev *ca;
|
|
|
|
LIST_HEAD(journal);
|
|
|
|
unsigned i;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
bch_notice(c, "initializing new filesystem");
|
|
|
|
|
2019-01-25 01:25:40 +00:00
|
|
|
mutex_lock(&c->sb_lock);
|
|
|
|
for_each_online_member(ca, c, i)
|
|
|
|
bch2_mark_dev_superblock(c, ca, 0);
|
|
|
|
mutex_unlock(&c->sb_lock);
|
|
|
|
|
2020-06-03 20:20:22 +00:00
|
|
|
mutex_lock(&c->sb_lock);
|
|
|
|
c->disk_sb.sb->version = c->disk_sb.sb->version_min =
|
|
|
|
le16_to_cpu(bcachefs_metadata_version_current);
|
|
|
|
c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
|
|
|
|
c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
|
|
|
|
|
|
|
|
bch2_write_super(c);
|
|
|
|
mutex_unlock(&c->sb_lock);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
|
2019-02-11 00:16:55 +00:00
|
|
|
set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2018-11-24 22:09:44 +00:00
|
|
|
for (i = 0; i < BTREE_ID_NR; i++)
|
|
|
|
bch2_btree_root_alloc(c, i);
|
|
|
|
|
2019-10-05 16:54:53 +00:00
|
|
|
set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
|
|
|
|
set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
err = "unable to allocate journal buckets";
|
2019-03-01 03:33:06 +00:00
|
|
|
for_each_online_member(ca, c, i) {
|
|
|
|
ret = bch2_dev_journal_alloc(ca);
|
|
|
|
if (ret) {
|
2017-03-17 06:18:50 +00:00
|
|
|
percpu_ref_put(&ca->io_ref);
|
|
|
|
goto err;
|
|
|
|
}
|
2019-03-01 03:33:06 +00:00
|
|
|
}
|
2017-03-17 06:18:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* journal_res_get() will crash if called before this has
|
|
|
|
* set up the journal.pin FIFO and journal.cur pointer:
|
|
|
|
*/
|
2019-04-05 01:53:12 +00:00
|
|
|
bch2_fs_journal_start(&c->journal, 1, &journal);
|
2017-03-17 06:18:50 +00:00
|
|
|
bch2_journal_set_replay_done(&c->journal);
|
|
|
|
|
2020-10-17 01:36:26 +00:00
|
|
|
err = "error going read-write";
|
|
|
|
ret = bch2_fs_read_write_early(c);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write out the superblock and journal buckets, now that we can do
|
|
|
|
* btree updates
|
|
|
|
*/
|
|
|
|
err = "error writing alloc info";
|
|
|
|
ret = bch2_alloc_write(c, 0);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
bch2_inode_init(c, &root_inode, 0, 0,
|
|
|
|
S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
|
|
|
|
root_inode.bi_inum = BCACHEFS_ROOT_INO;
|
2020-11-06 04:39:33 +00:00
|
|
|
bch2_inode_pack(c, &packed_inode, &root_inode);
|
2017-03-17 06:18:50 +00:00
|
|
|
|
|
|
|
err = "error creating root directory";
|
|
|
|
ret = bch2_btree_insert(c, BTREE_ID_INODES,
|
|
|
|
&packed_inode.inode.k_i,
|
2020-10-17 01:36:26 +00:00
|
|
|
NULL, NULL, 0);
|
2017-03-17 06:18:50 +00:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
2019-10-02 22:35:36 +00:00
|
|
|
bch2_inode_init_early(c, &lostfound_inode);
|
2017-03-17 06:18:50 +00:00
|
|
|
|
|
|
|
err = "error creating lost+found";
|
2019-12-23 04:39:28 +00:00
|
|
|
ret = bch2_trans_do(c, NULL, NULL, 0,
|
2019-10-02 22:35:36 +00:00
|
|
|
bch2_create_trans(&trans, BCACHEFS_ROOT_INO,
|
|
|
|
&root_inode, &lostfound_inode,
|
|
|
|
&lostfound,
|
2019-11-10 03:15:40 +00:00
|
|
|
0, 0, S_IFDIR|0700, 0,
|
2019-10-02 22:35:36 +00:00
|
|
|
NULL, NULL));
|
2021-02-23 20:16:41 +00:00
|
|
|
if (ret) {
|
|
|
|
bch_err(c, "error creating lost+found");
|
2017-03-17 06:18:50 +00:00
|
|
|
goto err;
|
2021-02-23 20:16:41 +00:00
|
|
|
}
|
2017-03-17 06:18:50 +00:00
|
|
|
|
|
|
|
if (enabled_qtypes(c)) {
|
|
|
|
ret = bch2_fs_quota_read(c);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = "error writing first journal entry";
|
|
|
|
ret = bch2_journal_meta(&c->journal);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
mutex_lock(&c->sb_lock);
|
|
|
|
SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
|
|
|
|
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
|
|
|
|
|
|
|
|
bch2_write_super(c);
|
|
|
|
mutex_unlock(&c->sb_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
err:
|
2018-11-05 03:09:51 +00:00
|
|
|
pr_err("Error initializing new filesystem: %s (%i)", err, ret);
|
2017-03-17 06:18:50 +00:00
|
|
|
return ret;
|
|
|
|
}
|