// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 latency_capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
	s64 latency_over = io_latency - latency_threshold;

	if (latency_threshold && latency_over > 0) {
		/*
		 * bump up congested by approximately latency_over * 4 /
		 * latency_threshold - we don't need much accuracy here so don't
		 * bother with the divide:
		 */
		if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);

		ca->congested_last = now;
	} else if (atomic_read(&ca->congested) > 0) {
		atomic_dec(&ca->congested);
	}
}

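/*
 * Called on IO completion: fold the observed latency into an EWMA of this
 * device's IO latency (skipping the atomic update when the sample is close
 * to the current average), then update congestion and time stats.
 */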
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new;

	old = atomic64_read(latency);
	do {
		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0U << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while (!atomic64_try_cmpxchg(latency, &old, new));

	bch2_congested_acct(ca, io_latency, now, rw);

	__bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
}

#endif

/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
	struct page *page;

	if (likely(!*using_mempool)) {
		page = alloc_page(GFP_NOFS);
		if (unlikely(!page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;

		}
	} else {
pool_alloc:
		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
	}

	return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t size)
{
	bool using_mempool = false;

	while (size) {
		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
		unsigned len = min_t(size_t, PAGE_SIZE, size);

		BUG_ON(!bio_add_page(bio, page, len, 0));
		size -= len;
	}

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}

/* Extent update path: */

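/*
 * Walk the existing extents overlapped by @new and compute how the inode's
 * i_sectors and on disk sector counts will change if @new is inserted; also
 * note whether the update can increase disk usage, which determines whether
 * the disk reservation needs to check for -ENOSPC.
 */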
int bch2_sum_sector_overwrites(struct btree_trans *trans,
			       struct btree_iter *extent_iter,
			       struct bkey_i *new,
			       bool *usage_increasing,
			       s64 *i_sectors_delta,
			       s64 *disk_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
	bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
	int ret = 0;

	*usage_increasing	= false;
	*i_sectors_delta	= 0;
	*disk_sectors_delta	= 0;

	bch2_trans_copy_iter(&iter, extent_iter);

	for_each_btree_key_upto_continue_norestart(iter,
				new->k.p, BTREE_ITER_slots, old, ret) {
		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
			max(bkey_start_offset(&new->k),
			    bkey_start_offset(old.k));

		*i_sectors_delta += sectors *
			(bkey_extent_is_allocation(&new->k) -
			 bkey_extent_is_allocation(old.k));

		*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
		*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
			? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
			: 0;

		if (!*usage_increasing &&
		    (new->k.p.snapshot != old.k->p.snapshot ||
		     new_replicas > bch2_bkey_replicas(c, old) ||
		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
			*usage_increasing = true;

		if (bkey_ge(old.k->p, new->k.p))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
						    struct btree_iter *extent_iter,
						    u64 new_i_size,
						    s64 i_sectors_delta)
{
	/*
	 * Crazy performance optimization:
	 * Every extent update needs to also update the inode: the inode trigger
	 * will set bi->journal_seq to the journal sequence number of this
	 * transaction - for fsync.
	 *
	 * But if that's the only reason we're updating the inode (we're not
	 * updating bi_size or bi_sectors), then we don't need the inode update
	 * to be journalled - if we crash, the bi_journal_seq update will be
	 * lost, but that's fine.
	 */
	unsigned inode_update_flags = BTREE_UPDATE_nojournal;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			      SPOS(0,
				   extent_iter->pos.inode,
				   extent_iter->snapshot),
			      BTREE_ITER_cached);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ret;

	/*
	 * varint_decode_fast(), in the inode .invalid method, reads up to 7
	 * bytes past the end of the buffer:
	 */
	struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
	ret = PTR_ERR_OR_ZERO(k_mut);
	if (unlikely(ret))
		goto err;

	bkey_reassemble(k_mut, k);

	if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
		k_mut = bch2_inode_to_v3(trans, k_mut);
		ret = PTR_ERR_OR_ZERO(k_mut);
		if (unlikely(ret))
			goto err;
	}

	struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);

	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
	    new_i_size > le64_to_cpu(inode->v.bi_size)) {
		inode->v.bi_size = cpu_to_le64(new_i_size);
		inode_update_flags = 0;
	}

	if (i_sectors_delta) {
		le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
		inode_update_flags = 0;
	}

	if (inode->k.p.snapshot != iter.snapshot) {
		inode->k.p.snapshot = iter.snapshot;
		inode_update_flags = 0;
	}

	ret = bch2_trans_update(trans, &iter, &inode->k_i,
				BTREE_UPDATE_internal_snapshot_node|
				inode_update_flags);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

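/*
 * Insert a single extent as one transaction: trim @k to what can be committed
 * atomically, take any additional disk reservation needed, update the inode's
 * i_size/i_sectors and commit.
 */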
int bch2_extent_update(struct btree_trans *trans,
		       subvol_inum inum,
		       struct btree_iter *iter,
		       struct bkey_i *k,
		       struct disk_reservation *disk_res,
		       u64 new_i_size,
		       s64 *i_sectors_delta_total,
		       bool check_enospc)
{
	struct bpos next_pos;
	bool usage_increasing;
	s64 i_sectors_delta = 0, disk_sectors_delta = 0;
	int ret;

	/*
	 * This traverses the iterator for us without changing iter->path->pos
	 * to search_key() (which is pos + 1 for extents): we want there to be
	 * a path already traversed at iter->pos because
	 * bch2_trans_extent_update() will use it to attempt extent merging
	 */
	ret = __bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	ret = bch2_extent_trim_atomic(trans, iter, k);
	if (ret)
		return ret;

	next_pos = k->k.p;

	ret = bch2_sum_sector_overwrites(trans, iter, k,
			&usage_increasing,
			&i_sectors_delta,
			&disk_sectors_delta);
	if (ret)
		return ret;

	if (disk_res &&
	    disk_sectors_delta > (s64) disk_res->sectors) {
		ret = bch2_disk_reservation_add(trans->c, disk_res,
					disk_sectors_delta - disk_res->sectors,
					!check_enospc || !usage_increasing
					? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			return ret;
	}

	/*
	 * Note:
	 * We always have to do an inode update - even when i_size/i_sectors
	 * aren't changing - for fsync to work properly; fsync relies on
	 * inode->bi_journal_seq which is updated by the trigger code:
	 */
	ret =   bch2_extent_update_i_size_sectors(trans, iter,
						  min(k->k.p.offset << 9, new_i_size),
						  i_sectors_delta) ?:
		bch2_trans_update(trans, iter, k, 0) ?:
		bch2_trans_commit(trans, disk_res, NULL,
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc);
	if (unlikely(ret))
		return ret;

	if (i_sectors_delta_total)
		*i_sectors_delta_total += i_sectors_delta;
	bch2_btree_iter_set_pos(iter, next_pos);
	return 0;
}

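/*
 * Default index update path for writes: insert each completed key into the
 * extents btree, retrying on transaction restart and advancing past keys
 * that only partially committed.
 */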
static int bch2_write_index_default(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct bkey_buf sk;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	subvol_inum inum = {
		.subvol = op->subvol,
		.inum	= k->k.p.inode,
	};
	int ret;

	BUG_ON(!inum.subvol);

	bch2_bkey_buf_init(&sk);

	do {
		bch2_trans_begin(trans);

		k = bch2_keylist_front(keys);
		bch2_bkey_buf_copy(&sk, c, k);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
						  &sk.k->k.p.snapshot);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
				     bkey_start_pos(&sk.k->k),
				     BTREE_ITER_slots|BTREE_ITER_intent);

		ret =   bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
			bch2_extent_update(trans, inum, &iter, sk.k,
					&op->res,
					op->new_i_size, &op->i_sectors_delta,
					op->flags & BCH_WRITE_CHECK_ENOSPC);
		bch2_trans_iter_exit(trans, &iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		if (bkey_ge(iter.pos, k->k.p))
			bch2_keylist_pop_front(&op->insert_keys);
		else
			bch2_cut_front(iter.pos, k);
	} while (!bch2_keylist_empty(keys));

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

/* Writes */

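/*
 * Submit the write bio to every device @k points to, cloning the bio for all
 * but the last replica; pointers to missing devices complete immediately with
 * BLK_STS_REMOVED.
 */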
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       enum bch_data_type type,
			       const struct bkey_i *k,
			       bool nocow)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
	struct bch_write_bio *n;

	BUG_ON(c->opts.nochanges);

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = nocow
			? bch2_dev_have_ref(c, ptr->dev)
			: bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);

		if (to_entry(ptr + 1) < ptrs.end) {
			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->parent		= wbio;
			n->split		= true;
			n->bounce		= false;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			bio_inc_remaining(&wbio->bio);
		} else {
			n = wbio;
			n->split		= false;
		}

		n->c			= c;
		n->dev			= ptr->dev;
		n->have_ioref		= ca != NULL;
		n->nocow		= nocow;
		n->submit_time		= local_clock();
		n->inode_offset		= bkey_start_offset(&k->k);
		if (nocow)
			n->nocow_bucket	= PTR_BUCKET_NR(ca, ptr);
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (likely(n->have_ioref)) {
			this_cpu_add(ca->io_done->sectors[WRITE][type],
				     bio_sectors(&n->bio));

			bio_set_dev(&n->bio, ca->disk_sb.bdev);

			if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
				bio_endio(&n->bio);
				continue;
			}

			submit_bio(&n->bio);
		} else {
			n->bio.bi_status	= BLK_STS_REMOVED;
			bio_endio(&n->bio);
		}
	}
}

static void __bch2_write(struct bch_write_op *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	EBUG_ON(op->open_buckets.nr);

	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
	bch2_disk_reservation_put(c, &op->res);

	if (!(op->flags & BCH_WRITE_MOVE))
		bch2_write_ref_put(c, BCH_WRITE_REF_write);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);

	EBUG_ON(cl->parent);
	closure_debug_destroy(cl);
	if (op->end_io)
		op->end_io(op);
}

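/*
 * Drop pointers to devices that reported write errors; if a key loses all of
 * its pointers the write has failed and we return -EIO.
 */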
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *src, *dst = keys->keys, *n;

	for (src = keys->keys; src != keys->top; src = n) {
		n = bkey_next(src);

		if (bkey_extent_is_direct_data(&src->k)) {
			bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
					    test_bit(ptr->dev, op->failed.d));

			if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
				return -EIO;
		}

		if (dst != src)
			memmove_u64s_down(dst, src, src->k.u64s);
		dst = bkey_next(dst);
	}

	keys->top = dst;
	return 0;
}

/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:		bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	unsigned dev;
	int ret = 0;

	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		ret = bch2_write_drop_io_error_ptrs(op);
		if (ret)
			goto err;
	}

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);

		ret = !(op->flags & BCH_WRITE_MOVE)
			? bch2_write_index_default(op)
			: bch2_data_update_index_update(op);

		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"%s write error while doing btree update: %s",
				op->flags & BCH_WRITE_MOVE ? "move" : "user",
				bch2_err_str(ret));
		}

		if (ret)
			goto err;
	}
out:
	/* If a bucket wasn't written, we can't erasure code it: */
	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
		bch2_open_bucket_write_error(c, &op->open_buckets, dev);

	bch2_open_buckets_put(c, &op->open_buckets);
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	op->flags |= BCH_WRITE_SUBMITTED;
	goto out;
}

static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
	if (state != wp->state) {
		u64 now = ktime_get_ns();

		if (wp->last_state_change &&
		    time_after64(now, wp->last_state_change))
			wp->time[wp->state] += now - wp->last_state_change;
		wp->state = state;
		wp->last_state_change = now;
	}
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
	enum write_point_state state;

	state = running			 ? WRITE_POINT_running :
		!list_empty(&wp->writes) ? WRITE_POINT_waiting_io
					 : WRITE_POINT_stopped;

	__wp_update_state(wp, state);
}

static CLOSURE_CALLBACK(bch2_write_index)
{
	closure_type(op, struct bch_write_op, cl);
	struct write_point *wp = op->wp;
	struct workqueue_struct *wq = index_update_wq(op);
	unsigned long flags;

	if ((op->flags & BCH_WRITE_SUBMITTED) &&
	    (op->flags & BCH_WRITE_MOVE))
		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

	spin_lock_irqsave(&wp->writes_lock, flags);
	if (wp->state == WRITE_POINT_waiting_io)
		__wp_update_state(wp, WRITE_POINT_waiting_work);
	list_add_tail(&op->wp_list, &wp->writes);
	spin_unlock_irqrestore(&wp->writes_lock, flags);

	queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
	op->wp = wp;

	if (wp->state == WRITE_POINT_stopped) {
		spin_lock_irq(&wp->writes_lock);
		__wp_update_state(wp, WRITE_POINT_waiting_io);
		spin_unlock_irq(&wp->writes_lock);
	}
}

void bch2_write_point_do_index_updates(struct work_struct *work)
{
	struct write_point *wp =
		container_of(work, struct write_point, index_update_work);
	struct bch_write_op *op;

	while (1) {
		spin_lock_irq(&wp->writes_lock);
		op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
		if (op)
			list_del(&op->wp_list);
		wp_update_state(wp, op != NULL);
		spin_unlock_irq(&wp->writes_lock);

		if (!op)
			break;

		op->flags |= BCH_WRITE_IN_WORKER;

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_SUBMITTED))
			__bch2_write(op);
		else
			bch2_write_done(&op->cl);
	}
}

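/*
 * Per-replica write completion: record errors and latency, release the nocow
 * bucket lock and the device ref if held, then complete the parent bio or
 * drop the write op's closure ref.
 */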
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl		= bio->bi_private;
	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= wbio->have_ioref
		? bch2_dev_have_ref(c, wbio->dev)
		: NULL;

	if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
				    op->pos.inode,
				    wbio->inode_offset << 9,
				    "data write error: %s",
				    bch2_blk_status_to_str(bio->bi_status))) {
		set_bit(wbio->dev, op->failed.d);
		op->flags |= BCH_WRITE_IO_ERROR;
	}

	if (wbio->nocow) {
		bch2_bucket_nocow_unlock(&c->nocow_locks,
					 POS(ca->dev_idx, wbio->nocow_bucket),
					 BUCKET_NOCOW_LOCK_UPDATE);
		set_bit(wbio->dev, op->devs_need_flush->d);
	}

	if (wbio->have_ioref) {
		bch2_latency_acct(ca, wbio->submit_time, WRITE);
		percpu_ref_put(&ca->io_ref);
	}

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}

static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bkey_i_extent *e;

	op->pos.offset += crc.uncompressed_size;

	e = bkey_extent_init(op->insert_keys.top);
	e->k.p		= op->pos;
	e->k.size	= crc.uncompressed_size;
	e->k.version	= version;

	if (crc.csum_type ||
	    crc.compression_type ||
	    crc.nonce)
		bch2_extent_crc_append(&e->k_i, crc);

	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
				       op->flags & BCH_WRITE_CACHED);

	bch2_keylist_push(&op->insert_keys);
}

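/*
 * Allocate the bio we'll actually submit: sized to what the write point has
 * free, mapped onto @buf when the caller supplies an erasure coding buffer,
 * otherwise bounced through the page pool.
 */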
static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed,
					void *buf)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	unsigned pages = DIV_ROUND_UP(output_available +
				      (buf
				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
				       : 0), PAGE_SIZE);

	pages = min(pages, BIO_MAX_VECS);

	bio = bio_alloc_bioset(NULL, pages, 0,
			       GFP_NOFS, &c->bio_write);
	wbio			= wbio_init(bio);
	wbio->put_bio		= true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf	= src->bi_opf;

	if (buf) {
		bch2_bio_map(bio, buf, output_available);
		return bio;
	}

	wbio->bounce		= true;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->opts.encoded_extent_max));

	if (bio->bi_iter.bi_size < output_available)
		*page_alloc_failed =
			bch2_bio_alloc_pages(bio,
					     output_available -
					     bio->bi_iter.bi_size,
					     GFP_NOFS) != 0;

	return bio;
}

static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;
	int ret;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				  NULL, &new_crc,
				  op->crc.offset, op->crc.live_size,
				  new_csum_type);
	if (ret)
		return ret;

	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}

static int bch2_write_decrypt(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct nonce nonce = extent_nonce(op->version, op->crc);
	struct bch_csum csum;
	int ret;

	if (!bch2_csum_type_is_encryption(op->crc.csum_type))
		return 0;

	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
		return -EIO;

	ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	op->crc.csum_type = 0;
	op->crc.csum = (struct bch_csum) { 0, 0 };
	return ret;
}

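/*
 * Decide what to do with data that is already encoded (checksummed and/or
 * compressed): write it out unchanged if it fits as is, otherwise decrypt,
 * decompress and/or rechecksum so it can be re-encoded below.
 */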
static enum prep_encoded_ret {
	PREP_ENCODED_OK,
	PREP_ENCODED_ERR,
	PREP_ENCODED_CHECKSUM_ERR,
	PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;

	if (!(op->flags & BCH_WRITE_DATA_ENCODED))
		return PREP_ENCODED_OK;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
	     op->incompressible)) {
		if (!crc_is_compressed(op->crc) &&
		    op->csum_type != op->crc.csum_type &&
		    bch2_write_rechecksum(c, op, op->csum_type) &&
		    !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		return PREP_ENCODED_DO_WRITE;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (crc_is_compressed(op->crc)) {
		struct bch_csum csum;

		if (bch2_write_decrypt(op))
			return PREP_ENCODED_CHECKSUM_ERR;

		/* Last point we can still verify checksum: */
		csum = bch2_checksum_bio(c, op->crc.csum_type,
					 extent_nonce(op->version, op->crc),
					 bio);
		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
			return PREP_ENCODED_ERR;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if ((op->crc.live_size != op->crc.uncompressed_size ||
	     op->crc.csum_type != op->csum_type) &&
	    bch2_write_rechecksum(c, op, op->csum_type) &&
	    !c->opts.no_data_io)
		return PREP_ENCODED_CHECKSUM_ERR;

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if ((op->compression_opt ||
	     bch2_csum_type_is_encryption(op->crc.csum_type) !=
	     bch2_csum_type_is_encryption(op->csum_type)) &&
	    bch2_write_decrypt(op))
		return PREP_ENCODED_CHECKSUM_ERR;

	return PREP_ENCODED_OK;
}

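/*
 * Main per-extent write loop: carve the outgoing data into extents no larger
 * than the write point has room for, compressing, checksumming and/or
 * encrypting each one and appending the resulting keys to op->insert_keys.
 */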
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
			     struct bio **_dst)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	void *ec_buf;
	unsigned total_output = 0, total_input = 0;
	bool bounce = false;
	bool page_alloc_failed = false;
	int ret, more = 0;

	BUG_ON(!bio_sectors(src));

	ec_buf = bch2_writepoint_ec_buf(c, wp);

	switch (bch2_write_prep_encoded_data(op, wp)) {
	case PREP_ENCODED_OK:
		break;
	case PREP_ENCODED_ERR:
		ret = -EIO;
		goto err;
	case PREP_ENCODED_CHECKSUM_ERR:
		goto csum_err;
	case PREP_ENCODED_DO_WRITE:
		/* XXX look for bug here */
		if (ec_buf) {
			dst = bch2_write_bio_alloc(c, wp, src,
						   &page_alloc_failed,
						   ec_buf);
			bio_copy_data(dst, src);
			bounce = true;
		}
		init_append_extent(op, wp, op->version, op->crc);
		goto do_write;
	}

	if (ec_buf ||
	    op->compression_opt ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_PAGES_OWNED))) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}

	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc = { 0 };
		struct bversion version = op->version;
		size_t dst_len = 0, src_len = 0;

		if (page_alloc_failed &&
		    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
		    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
			break;

		BUG_ON(op->compression_opt &&
		       (op->flags & BCH_WRITE_DATA_ENCODED) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_opt && !bounce);

		crc.compression_type = op->incompressible
			? BCH_COMPRESSION_TYPE_incompressible
			: op->compression_opt
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_opt)
			: 0;
		if (!crc_is_compressed(crc)) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->opts.encoded_extent_max);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version);
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
|
|
|
}
|
|
|
|
|
|
|
|
if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
|
2018-02-23 21:26:10 +00:00
|
|
|
!crc_is_compressed(crc) &&
|
2017-03-17 06:18:50 +00:00
|
|
|
bch2_csum_type_is_encryption(op->crc.csum_type) ==
|
|
|
|
bch2_csum_type_is_encryption(op->csum_type)) {
|
2022-10-11 08:33:56 +00:00
|
|
|
u8 compression_type = crc.compression_type;
|
|
|
|
u16 nonce = crc.nonce;
|
2017-03-17 06:18:50 +00:00
|
|
|
/*
|
|
|
|
* Note: when we're using rechecksum(), we need to be
|
|
|
|
* checksumming @src because it has all the data our
|
|
|
|
* existing checksum covers - if we bounced (because we
|
|
|
|
* were trying to compress), @dst will only have the
|
|
|
|
* part of the data the new checksum will cover.
|
|
|
|
*
|
|
|
|
* But normally we want to be checksumming post bounce,
|
|
|
|
* because part of the reason for bouncing is so the
|
|
|
|
* data can't be modified (by userspace) while it's in
|
|
|
|
* flight.
|
|
|
|
*/
|
|
|
|
if (bch2_rechecksum_bio(c, src, version, op->crc,
|
|
|
|
&crc, &op->crc,
|
|
|
|
src_len >> 9,
|
|
|
|
bio_sectors(src) - (src_len >> 9),
|
|
|
|
op->csum_type))
|
|
|
|
goto csum_err;
|
2022-10-11 08:33:56 +00:00
|
|
|
/*
|
|
|
|
* rchecksum_bio sets compression_type on crc from op->crc,
|
|
|
|
* this isn't always correct as sometimes we're changing
|
|
|
|
* an extent from uncompressed to incompressible.
|
|
|
|
*/
|
|
|
|
crc.compression_type = compression_type;
|
|
|
|
crc.nonce = nonce;
|
2017-03-17 06:18:50 +00:00
|
|
|
} else {
|
|
|
|
if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
|
|
|
|
bch2_rechecksum_bio(c, src, version, op->crc,
|
|
|
|
NULL, &op->crc,
|
|
|
|
src_len >> 9,
|
|
|
|
bio_sectors(src) - (src_len >> 9),
|
|
|
|
op->crc.csum_type))
|
|
|
|
goto csum_err;
|
|
|
|
|
|
|
|
crc.compressed_size = dst_len >> 9;
|
|
|
|
crc.uncompressed_size = src_len >> 9;
|
|
|
|
crc.live_size = src_len >> 9;
|
|
|
|
|
|
|
|
swap(dst->bi_iter.bi_size, dst_len);
|
2022-02-19 05:42:12 +00:00
|
|
|
ret = bch2_encrypt_bio(c, op->csum_type,
|
|
|
|
extent_nonce(version, crc), dst);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
crc.csum = bch2_checksum_bio(c, op->csum_type,
|
|
|
|
extent_nonce(version, crc), dst);
|
|
|
|
crc.csum_type = op->csum_type;
|
|
|
|
swap(dst->bi_iter.bi_size, dst_len);
|
|
|
|
}
|
|
|
|
|
|
|
|
init_append_extent(op, wp, version, crc);
|
|
|
|
|
|
|
|
if (dst != src)
|
|
|
|
bio_advance(dst, dst_len);
|
|
|
|
bio_advance(src, src_len);
|
2018-11-01 19:13:19 +00:00
|
|
|
total_output += dst_len;
|
|
|
|
total_input += src_len;
|
2017-03-17 06:18:50 +00:00
|
|
|
} while (dst->bi_iter.bi_size &&
|
|
|
|
src->bi_iter.bi_size &&
|
|
|
|
wp->sectors_free &&
|
|
|
|
!bch2_keylist_realloc(&op->insert_keys,
|
|
|
|
op->inline_keys,
|
|
|
|
ARRAY_SIZE(op->inline_keys),
|
|
|
|
BKEY_EXTENT_U64s_MAX));
|
|
|
|
|
|
|
|
more = src->bi_iter.bi_size != 0;
|
|
|
|
|
|
|
|
dst->bi_iter = saved_iter;
|
|
|
|
|
2018-11-01 19:13:19 +00:00
|
|
|
if (dst == src && more) {
|
|
|
|
BUG_ON(total_output != total_input);
|
|
|
|
|
|
|
|
dst = bio_split(src, total_input >> 9,
|
2023-05-28 22:02:38 +00:00
|
|
|
GFP_NOFS, &c->bio_write);
|
2018-11-01 19:13:19 +00:00
|
|
|
wbio_init(dst)->put_bio = true;
|
|
|
|
/* copy WRITE_SYNC flag */
|
|
|
|
dst->bi_opf = src->bi_opf;
|
2017-03-17 06:18:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dst->bi_iter.bi_size = total_output;
|
|
|
|
do_write:
|
2019-11-02 01:16:51 +00:00
|
|
|
*_dst = dst;
|
2017-03-17 06:18:50 +00:00
|
|
|
return more;
|
|
|
|
csum_err:
|
2024-06-28 17:51:38 +00:00
|
|
|
bch_err_inum_offset_ratelimited(c,
|
|
|
|
op->pos.inode,
|
|
|
|
op->pos.offset << 9,
|
|
|
|
"%s write error: error verifying existing checksum while rewriting existing data (memory corruption?)",
|
2024-02-17 01:03:12 +00:00
|
|
|
op->flags & BCH_WRITE_MOVE ? "move" : "user");
|
2017-03-17 06:18:50 +00:00
|
|
|
ret = -EIO;
|
|
|
|
err:
|
2018-11-01 19:13:19 +00:00
|
|
|
if (to_wbio(dst)->bounce)
|
2017-03-17 06:18:50 +00:00
|
|
|
bch2_bio_free_pages_pool(c, dst);
|
2018-11-01 19:13:19 +00:00
|
|
|
if (to_wbio(dst)->put_bio)
|
2017-03-17 06:18:50 +00:00
|
|
|
bio_put(dst);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
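/*
 * A nocow (in place) write is only allowed if the existing key is a plain
 * extent whose pointers are not encoded (checksummed or compressed) and not
 * erasure coded, and which already has enough replicas to satisfy
 * op->opts.data_replicas.
 */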
static bool bch2_extent_is_writeable(struct bch_write_op *op,
				     struct bkey_s_c k)
{
	struct bch_fs *c = op->c;
	struct bkey_s_c_extent e;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	unsigned replicas = 0;

	if (k.k->type != KEY_TYPE_extent)
		return false;

	e = bkey_s_c_to_extent(k);

	rcu_read_lock();
	extent_for_each_ptr_decode(e, p, entry) {
		if (crc_is_encoded(p.crc) || p.has_ec) {
			rcu_read_unlock();
			return false;
		}

		replicas += bch2_extent_ptr_durability(c, &p);
	}
	rcu_read_unlock();

	return replicas >= op->opts.data_replicas;
}

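/*
 * Convert one unwritten extent we just wrote to: take a mutable copy of the
 * existing key, trim it to the range covered by @orig, clear the unwritten
 * flag on its pointers, then update the inode's i_size and the extent itself.
 */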
static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
						  struct btree_iter *iter,
						  struct bkey_i *orig,
						  struct bkey_s_c k,
						  u64 new_i_size)
{
	if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
		/* trace this */
		return 0;
	}

	struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
	int ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		return ret;

	bch2_cut_front(bkey_start_pos(&orig->k), new);
	bch2_cut_back(orig->k.p, new);

	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
	bkey_for_each_ptr(ptrs, ptr)
		ptr->unwritten = 0;

	/*
	 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
	 * that was done when we kicked off the write, and here it's important
	 * that we update the extent that we wrote to - even if a snapshot has
	 * since been created. The write is still outstanding, so we're ok
	 * w.r.t. snapshot atomicity:
	 */
	return bch2_extent_update_i_size_sectors(trans, iter,
					min(new->k.p.offset << 9, new_i_size), 0) ?:
		bch2_trans_update(trans, iter, new,
				  BTREE_UPDATE_internal_snapshot_node);
}

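/*
 * After the data has been written, walk op->insert_keys and convert every
 * unwritten extent we overwrote; errors other than -EROFS are logged, and any
 * error is stored in op->error and stops the walk.
 */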
static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans = bch2_trans_get(c);

	for_each_keylist_key(&op->insert_keys, orig) {
		int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
				     bkey_start_pos(&orig->k), orig->k.p,
				     BTREE_ITER_intent, k,
				     NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
		}));

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"%s write error while doing btree update: %s",
				op->flags & BCH_WRITE_MOVE ? "move" : "user",
				bch2_err_str(ret));
		}

		if (ret) {
			op->error = ret;
			break;
		}
	}

	bch2_trans_put(trans);
}

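/*
 * Completion path for nocow writes: record IO errors, convert any unwritten
 * extents that were overwritten, then fall through to the normal write
 * completion.
 */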
static void __bch2_nocow_write_done(struct bch_write_op *op)
{
	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		op->error = -EIO;
	} else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
		bch2_nocow_write_convert_unwritten(op);
}

static CLOSURE_CALLBACK(bch2_nocow_write_done)
{
	closure_type(op, struct bch_write_op, cl);

	__bch2_nocow_write_done(op);
	bch2_write_done(cl);
}

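/*
 * Buckets we take nocow locks on for the duration of the write, along with
 * the bucket generation observed at lock time.
 */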
struct bucket_to_lock {
	struct bpos		b;
	unsigned		gen;
	struct nocow_lock_bucket *l;
};

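/*
 * Attempt an in-place (nocow) write: look up the existing extents for the
 * range being written and, if they are writeable in place, lock the buckets
 * they point to and write directly to them; otherwise fall back to the normal
 * COW write path.
 */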
static void bch2_nocow_write(struct bch_write_op *op)
|
|
|
|
{
|
|
|
|
struct bch_fs *c = op->c;
|
2023-09-12 21:16:02 +00:00
|
|
|
struct btree_trans *trans;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
struct btree_iter iter;
|
|
|
|
struct bkey_s_c k;
|
2023-12-30 20:32:05 +00:00
|
|
|
DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
u32 snapshot;
|
2023-12-30 20:32:05 +00:00
|
|
|
struct bucket_to_lock *stale_at;
|
2024-06-06 19:06:22 +00:00
|
|
|
int stale, ret;
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
|
|
|
|
if (op->flags & BCH_WRITE_MOVE)
|
|
|
|
return;
|
|
|
|
|
2023-12-30 20:32:05 +00:00
|
|
|
darray_init(&buckets);
|
2023-09-12 21:16:02 +00:00
|
|
|
trans = bch2_trans_get(c);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
retry:
|
2023-09-12 21:16:02 +00:00
|
|
|
bch2_trans_begin(trans);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
|
2023-09-12 21:16:02 +00:00
|
|
|
ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
if (unlikely(ret))
|
|
|
|
goto err;
|
|
|
|
|
2023-09-12 21:16:02 +00:00
|
|
|
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
SPOS(op->pos.inode, op->pos.offset, snapshot),
|
2024-04-07 22:05:34 +00:00
|
|
|
BTREE_ITER_slots);
|
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight, we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed size hash table of two_state_shared locks. We don't have any
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily - we'll want to watch for this
becoming an issue.
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter
to bucket_alloc_state so we can track this.
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued - note that this will lead to unnecessary flushes when other
codepaths have already issued flushes, we may want to replace this with
a sequence number.
- New nocow write path: look up extents, and if they're writable write
to them - otherwise fall back to the normal COW write path.
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2022-11-02 21:12:00 +00:00
|
|
|
while (1) {
|
|
|
|
struct bio *bio = &op->wbio.bio;
|
|
|
|
|
2023-12-30 20:32:05 +00:00
|
|
|
buckets.nr = 0;
|
2022-11-02 21:12:00 +00:00
|
|
|
|
2024-04-10 03:23:08 +00:00
|
|
|
ret = bch2_trans_relock(trans);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
|
2022-11-02 21:12:00 +00:00
|
|
|
k = bch2_btree_iter_peek_slot(&iter);
|
|
|
|
ret = bkey_err(k);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* fall back to normal cow write path? */
|
|
|
|
if (unlikely(k.k->p.snapshot != snapshot ||
|
|
|
|
!bch2_extent_is_writeable(op, k)))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (bch2_keylist_realloc(&op->insert_keys,
|
2023-12-30 20:32:05 +00:00
|
|
|
op->inline_keys,
|
|
|
|
ARRAY_SIZE(op->inline_keys),
|
|
|
|
k.k->u64s))
|
2022-11-02 21:12:00 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* Get iorefs before dropping btree locks: */
|
2023-12-21 20:47:15 +00:00
|
|
|
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
|
2022-11-02 21:12:00 +00:00
|
|
|
bkey_for_each_ptr(ptrs, ptr) {
|
2024-04-30 19:37:51 +00:00
|
|
|
struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
|
2024-05-03 16:53:27 +00:00
|
|
|
if (unlikely(!ca))
|
|
|
|
goto err_get_ioref;
|
|
|
|
|
2024-04-30 23:34:28 +00:00
|
|
|
struct bpos b = PTR_BUCKET_POS(ca, ptr);
|
2023-12-30 20:32:05 +00:00
|
|
|
struct nocow_lock_bucket *l =
|
|
|
|
bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
|
|
|
|
prefetch(l);
|
2022-11-02 21:12:00 +00:00
|
|
|
|
2023-12-30 20:32:05 +00:00
|
|
|
/* XXX allocating memory with btree locks held - rare */
|
|
|
|
darray_push_gfp(&buckets, ((struct bucket_to_lock) {
|
|
|
|
.b = b, .gen = ptr->gen, .l = l,
|
|
|
|
}), GFP_KERNEL|__GFP_NOFAIL);
|
2023-03-19 16:50:05 +00:00
|
|
|
|
2022-11-02 21:12:00 +00:00
|
|
|
if (ptr->unwritten)
|
|
|
|
op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unlock before taking nocow locks, doing IO: */
|
|
|
|
bkey_reassemble(op->insert_keys.top, k);
|
2023-09-12 21:16:02 +00:00
|
|
|
bch2_trans_unlock(trans);
|
2022-11-02 21:12:00 +00:00
|
|
|
|
|
|
|
bch2_cut_front(op->pos, op->insert_keys.top);
|
|
|
|
if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
|
|
|
|
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
|
|
|
|
|
2023-12-30 20:32:05 +00:00
|
|
|
darray_for_each(buckets, i) {
|
2024-05-01 07:59:45 +00:00
|
|
|
struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);
|
2022-11-02 21:12:00 +00:00
|
|
|
|
2023-12-30 20:32:05 +00:00
|
|
|
__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
|
|
|
|
bucket_to_u64(i->b),
|
2022-12-15 01:52:11 +00:00
|
|
|
BUCKET_NOCOW_LOCK_UPDATE);
|
2022-11-02 21:12:00 +00:00
|
|
|
|
|
|
|
rcu_read_lock();
|
2024-06-06 19:06:22 +00:00
|
|
|
u8 *gen = bucket_gen(ca, i->b.offset);
|
|
|
|
stale = !gen ? -1 : gen_after(*gen, i->gen);
|
2022-11-02 21:12:00 +00:00
|
|
|
rcu_read_unlock();
|
|
|
|
|
2023-12-30 20:32:05 +00:00
|
|
|
if (unlikely(stale)) {
|
|
|
|
stale_at = i;
|
2022-11-02 21:12:00 +00:00
|
|
|
goto err_bucket_stale;
|
2023-12-30 20:32:05 +00:00
|
|
|
}
|
2022-11-02 21:12:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bio = &op->wbio.bio;
|
|
|
|
if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
|
|
|
|
bio = bio_split(bio, k.k->p.offset - op->pos.offset,
|
|
|
|
GFP_KERNEL, &c->bio_write);
|
|
|
|
wbio_init(bio)->put_bio = true;
|
|
|
|
bio->bi_opf = op->wbio.bio.bi_opf;
|
|
|
|
} else {
|
2023-08-28 20:13:18 +00:00
|
|
|
op->flags |= BCH_WRITE_SUBMITTED;
|
2022-11-02 21:12:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
op->pos.offset += bio_sectors(bio);
|
|
|
|
op->written += bio_sectors(bio);
|
|
|
|
|
|
|
|
bio->bi_end_io = bch2_write_endio;
|
|
|
|
bio->bi_private = &op->cl;
|
|
|
|
bio->bi_opf |= REQ_OP_WRITE;
|
|
|
|
closure_get(&op->cl);
|
|
|
|
bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
|
|
|
|
op->insert_keys.top, true);
|
|
|
|
|
|
|
|
bch2_keylist_push(&op->insert_keys);
|
2023-08-28 20:13:18 +00:00
|
|
|
if (op->flags & BCH_WRITE_SUBMITTED)
|
2022-11-02 21:12:00 +00:00
|
|
|
break;
|
|
|
|
bch2_btree_iter_advance(&iter);
|
|
|
|
}
|
|
|
|
out:
|
2023-09-12 21:16:02 +00:00
|
|
|
bch2_trans_iter_exit(trans, &iter);
|
2022-11-02 21:12:00 +00:00
|
|
|
err:
|
|
|
|
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
|
|
|
|
goto retry;
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
bch_err_inum_offset_ratelimited(c,
|
2023-12-30 20:32:05 +00:00
|
|
|
op->pos.inode, op->pos.offset << 9,
|
|
|
|
"%s: btree lookup error %s", __func__, bch2_err_str(ret));
|
2022-11-02 21:12:00 +00:00
|
|
|
op->error = ret;
|
2023-08-28 20:13:18 +00:00
|
|
|
op->flags |= BCH_WRITE_SUBMITTED;
|
2022-11-02 21:12:00 +00:00
|
|
|
}
|
|
|
|
|
2023-09-12 21:16:02 +00:00
|
|
|
bch2_trans_put(trans);
|
2023-12-30 20:32:05 +00:00
|
|
|
darray_exit(&buckets);
|
2022-11-02 21:12:00 +00:00
|
|
|
|
|
|
|
/* fall back to cow write path? */
|
2023-08-28 20:13:18 +00:00
|
|
|
if (!(op->flags & BCH_WRITE_SUBMITTED)) {
|
2022-11-02 21:12:00 +00:00
|
|
|
closure_sync(&op->cl);
|
|
|
|
__bch2_nocow_write_done(op);
|
|
|
|
op->insert_keys.top = op->insert_keys.keys;
|
|
|
|
} else if (op->flags & BCH_WRITE_SYNC) {
|
|
|
|
closure_sync(&op->cl);
|
2023-11-18 00:13:27 +00:00
|
|
|
bch2_nocow_write_done(&op->cl.work);
|
2022-11-02 21:12:00 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* XXX
|
|
|
|
* needs to run out of process context because ei_quota_lock is
|
|
|
|
* a mutex
|
|
|
|
*/
|
|
|
|
continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
err_get_ioref:
|
2023-12-30 20:32:05 +00:00
|
|
|
darray_for_each(buckets, i)
|
2024-05-01 07:59:45 +00:00
|
|
|
percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);
|
2022-11-02 21:12:00 +00:00
|
|
|
|
|
|
|
/* Fall back to COW path: */
|
|
|
|
goto out;
|
|
|
|
err_bucket_stale:
|
2023-12-30 20:32:05 +00:00
|
|
|
darray_for_each(buckets, i) {
|
|
|
|
bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
|
|
|
|
if (i == stale_at)
|
|
|
|
break;
|
2023-09-25 01:05:50 +00:00
|
|
|
}
|
2022-11-02 21:12:00 +00:00
|
|
|
|
2024-06-06 19:06:22 +00:00
|
|
|
struct printbuf buf = PRINTBUF;
|
|
|
|
if (bch2_fs_inconsistent_on(stale < 0, c,
|
|
|
|
"pointer to invalid bucket in nocow path on device %llu\n %s",
|
|
|
|
stale_at->b.inode,
|
|
|
|
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
|
|
|
ret = -EIO;
|
|
|
|
} else {
|
|
|
|
/* We can retry this: */
|
|
|
|
ret = -BCH_ERR_transaction_restart;
|
|
|
|
}
|
|
|
|
printbuf_exit(&buf);
|
|
|
|
|
2023-12-30 20:32:05 +00:00
|
|
|
goto err_get_ioref;
|
2022-11-02 21:12:00 +00:00
|
|
|
}
|
|
|
|
|
2022-10-31 20:13:05 +00:00
|
|
|
static void __bch2_write(struct bch_write_op *op)
|
2017-03-17 06:18:50 +00:00
|
|
|
{
|
|
|
|
struct bch_fs *c = op->c;
|
2022-10-31 20:13:05 +00:00
|
|
|
struct write_point *wp = NULL;
|
2022-03-21 23:34:48 +00:00
|
|
|
struct bio *bio = NULL;
|
2020-07-20 17:00:15 +00:00
|
|
|
unsigned nofs_flags;
|
2017-03-17 06:18:50 +00:00
|
|
|
int ret;
|
2020-07-20 17:00:15 +00:00
|
|
|
|
|
|
|
nofs_flags = memalloc_nofs_save();
|
2022-11-02 21:12:00 +00:00
|
|
|
|
2023-02-25 00:07:21 +00:00
|
|
|
if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
|
2022-11-02 21:12:00 +00:00
|
|
|
bch2_nocow_write(op);
|
2023-08-28 20:13:18 +00:00
|
|
|
if (op->flags & BCH_WRITE_SUBMITTED)
|
2022-11-02 21:12:00 +00:00
|
|
|
goto out_nofs_restore;
|
|
|
|
}
|
2017-03-17 06:18:50 +00:00
|
|
|
again:
|
2018-11-01 19:13:19 +00:00
|
|
|
memset(&op->failed, 0, sizeof(op->failed));
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
do {
|
2019-11-02 01:16:51 +00:00
|
|
|
struct bkey_i *key_to_write;
|
|
|
|
unsigned key_to_write_offset = op->insert_keys.top_p -
|
|
|
|
op->insert_keys.keys_p;
|
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
/* +1 for possible cache device: */
|
2018-10-06 08:12:42 +00:00
|
|
|
if (op->open_buckets.nr + op->nr_replicas + 1 >
|
|
|
|
ARRAY_SIZE(op->open_buckets.v))
|
2022-10-31 20:13:05 +00:00
|
|
|
break;
|
2017-03-17 06:18:50 +00:00
|
|
|
|
|
|
|
if (bch2_keylist_realloc(&op->insert_keys,
|
|
|
|
op->inline_keys,
|
|
|
|
ARRAY_SIZE(op->inline_keys),
|
|
|
|
BKEY_EXTENT_U64s_MAX))
|
2022-10-31 20:13:05 +00:00
|
|
|
break;
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2020-08-12 17:48:02 +00:00
|
|
|
/*
|
|
|
|
* The copygc thread is now global, which means it's no longer
|
|
|
|
* freeing up space on specific disks, which means that
|
|
|
|
* allocations for specific disks may hang arbitrarily long:
|
|
|
|
*/
|
2022-11-02 19:41:32 +00:00
|
|
|
ret = bch2_trans_do(c, NULL, NULL, 0,
|
2023-09-12 21:16:02 +00:00
|
|
|
bch2_alloc_sectors_start_trans(trans,
|
2022-11-02 19:41:32 +00:00
|
|
|
op->target,
|
|
|
|
op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
|
|
|
|
op->write_point,
|
|
|
|
&op->devs_have,
|
|
|
|
op->nr_replicas,
|
|
|
|
op->nr_replicas_required,
|
2023-06-24 23:30:10 +00:00
|
|
|
op->watermark,
|
2022-11-02 19:41:32 +00:00
|
|
|
op->flags,
|
|
|
|
(op->flags & (BCH_WRITE_ALLOC_NOWAIT|
|
|
|
|
BCH_WRITE_ONLY_SPECIFIED_DEVS))
|
|
|
|
? NULL : &op->cl, &wp));
|
2022-10-31 20:13:05 +00:00
|
|
|
if (unlikely(ret)) {
|
2022-12-13 20:17:40 +00:00
|
|
|
if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
|
2022-10-29 19:54:17 +00:00
|
|
|
break;
|
2017-03-17 06:18:50 +00:00
|
|
|
|
2022-10-29 19:54:17 +00:00
|
|
|
goto err;
|
2017-03-17 06:18:50 +00:00
|
|
|
}
|
|
|
|
|
2022-10-31 20:13:05 +00:00
|
|
|
EBUG_ON(!wp);
|
2020-07-23 02:40:32 +00:00
|
|
|
|
2018-10-06 08:12:42 +00:00
|
|
|
bch2_open_bucket_get(c, wp, &op->open_buckets);
|
2019-11-02 01:16:51 +00:00
|
|
|
ret = bch2_write_extent(op, wp, &bio);
|
2022-10-31 20:13:05 +00:00
|
|
|
|
2022-11-24 23:03:55 +00:00
|
|
|
bch2_alloc_sectors_done_inlined(c, wp);
|
2022-10-29 19:54:17 +00:00
|
|
|
err:
|
|
|
|
if (ret <= 0) {
|
2023-08-28 20:13:18 +00:00
|
|
|
op->flags |= BCH_WRITE_SUBMITTED;
|
2019-11-02 01:16:51 +00:00
|
|
|
|
2022-10-29 19:54:17 +00:00
|
|
|
if (ret < 0) {
|
2024-01-11 04:08:30 +00:00
|
|
|
if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
|
|
|
|
bch_err_inum_offset_ratelimited(c,
|
|
|
|
op->pos.inode,
|
|
|
|
op->pos.offset << 9,
|
2024-02-17 01:03:12 +00:00
|
|
|
"%s(): %s error: %s", __func__,
|
|
|
|
op->flags & BCH_WRITE_MOVE ? "move" : "user",
|
|
|
|
bch2_err_str(ret));
|
2022-10-29 19:54:17 +00:00
|
|
|
op->error = ret;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-02 01:16:51 +00:00
|
|
|
bio->bi_end_io = bch2_write_endio;
|
|
|
|
bio->bi_private = &op->cl;
|
2019-11-09 21:43:16 +00:00
|
|
|
bio->bi_opf |= REQ_OP_WRITE;
|
2019-11-02 01:16:51 +00:00
|
|
|
|
2022-10-31 20:13:05 +00:00
|
|
|
closure_get(bio->bi_private);
|
2019-11-02 01:16:51 +00:00
|
|
|
|
|
|
|
key_to_write = (void *) (op->insert_keys.keys_p +
|
|
|
|
key_to_write_offset);
|
|
|
|
|
2020-07-09 22:28:11 +00:00
|
|
|
bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
|
2022-11-02 21:12:00 +00:00
|
|
|
key_to_write, false);
|
2017-03-17 06:18:50 +00:00
|
|
|
} while (ret);
|
2022-10-29 19:54:17 +00:00
|
|
|
|
2020-05-13 04:15:28 +00:00
|
|
|
/*
|
2022-10-29 19:54:17 +00:00
|
|
|
* Sync or no?
|
|
|
|
*
|
|
|
|
* If we're running asynchronously, we may still want to block
|
|
|
|
* synchronously here if we weren't able to submit all of the IO at
|
|
|
|
* once, as that signals backpressure to the caller.
|
2020-05-13 04:15:28 +00:00
|
|
|
*/
|
2022-10-29 19:54:17 +00:00
|
|
|
if ((op->flags & BCH_WRITE_SYNC) ||
|
2023-08-28 20:13:18 +00:00
|
|
|
(!(op->flags & BCH_WRITE_SUBMITTED) &&
|
2022-10-29 19:54:17 +00:00
|
|
|
!(op->flags & BCH_WRITE_IN_WORKER))) {
|
2024-08-07 17:58:57 +00:00
|
|
|
bch2_wait_on_allocator(c, &op->cl);
|
2024-05-03 18:49:23 +00:00
|
|
|
|
2017-03-17 06:18:50 +00:00
|
|
|
__bch2_write_index(op);
|
|
|
|
|
2023-08-28 20:13:18 +00:00
|
|
|
if (!(op->flags & BCH_WRITE_SUBMITTED))
|
2022-10-31 20:13:05 +00:00
|
|
|
goto again;
|
2022-11-03 04:29:43 +00:00
|
|
|
bch2_write_done(&op->cl);
|
2022-10-31 20:13:05 +00:00
|
|
|
} else {
|
2023-03-01 04:08:04 +00:00
|
|
|
bch2_write_queue(op, wp);
|
2022-10-31 20:13:05 +00:00
|
|
|
continue_at(&op->cl, bch2_write_index, NULL);
|
2017-03-17 06:18:50 +00:00
|
|
|
}
|
out_nofs_restore:
	memalloc_nofs_restore(nofs_flags);
}

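/*
 * Inline data writes: for small writes the payload is stored directly in an
 * inline_data key instead of being written out to an extent, so no data bio
 * is submitted - the key (data included) just goes through the normal
 * btree/journal update path via __bch2_write_index().
 */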
static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
	struct bio *bio = &op->wbio.bio;
	struct bvec_iter iter;
	struct bkey_i_inline_data *id;
	unsigned sectors;
	int ret;

	memset(&op->failed, 0, sizeof(op->failed));

	op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
	op->flags |= BCH_WRITE_SUBMITTED;

	bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

	ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
				   ARRAY_SIZE(op->inline_keys),
				   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
	if (ret) {
		op->error = ret;
		goto err;
	}

	sectors = bio_sectors(bio);
	op->pos.offset += sectors;

	id = bkey_inline_data_init(op->insert_keys.top);
	id->k.p		= op->pos;
	id->k.version	= op->version;
	id->k.size	= sectors;

	iter = bio->bi_iter;
	iter.bi_size = data_len;
	memcpy_from_bio(id->v.data, bio, iter);

	while (data_len & 7)
		id->v.data[data_len++] = '\0';
	set_bkey_val_bytes(&id->k, data_len);
	bch2_keylist_push(&op->insert_keys);

	__bch2_write_index(op);
err:
	bch2_write_done(&op->cl);
}

/**
 * bch2_write() - handle a write to a cache device or flash only volume
 * @cl:		&bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket, there will be
 * multiple keys); after the data is written it calls bch_journal, and after
 * the keys have been added to the next journal write they're inserted into
 * the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
CLOSURE_CALLBACK(bch2_write)
{
	closure_type(op, struct bch_write_op, cl);
	struct bio *bio = &op->wbio.bio;
	struct bch_fs *c = op->c;
	unsigned data_len;

	EBUG_ON(op->cl.parent);
	BUG_ON(!op->nr_replicas);
	BUG_ON(!op->write_point.v);
	BUG_ON(bkey_eq(op->pos, POS_MAX));

	op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
	op->start_time = local_clock();
	bch2_keylist_init(&op->insert_keys, op->inline_keys);
	wbio_init(bio)->put_bio = false;

	if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
		bch_err_inum_offset_ratelimited(c,
			op->pos.inode,
			op->pos.offset << 9,
			"%s write error: misaligned write",
			op->flags & BCH_WRITE_MOVE ? "move" : "user");
		op->error = -EIO;
		goto err;
	}

	if (c->opts.nochanges) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	if (!(op->flags & BCH_WRITE_MOVE) &&
	    !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
	bch2_increment_clock(c, bio_sectors(bio), WRITE);

	data_len = min_t(u64, bio->bi_iter.bi_size,
			 op->new_i_size - (op->pos.offset << 9));

	if (c->opts.inline_data &&
	    data_len <= min(block_bytes(c) / 2, 1024U)) {
		bch2_write_data_inline(op, data_len);
		return;
	}

	__bch2_write(op);
	return;
err:
	bch2_disk_reservation_put(c, &op->res);

	closure_debug_destroy(&op->cl);
	if (op->end_io)
		op->end_io(op);
}
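
/*
 * Rough usage sketch (illustration only, not code from this file): a caller
 * fills in a bch_write_op and submits it as a closure. The specific fields
 * set below and the my_end_io callback are illustrative assumptions; see
 * io_write.h for the full set of op fields and bch2_write_op_init().
 *
 *	bch2_write_op_init(&op, c, io_opts);
 *	op.nr_replicas	= io_opts.data_replicas;
 *	op.pos		= POS(inum, sector);
 *	op.write_point	= writepoint_hashed((unsigned long) current);
 *	op.end_io	= my_end_io;
 *	// attach the pages to be written to op.wbio.bio, then:
 *	closure_call(&op.cl, bch2_write, NULL, NULL);
 */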
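
/* For debugging: pretty-print the state of an in-flight write op. */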
static const char * const bch2_write_flags[] = {
#define x(f)	#f,
	BCH_WRITE_FLAGS()
#undef x
	NULL
};

void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
	prt_str(out, "pos: ");
	bch2_bpos_to_text(out, op->pos);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_str(out, "started: ");
	bch2_pr_time_units(out, local_clock() - op->start_time);
	prt_newline(out);

	prt_str(out, "flags: ");
	prt_bitflags(out, bch2_write_flags, op->flags);
	prt_newline(out);

	prt_printf(out, "ref: %u\n", closure_nr_remaining(&op->cl));

	printbuf_indent_sub(out, 2);
}

void bch2_fs_io_write_exit(struct bch_fs *c)
{
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->replica_set);
	bioset_exit(&c->bio_write);
}

int bch2_fs_io_write_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
	    bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
		return -BCH_ERR_ENOMEM_bio_write_init;

	if (mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->opts.btree_node_size,
					 c->opts.encoded_extent_max) /
				   PAGE_SIZE, 0))
		return -BCH_ERR_ENOMEM_bio_bounce_pages_init;

	return 0;
}