bcachefs: Refactor rebalance_pred function
Before, the logic for whether we should move an extent was somewhat duplicated between rebalance_add_key() and rebalance_pred(); this centralizes it in __bch2_rebalance_pred(). This is prep work for a patch that enables marking data as incompressible.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 65d9f536fa
commit 182084e3dc
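The heart of the refactor is the return contract of the new helper: -1 means "don't move this extent", a device index means "move it, and account the work to that device", and INT_MAX means "move it, device unknown". Both callers then branch on that one value. Below is a standalone user-space sketch of that pattern; toy_extent, toy_opts, and toy_rebalance_pred are illustrative stand-ins, not bcachefs code:

/*
 * Standalone sketch (not kernel code) of the refactored pattern: one
 * predicate returns -1 (don't move), a device index, or INT_MAX
 * (move, device unknown), and every caller branches on that.
 */
#include <limits.h>
#include <stdio.h>

struct toy_extent {
	int dev;	/* device holding the extent */
	int compressed;	/* nonzero if already compressed as requested */
};

struct toy_opts {
	int background_target;		/* 0 = none */
	int background_compression;	/* 0 = none */
};

/* Mirrors the shape of __bch2_rebalance_pred(): one decision point. */
static int toy_rebalance_pred(const struct toy_extent *e,
			      const struct toy_opts *opts)
{
	if (opts->background_compression && !e->compressed)
		return e->dev;	/* needs (re)compression: move it */
	if (opts->background_target && e->dev != opts->background_target)
		return e->dev;	/* on the wrong device: move it */
	return -1;		/* nothing to do */
}

int main(void)
{
	struct toy_opts opts = { .background_target = 2,
				 .background_compression = 1 };
	struct toy_extent e = { .dev = 1, .compressed = 0 };
	int dev = toy_rebalance_pred(&e, &opts);

	/* Caller 1 (accounting, like bch2_rebalance_add_key): pick a counter. */
	if (dev >= 0)
		printf("account work against %s\n",
		       dev < INT_MAX ? "per-device counter"
				     : "unknown-dev counter");

	/* Caller 2 (move loop, like rebalance_pred): move or skip. */
	printf("%s\n", dev >= 0 ? "DATA_ADD_REPLICAS" : "DATA_SKIP");
	return 0;
}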
fs/bcachefs/rebalance.c
@@ -17,50 +17,51 @@
 #include <linux/kthread.h>
 #include <linux/sched/cputime.h>

-static inline bool rebalance_ptr_pred(struct bch_fs *c,
-				      struct extent_ptr_decoded p,
-				      struct bch_io_opts *io_opts)
+/*
+ * Check if an extent should be moved:
+ * returns -1 if it should not be moved, or
+ * device of pointer that should be moved, if known, or INT_MAX if unknown
+ */
+static int __bch2_rebalance_pred(struct bch_fs *c,
+				 struct bkey_s_c k,
+				 struct bch_io_opts *io_opts)
 {
-	if (io_opts->background_target &&
-	    !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
-	    !p.ptr.cached)
-		return true;
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;

-	if (io_opts->background_compression &&
-	    p.crc.compression_type !=
-	    bch2_compression_opt_to_type[io_opts->background_compression])
-		return true;
+	if (io_opts->background_compression)
+		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+			if (!p.ptr.cached &&
+			    p.crc.compression_type !=
+			    bch2_compression_opt_to_type[io_opts->background_compression])
+				return p.ptr.dev;

-	return false;
+	if (io_opts->background_target)
+		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+			if (!p.ptr.cached &&
+			    !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target))
+				return p.ptr.dev;
+
+	return -1;
 }

 void bch2_rebalance_add_key(struct bch_fs *c,
 			    struct bkey_s_c k,
 			    struct bch_io_opts *io_opts)
 {
-	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const union bch_extent_entry *entry;
-	struct extent_ptr_decoded p;
+	atomic64_t *counter;
+	int dev;

-	if (!io_opts->background_target &&
-	    !io_opts->background_compression)
+	dev = __bch2_rebalance_pred(c, k, io_opts);
+	if (dev < 0)
 		return;

-	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-		if (rebalance_ptr_pred(c, p, io_opts)) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+	counter = dev < INT_MAX
+		? &bch_dev_bkey_exists(c, dev)->rebalance_work
+		: &c->rebalance.work_unknown_dev;

-			if (atomic64_add_return(p.crc.compressed_size,
-						&ca->rebalance_work) ==
-			    p.crc.compressed_size)
-				rebalance_wakeup(c);
-		}
-}
-
-void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
-{
-	if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
-	    sectors)
+	if (atomic64_add_return(k.k->size, counter) == k.k->size)
 		rebalance_wakeup(c);
 }

@@ -69,26 +70,20 @@ static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
 				    struct bch_io_opts *io_opts,
 				    struct data_opts *data_opts)
 {
-	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const union bch_extent_entry *entry;
-	struct extent_ptr_decoded p;
-	unsigned nr_replicas = 0;
-
-	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		nr_replicas += !p.ptr.cached;
-
-		if (rebalance_ptr_pred(c, p, io_opts))
-			goto found;
+	if (__bch2_rebalance_pred(c, k, io_opts) >= 0) {
+		data_opts->target		= io_opts->background_target;
+		data_opts->btree_insert_flags	= 0;
+		return DATA_ADD_REPLICAS;
+	} else {
+		return DATA_SKIP;
 	}
+}

-	if (nr_replicas < io_opts->data_replicas)
-		goto found;
-
-	return DATA_SKIP;
-found:
-	data_opts->target		= io_opts->background_target;
-	data_opts->btree_insert_flags	= 0;
-	return DATA_ADD_REPLICAS;
+void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
+{
+	if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
+	    sectors)
+		rebalance_wakeup(c);
 }

 struct rebalance_work {
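One detail worth calling out, present in both the old and new code: work counters are bumped with atomic64_add_return(), and the thread is woken only when the returned sum equals the amount just added, i.e. the counter was previously zero. The wakeup therefore fires once when work first appears, not on every extent. Here is a minimal user-space sketch of the same idiom, using C11 atomics in place of the kernel's atomic64_t (names are illustrative):

/*
 * Sketch of the wakeup idiom from the diff. atomic_fetch_add() returning
 * 0 (so the new value equals the amount just added) means the counter
 * was empty, i.e. this is the first pending work, so the rebalance
 * thread is woken exactly once instead of on every extent.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t work_unknown_dev;	/* statically zero */

static void rebalance_wakeup_stub(void)
{
	puts("wake rebalance thread");
}

static void add_work(uint64_t sectors)
{
	/* old value 0 <=> new value == sectors, matching the kernel test */
	if (atomic_fetch_add(&work_unknown_dev, sectors) == 0)
		rebalance_wakeup_stub();
}

int main(void)
{
	add_work(8);	/* counter 0 -> 8: wakes the thread */
	add_work(16);	/* counter already nonzero: no wakeup */
	return 0;
}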