Mirror of https://github.com/torvalds/linux.git
bcache: remove PTR_CACHE
Remove the PTR_CACHE inline and replace it with a direct dereference
of c->cache.

(Coly Li: fix the typo from PTR_BUCKET to PTR_CACHE in commit log)

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Coly Li <colyli@suse.de>
Link: https://lore.kernel.org/r/20210411134316.80274-3-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 11e9560e6c
parent 13e1db65d2
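The change is mechanical: bcache no longer supports multiple cache devices per cache set, so a cache_set owns exactly one struct cache, and the PTR_CACHE() inline ignores its key and pointer-index arguments entirely, returning c->cache (see the bcache.h hunk below). Every caller can therefore dereference c->cache directly. Here is a minimal self-contained sketch of the before/after pattern; the struct bodies are invented stand-ins for illustration, and only the shape of the transformation matches the real code:

#include <stdio.h>

/* Stand-in types: the real bcache structs are far larger. */
struct cache { long sectors_written; };
struct bkey { int dummy; };
struct cache_set { struct cache *cache; };  /* exactly one cache per set */

/* The inline removed by this patch: k and ptr are ignored entirely. */
static inline struct cache *PTR_CACHE(struct cache_set *c,
                                      const struct bkey *k,
                                      unsigned int ptr)
{
        return c->cache;
}

int main(void)
{
        struct cache ca = { 0 };
        struct cache_set cs = { .cache = &ca };
        struct bkey k = { 0 };

        PTR_CACHE(&cs, &k, 0)->sectors_written += 8;  /* old call-site style */
        cs.cache->sectors_written += 8;               /* new call-site style */

        printf("%ld\n", ca.sectors_written);          /* prints 16 */
        return 0;
}

Because PTR_CACHE() carried no logic of its own, removing it costs nothing and strips a layer of indirection from every call site, as the hunks below show.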
drivers/md/bcache/alloc.c

@@ -482,8 +482,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
 	unsigned int i;
 
 	for (i = 0; i < KEY_PTRS(k); i++)
-		__bch_bucket_free(PTR_CACHE(c, k, i),
-				  PTR_BUCKET(c, k, i));
+		__bch_bucket_free(c->cache, PTR_BUCKET(c, k, i));
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
@@ -674,7 +673,7 @@ bool bch_alloc_sectors(struct cache_set *c,
 		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
 
 		atomic_long_add(sectors,
-				&PTR_CACHE(c, &b->key, i)->sectors_written);
+				&c->cache->sectors_written);
 	}
 
 	if (b->sectors_free < c->cache->sb.block_size)
drivers/md/bcache/bcache.h

@@ -804,13 +804,6 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 	return s & (c->cache->sb.bucket_size - 1);
 }
 
-static inline struct cache *PTR_CACHE(struct cache_set *c,
-				      const struct bkey *k,
-				      unsigned int ptr)
-{
-	return c->cache;
-}
-
 static inline size_t PTR_BUCKET_NR(struct cache_set *c,
 				   const struct bkey *k,
 				   unsigned int ptr)
@@ -822,7 +815,7 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 					const struct bkey *k,
 					unsigned int ptr)
 {
-	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
+	return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
 }
 
 static inline uint8_t gen_after(uint8_t a, uint8_t b)
@@ -841,7 +834,7 @@ static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
 static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 				 unsigned int i)
 {
-	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
 }
 
 /* Btree key macros */
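For readers unfamiliar with the helpers this patch keeps: PTR_BUCKET_NR() maps a key pointer's sector offset to an index into the cache's buckets array, and the rewritten PTR_BUCKET() above adds that index to c->cache->buckets. A rough standalone model of that lookup, assuming a hypothetical power-of-two bucket size of 1024 sectors and invented stand-in types (the real code reads the size from c->cache->sb):

#include <stddef.h>
#include <stdio.h>

#define BUCKET_SECTORS 1024ULL  /* hypothetical; really c->cache->sb.bucket_size */

struct bucket { unsigned char gen; };
struct cache { struct bucket buckets[8]; };
struct cache_set { struct cache *cache; };

/* Models PTR_BUCKET_NR(): sector offset -> bucket index. */
static size_t bucket_nr(unsigned long long sector)
{
        return (size_t)(sector / BUCKET_SECTORS);
}

/* Models the rewritten PTR_BUCKET(): index straight into the single cache. */
static struct bucket *ptr_bucket(struct cache_set *c, unsigned long long sector)
{
        return c->cache->buckets + bucket_nr(sector);
}

int main(void)
{
        struct cache ca = { 0 };
        struct cache_set cs = { .cache = &ca };

        /* sector 3000 falls in bucket 2, since 3000 / 1024 == 2 */
        printf("%td\n", ptr_bucket(&cs, 3000) - ca.buckets);
        /* offset within the bucket, cf. bucket_remainder() above: prints 952 */
        printf("%llu\n", 3000ULL & (BUCKET_SECTORS - 1));
        return 0;
}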
drivers/md/bcache/btree.c

@@ -426,7 +426,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
 	do_btree_node_write(b);
 
 	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
-			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+			&b->c->cache->btree_sectors_written);
 
 	b->written += set_blocks(i, block_bytes(b->c->cache));
 }
@@ -1161,7 +1161,7 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		SET_PTR_GEN(k, i,
-			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
+			    bch_inc_gen(b->c->cache,
 					PTR_BUCKET(b->c, &b->key, i)));
 
 	mutex_unlock(&b->c->bucket_lock);
drivers/md/bcache/debug.c

@@ -50,7 +50,7 @@ void bch_btree_verify(struct btree *b)
 	v->keys.ops = b->keys.ops;
 
 	bio = bch_bbio_alloc(b->c);
-	bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
+	bio_set_dev(bio, b->c->cache->bdev);
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
 	bio->bi_opf = REQ_OP_READ | REQ_META;
drivers/md/bcache/extents.c

@@ -50,7 +50,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		if (ptr_available(c, k, i)) {
-			struct cache *ca = PTR_CACHE(c, k, i);
+			struct cache *ca = c->cache;
 			size_t bucket = PTR_BUCKET_NR(c, k, i);
 			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
 
@@ -71,7 +71,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		if (ptr_available(c, k, i)) {
-			struct cache *ca = PTR_CACHE(c, k, i);
+			struct cache *ca = c->cache;
 			size_t bucket = PTR_BUCKET_NR(c, k, i);
 			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
 
drivers/md/bcache/io.c

@@ -36,7 +36,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 	struct bbio *b = container_of(bio, struct bbio, bio);
 
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
-	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
+	bio_set_dev(bio, c->cache->bdev);
 
 	b->submit_time_us = local_clock_us();
 	closure_bio_submit(c, bio, bio->bi_private);
@@ -137,7 +137,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 			      blk_status_t error, const char *m)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
-	struct cache *ca = PTR_CACHE(c, &b->key, 0);
+	struct cache *ca = c->cache;
 	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
 
 	unsigned int threshold = op_is_write(bio_op(bio))
drivers/md/bcache/journal.c

@@ -768,7 +768,7 @@ static void journal_write_unlocked(struct closure *cl)
 	w->data->csum = csum_set(w->data);
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
-		ca = PTR_CACHE(c, k, i);
+		ca = c->cache;
 		bio = &ca->journal.bio;
 
 		atomic_long_add(sectors, &ca->meta_sectors_written);
drivers/md/bcache/writeback.c

@@ -416,7 +416,7 @@ static void read_dirty_endio(struct bio *bio)
 	struct dirty_io *io = w->private;
 
 	/* is_read = 1 */
-	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
+	bch_count_io_errors(io->dc->disk.c->cache,
 			    bio->bi_status, 1,
 			    "reading dirty data from cache");
 
@@ -510,8 +510,7 @@ static void read_dirty(struct cached_dev *dc)
 			dirty_init(w);
 			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
 			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
-			bio_set_dev(&io->bio,
-				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
+			bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
 			io->bio.bi_end_io = read_dirty_endio;
 
 			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))