mirror of
https://github.com/torvalds/linux.git
a8394090a9
__update_writeback_rate() uses a Proportion-Differentiation (PD) controller algorithm to control the writeback rate. A dirty target number is used in this PD controller to control the writeback rate: a larger target number makes the writeback rate smaller, and conversely a smaller target number makes the writeback rate larger. bcache uses the following steps to calculate the target number,
1) cache_sectors = all-buckets-of-cache-set * bucket-size
2) cache_dirty_target = cache_sectors * cached-device-writeback_percent
3) target = cache_dirty_target * (sectors-of-cached-device / sectors-of-all-cached-devices-of-this-cache-set)
The calculation of cache_sectors at step 1) is incorrect: it does not account for dirty blocks occupied by flash only volumes. A flash only volume can be regarded as a bcache device without a cached device. All data sectors allocated for it are persistent on the cache device and marked dirty; they are not touched by the bcache writeback and garbage collection code. So data blocks of flash only volumes should be ignored when calculating cache_sectors of the cache set. The current code does not subtract the dirty sectors of flash only volumes, which results in a larger target number from the above 3 steps. In consequence the cache device's writeback rate is smaller than the correct value, and writeback is slower on all cached devices.
This patch fixes the incorrectly slow writeback rate by subtracting the dirty sectors of flash only volumes in __update_writeback_rate().
(Commit log composed by Coly Li to pass checkpatch.pl checking)
Signed-off-by: Tang Junhui <tang.junhui@zte.com.cn>
Reviewed-by: Coly Li <colyli@suse.de>
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
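For context, here is a minimal sketch (not the verbatim patch) of how the corrected target calculation in __update_writeback_rate() can look. The field names c->nbuckets, c->sb.bucket_size, dc->writeback_percent and c->cached_dev_sectors are taken from the surrounding bcache code as assumptions; the essential change is that the dirty sectors of flash only volumes are subtracted via the bcache_flash_devs_sectors_dirty() helper defined in writeback.h below:

	struct cache_set *c = dc->disk.c;

	/* Step 1: total cache sectors, minus sectors pinned by flash only volumes */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				 bcache_flash_devs_sectors_dirty(c);

	/* Step 2: how many dirty sectors the whole cache set is allowed to hold */
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Step 3: this cached device's proportional share of the dirty target */
	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

With the flash only dirty sectors removed from cache_sectors, the target shrinks accordingly and the PD controller no longer under-throttles, so the cached devices write back at the intended rate.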
111 lines
2.5 KiB
C
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}

static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif