#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
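
/*
 * Cutoffs, in percent of cache in use, past which writes stop being
 * cached in writeback mode: above CUTOFF_WRITEBACK only sync writes are
 * still written back, and above CUTOFF_WRITEBACK_SYNC no writes are
 * (see should_writeback() below).
 */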
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70
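
/* Total dirty sectors on @d, summed over its per-stripe dirty counters. */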
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
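
/*
 * Total dirty sectors across every flash-only volume in @c. Takes
 * bch_register_lock to keep the c->devices array stable while walking it.
 */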
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}
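
/* Map a sector offset on @d to the index of the stripe containing it. */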
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}
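
/*
 * True if any stripe overlapping [offset, offset + nr_sectors) on the
 * backing device has dirty sectors outstanding.
 */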
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
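
/*
 * Decide whether a write should be cached in writeback mode. Refuse when
 * the device is not in writeback mode, is detaching, or the cache is over
 * CUTOFF_WRITEBACK_SYNC percent full. Accept when partial stripes are
 * expensive and the bio touches a stripe that is already dirty. Otherwise
 * honour @would_skip, then accept sync writes unconditionally and other
 * writes only while the cache is under CUTOFF_WRITEBACK percent full.
 */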
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}
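
/*
 * Illustrative caller (a hedged sketch, not the exact request-path code;
 * cache_mode() and bypass stand in for the real call site's state):
 *
 *	if (should_writeback(dc, bio, cache_mode(dc, bio), bypass))
 *		// cache the write; the writeback thread flushes it later
 *	else
 *		// write through or around the cache
 */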
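
/* Wake the writeback thread, if one has been started. */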
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}
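
/*
 * Note that the backing device now holds dirty data: on the first
 * transition of has_dirty, pin the device, flag the superblock
 * BDEV_STATE_DIRTY and write it out, then kick the writeback thread.
 */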
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}
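
/* Non-inline pieces, implemented in writeback.c. */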
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif