c7b7bd0740
If a bcache device is configured in writeback mode, the current code does not handle write I/O errors on the backing device properly.

In writeback mode, a write request is first written to the cache device and later flushed to the backing device. If an I/O error happens while writing from the cache device to the backing device, the bcache code simply ignores the error, and the upper layers are never notified that the backing device is broken.

This patch handles backing device failure the same way cache device failure is handled:
- Add an error counter 'io_errors' and an error limit 'error_limit' to struct cached_dev. Also add io_disable to struct cached_dev to disable I/O on a problematic backing device.
- When an I/O error happens on the backing device, increase the io_errors counter. If io_errors reaches error_limit, set cached_dev->io_disable to true and stop the bcache device.

The result is that if the backing device is broken or disconnected and its I/O errors reach the error limit, the backing device is disabled and the associated bcache device is removed from the system.

Changelog:
v2: remove "bcache: " prefix in pr_error(), and use the correct name string to print out the bcache device gendisk name.
v1: indeed this is newly added in the v2 patch set.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Michael Lyle <mlyle@lyle.org>
Cc: Michael Lyle <mlyle@lyle.org>
Cc: Junhui Tang <tang.junhui@zte.com.cn>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
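The fields named above live in struct cached_dev, which is defined in bcache.h rather than in this file. Below is a minimal sketch of the state the patch describes; the field names follow the text above, while the exact types and placement in the real header may differ:

struct cached_dev {
        /* ... existing members ... */

        /* backing device error accounting (sketch only, see bcache.h) */
        atomic_t        io_errors;      /* I/O errors seen on the backing device */
        unsigned        error_limit;    /* error count at which the device is stopped */
        bool            io_disable;     /* set once error_limit is reached; further
                                         * I/O to the backing device is rejected */
};

In this file, bch_count_backing_io_errors() below does the counting: it bumps io_errors on each failure and, once error_limit is reached, calls bch_cached_dev_error(), which, per the description above, is what sets io_disable and stops the bcache device.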
163 lines · 3.8 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
        struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
        struct bio *bio = &b->bio;

        bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

        return bio;
}

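/*
 * Point the bio at the cache device and on-disk offset named by the first
 * pointer in b->key, note the submission time for later congestion
 * accounting, and submit it via closure_bio_submit().
 */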
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
        bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);

        b->submit_time_us = local_clock_us();
        closure_bio_submit(c, bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
                     struct bkey *k, unsigned ptr)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
}

/* IO errors */
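/*
 * Error accounting for the backing device (the writeback failure case
 * described in the commit message above): bump dc->io_errors on every
 * failed I/O and, once dc->error_limit is reached, let
 * bch_cached_dev_error() disable and stop the bcache device.
 */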
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
        char buf[BDEVNAME_SIZE];
        unsigned errors;

        WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

        errors = atomic_add_return(1, &dc->io_errors);
        if (errors < dc->error_limit)
                pr_err("%s: IO error on backing device, unrecoverable",
                        bio_devname(bio, buf));
        else
                bch_cached_dev_error(dc);
}

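/*
 * Error accounting for cache devices. Errors are tracked as a decaying
 * count (scaled by IO_ERROR_SHIFT): every error_decay I/Os the error count
 * is rescaled by 127/128 so that old errors gradually age out. If the
 * decayed count still reaches the cache set's error_limit, the whole cache
 * set is flagged as failed via bch_cache_set_error().
 */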
void bch_count_io_errors(struct cache *ca,
                         blk_status_t error,
                         int is_read,
                         const char *m)
{
        /*
         * The halflife of an error is:
         * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
         */

        if (ca->set->error_decay) {
                unsigned count = atomic_inc_return(&ca->io_count);

                while (count > ca->set->error_decay) {
                        unsigned errors;
                        unsigned old = count;
                        unsigned new = count - ca->set->error_decay;

                        /*
                         * First we subtract refresh from count; each time we
                         * successfully do so, we rescale the errors once:
                         */

                        count = atomic_cmpxchg(&ca->io_count, old, new);

                        if (count == old) {
                                count = new;

                                errors = atomic_read(&ca->io_errors);
                                do {
                                        old = errors;
                                        new = ((uint64_t) errors * 127) / 128;
                                        errors = atomic_cmpxchg(&ca->io_errors,
                                                                old, new);
                                } while (old != errors);
                        }
                }
        }

        if (error) {
                char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;

                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s%s",
                               bdevname(ca->bdev, buf), m,
                               is_read ? ", recovering." : ".");
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
                                            bdevname(ca->bdev, buf), m);
        }
}

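/*
 * Completion-side accounting for a bbio: compare the bio's round trip time
 * against the read/write congestion thresholds to adjust c->congested,
 * then pass the completion status to bch_count_io_errors() for the cache
 * device the bio was sent to.
 */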
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                              blk_status_t error, const char *m)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);
        int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

        unsigned threshold = op_is_write(bio_op(bio))
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;

        if (threshold) {
                unsigned t = local_clock_us();

                int us = t - b->submit_time_us;
                int congested = atomic_read(&c->congested);

                if (us > (int) threshold) {
                        int ms = us / 1024;
                        c->congested_last_us = t;

                        ms = min(ms, CONGESTED_MAX + congested);
                        atomic_sub(ms, &c->congested);
                } else if (congested < 0)
                        atomic_inc(&c->congested);
        }

        bch_count_io_errors(ca, error, is_read, m);
}

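/*
 * Common completion path for bbios: account for errors and latency, drop
 * the bio reference, and release the closure that was waiting on the I/O.
 */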
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
                    blk_status_t error, const char *m)
{
        struct closure *cl = bio->bi_private;

        bch_bbio_count_io_errors(c, bio, error, m);
        bio_put(bio);
        closure_put(cl);
}