block: convert blkdev_issue_flush() to use empty barriers

Then we can get rid of ->issue_flush_fn() and all the driver private
implementations of that.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent bf2de6f5a4
commit fd5d806266
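
The conversion removes every per-driver ->issue_flush_fn() hook and leaves
blkdev_issue_flush() as the single entry point, now implemented with an empty
barrier bio. As a minimal sketch of the caller's view after this patch (the
wrapper function below is illustrative, not part of the commit):

    #include <linux/blkdev.h>

    /* Hypothetical caller: flush a device's volatile write cache. */
    static int example_flush_write_cache(struct block_device *bdev)
    {
            sector_t error_sector;

            /*
             * Backed by an empty barrier bio after this patch; a failed
             * flush reports -EIO, and error_sector may hold the failing
             * sector if the driver copied it into bio->bi_sector.
             */
            return blkdev_issue_flush(bdev, &error_sector);
    }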
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,23 +304,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 EXPORT_SYMBOL(blk_queue_ordered);
 
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q: the request queue
- * @iff: the function to be called issuing the flush
- *
- * Description:
- *    If a driver supports issuing a flush command, the support is notified
- *    to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
-        q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
 /*
  * Cache flushing for ordered writes handling
  */
@@ -2666,6 +2649,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 EXPORT_SYMBOL(blk_execute_rq);
 
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+        if (err)
+                clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+        complete(bio->bi_private);
+}
+
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev: blockdev to issue flush for
@@ -2678,7 +2669,10 @@ EXPORT_SYMBOL(blk_execute_rq);
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
+        DECLARE_COMPLETION_ONSTACK(wait);
         struct request_queue *q;
+        struct bio *bio;
+        int ret;
 
         if (bdev->bd_disk == NULL)
                 return -ENXIO;
@@ -2686,10 +2680,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
         q = bdev_get_queue(bdev);
         if (!q)
                 return -ENXIO;
-        if (!q->issue_flush_fn)
-                return -EOPNOTSUPP;
 
-        return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+        bio = bio_alloc(GFP_KERNEL, 0);
+        if (!bio)
+                return -ENOMEM;
+
+        bio->bi_end_io = bio_end_empty_barrier;
+        bio->bi_private = &wait;
+        bio->bi_bdev = bdev;
+        submit_bio(1 << BIO_RW_BARRIER, bio);
+
+        wait_for_completion(&wait);
+
+        /*
+         * The driver must store the error location in ->bi_sector, if
+         * it supports it. For non-stacked drivers, this should be copied
+         * from rq->sector.
+         */
+        if (error_sector)
+                *error_sector = bio->bi_sector;
+
+        ret = 0;
+        if (!bio_flagged(bio, BIO_UPTODATE))
+                ret = -EIO;
+
+        bio_put(bio);
+        return ret;
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -414,26 +414,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
         req->cmd_type = REQ_TYPE_FLUSH;
 }
 
-static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
-                               sector_t *sector)
-{
-        struct ps3_storage_device *dev = q->queuedata;
-        struct request *req;
-        int res;
-
-        dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
-        req = blk_get_request(q, WRITE, __GFP_WAIT);
-        ps3disk_prepare_flush(q, req);
-        res = blk_execute_rq(q, gendisk, req, 0);
-        if (res)
-                dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
-                        __func__, __LINE__, res);
-        blk_put_request(req);
-        return res;
-}
-
-
 static unsigned long ps3disk_mask;
 
 static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -506,7 +486,6 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
         blk_queue_dma_alignment(queue, dev->blk_size-1);
         blk_queue_hardsect_size(queue, dev->blk_size);
 
-        blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
         blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
                           ps3disk_prepare_flush);
 
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -716,32 +716,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
         rq->buffer = rq->cmd;
 }
 
-static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
-                               sector_t *error_sector)
-{
-        ide_drive_t *drive = q->queuedata;
-        struct request *rq;
-        int ret;
-
-        if (!drive->wcache)
-                return 0;
-
-        rq = blk_get_request(q, WRITE, __GFP_WAIT);
-
-        idedisk_prepare_flush(q, rq);
-
-        ret = blk_execute_rq(q, disk, rq, 0);
-
-        /*
-         * if we failed and caller wants error offset, get it
-         */
-        if (ret && error_sector)
-                *error_sector = ide_get_error_location(drive, rq->cmd);
-
-        blk_put_request(rq);
-        return ret;
-}
-
 /*
  * This is tightly woven into the driver->do_special can not touch.
  * DON'T do it again until a total personality rewrite is committed.
@@ -781,7 +755,6 @@ static void update_ordered(ide_drive_t *drive)
         struct hd_driveid *id = drive->id;
         unsigned ordered = QUEUE_ORDERED_NONE;
         prepare_flush_fn *prep_fn = NULL;
-        issue_flush_fn *issue_fn = NULL;
 
         if (drive->wcache) {
                 unsigned long long capacity;
@@ -805,13 +778,11 @@ static void update_ordered(ide_drive_t *drive)
                 if (barrier) {
                         ordered = QUEUE_ORDERED_DRAIN_FLUSH;
                         prep_fn = idedisk_prepare_flush;
-                        issue_fn = idedisk_issue_flush;
                 }
         } else
                 ordered = QUEUE_ORDERED_DRAIN;
 
         blk_queue_ordered(drive->queue, ordered, prep_fn);
-        blk_queue_issue_flush_fn(drive->queue, issue_fn);
 }
 
 static int write_cache(ide_drive_t *drive, int arg)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
         }
 }
 
-int dm_table_flush_all(struct dm_table *t)
-{
-        struct list_head *d, *devices = dm_table_get_devices(t);
-        int ret = 0;
-        unsigned i;
-
-        for (i = 0; i < t->num_targets; i++)
-                if (t->targets[i].type->flush)
-                        t->targets[i].type->flush(&t->targets[i]);
-
-        for (d = devices->next; d != devices; d = d->next) {
-                struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-                struct request_queue *q = bdev_get_queue(dd->bdev);
-                int err;
-
-                if (!q->issue_flush_fn)
-                        err = -EOPNOTSUPP;
-                else
-                        err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
-                if (!ret)
-                        ret = err;
-        }
-
-        return ret;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
         dm_get(t->md);
@@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
         return 0;
 }
 
-static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
-                        sector_t *error_sector)
-{
-        struct mapped_device *md = q->queuedata;
-        struct dm_table *map = dm_get_table(md);
-        int ret = -ENXIO;
-
-        if (map) {
-                ret = dm_table_flush_all(map);
-                dm_table_put(map);
-        }
-
-        return ret;
-}
-
 static void dm_unplug_all(struct request_queue *q)
 {
         struct mapped_device *md = q->queuedata;
@@ -1003,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
         blk_queue_make_request(md->queue, dm_request);
         blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
         md->queue->unplug_fn = dm_unplug_all;
-        md->queue->issue_flush_fn = dm_flush_all;
 
         md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
         if (!md->io_pool)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
-int dm_table_flush_all(struct dm_table *t);
 
 /*-----------------------------------------------------------------
  * A registry of target types.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
         }
 }
 
-static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
-                              sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        linear_conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        for (i=0; i < mddev->raid_disks && ret == 0; i++) {
-                struct block_device *bdev = conf->disks[i].rdev->bdev;
-                struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                if (!r_queue->issue_flush_fn)
-                        ret = -EOPNOTSUPP;
-                else
-                        ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-        }
-        return ret;
-}
-
 static int linear_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)
 
         blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
         mddev->queue->unplug_fn = linear_unplug;
-        mddev->queue->issue_flush_fn = linear_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = linear_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;
         return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3463,7 +3463,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
                 mddev->pers->stop(mddev);
                 mddev->queue->merge_bvec_fn = NULL;
                 mddev->queue->unplug_fn = NULL;
-                mddev->queue->issue_flush_fn = NULL;
                 mddev->queue->backing_dev_info.congested_fn = NULL;
                 if (mddev->pers->sync_request)
                         sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -194,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
         seq_printf (seq, "]");
 }
 
-static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
-                                 sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        multipath_conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                        struct block_device *bdev = rdev->bdev;
-                        struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                        if (!r_queue->issue_flush_fn)
-                                ret = -EOPNOTSUPP;
-                        else {
-                                atomic_inc(&rdev->nr_pending);
-                                rcu_read_unlock();
-                                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                              error_sector);
-                                rdev_dec_pending(rdev, mddev);
-                                rcu_read_lock();
-                        }
-                }
-        }
-        rcu_read_unlock();
-        return ret;
-}
 static int multipath_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -527,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
         mddev->array_size = mddev->size;
 
         mddev->queue->unplug_fn = multipath_unplug;
-        mddev->queue->issue_flush_fn = multipath_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = multipath_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
         }
 }
 
-static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
-                             sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        raid0_conf_t *conf = mddev_to_conf(mddev);
-        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
-        int i, ret = 0;
-
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                struct block_device *bdev = devlist[i]->bdev;
-                struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                if (!r_queue->issue_flush_fn)
-                        ret = -EOPNOTSUPP;
-                else
-                        ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-        }
-        return ret;
-}
-
 static int raid0_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)
 
         mddev->queue->unplug_fn = raid0_unplug;
 
-        mddev->queue->issue_flush_fn = raid0_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = raid0_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -567,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
         md_wakeup_thread(mddev->thread);
 }
 
-static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
-                             sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                        struct block_device *bdev = rdev->bdev;
-                        struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                        if (!r_queue->issue_flush_fn)
-                                ret = -EOPNOTSUPP;
-                        else {
-                                atomic_inc(&rdev->nr_pending);
-                                rcu_read_unlock();
-                                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                              error_sector);
-                                rdev_dec_pending(rdev, mddev);
-                                rcu_read_lock();
-                        }
-                }
-        }
-        rcu_read_unlock();
-        return ret;
-}
-
 static int raid1_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -1997,7 +1967,6 @@ static int run(mddev_t *mddev)
         mddev->array_size = mddev->size;
 
         mddev->queue->unplug_fn = raid1_unplug;
-        mddev->queue->issue_flush_fn = raid1_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = raid1_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -611,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
         md_wakeup_thread(mddev->thread);
 }
 
-static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
-                              sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                        struct block_device *bdev = rdev->bdev;
-                        struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                        if (!r_queue->issue_flush_fn)
-                                ret = -EOPNOTSUPP;
-                        else {
-                                atomic_inc(&rdev->nr_pending);
-                                rcu_read_unlock();
-                                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                              error_sector);
-                                rdev_dec_pending(rdev, mddev);
-                                rcu_read_lock();
-                        }
-                }
-        }
-        rcu_read_unlock();
-        return ret;
-}
-
 static int raid10_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -2118,7 +2088,6 @@ static int run(mddev_t *mddev)
         mddev->resync_max_sectors = size << conf->chunk_shift;
 
         mddev->queue->unplug_fn = raid10_unplug;
-        mddev->queue->issue_flush_fn = raid10_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = raid10_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3204,36 +3204,6 @@ static void raid5_unplug_device(struct request_queue *q)
         unplug_slaves(mddev);
 }
 
-static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
-                             sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        raid5_conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                        struct block_device *bdev = rdev->bdev;
-                        struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                        if (!r_queue->issue_flush_fn)
-                                ret = -EOPNOTSUPP;
-                        else {
-                                atomic_inc(&rdev->nr_pending);
-                                rcu_read_unlock();
-                                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                              error_sector);
-                                rdev_dec_pending(rdev, mddev);
-                                rcu_read_lock();
-                        }
-                }
-        }
-        rcu_read_unlock();
-        return ret;
-}
-
 static int raid5_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -4263,7 +4233,6 @@ static int run(mddev_t *mddev)
                mdname(mddev));
 
         mddev->queue->unplug_fn = raid5_unplug_device;
-        mddev->queue->issue_flush_fn = raid5_issue_flush;
         mddev->queue->backing_dev_info.congested_data = mddev;
         mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -148,29 +148,6 @@ static int i2o_block_device_flush(struct i2o_device *dev)
         return i2o_msg_post_wait(dev->iop, msg, 60);
 };
 
-/**
- * i2o_block_issue_flush - device-flush interface for block-layer
- * @queue: the request queue of the device which should be flushed
- * @disk: gendisk
- * @error_sector: error offset
- *
- * Helper function to provide flush functionality to block-layer.
- *
- * Returns 0 on success or negative error code on failure.
- */
-
-static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
-                                 sector_t * error_sector)
-{
-        struct i2o_block_device *i2o_blk_dev = queue->queuedata;
-        int rc = -ENODEV;
-
-        if (likely(i2o_blk_dev))
-                rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
-
-        return rc;
-}
-
 /**
  * i2o_block_device_mount - Mount (load) the media of device dev
  * @dev: I2O device which should receive the mount request
@@ -1009,7 +986,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
         }
 
         blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
-        blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
 
         gd->major = I2O_MAJOR;
         gd->queue = queue;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -826,27 +826,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
         return 0;
 }
 
-static int sd_issue_flush(struct request_queue *q, struct gendisk *disk,
-                          sector_t *error_sector)
-{
-        int ret = 0;
-        struct scsi_device *sdp = q->queuedata;
-        struct scsi_disk *sdkp;
-
-        if (sdp->sdev_state != SDEV_RUNNING)
-                return -ENXIO;
-
-        sdkp = scsi_disk_get_from_dev(&sdp->sdev_gendev);
-
-        if (!sdkp)
-                return -ENODEV;
-
-        if (sdkp->WCE)
-                ret = sd_sync_cache(sdkp);
-        scsi_disk_put(sdkp);
-        return ret;
-}
-
 static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
         memset(rq->cmd, 0, sizeof(rq->cmd));
@@ -1697,7 +1676,6 @@ static int sd_probe(struct device *dev)
         sd_revalidate_disk(gd);
 
         blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
-        blk_queue_issue_flush_fn(sdp->request_queue, sd_issue_flush);
 
         gd->driverfs_dev = &sdp->sdev_gendev;
         gd->flags = GENHD_FL_DRIVERFS;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -330,7 +330,6 @@ typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
@@ -368,7 +367,6 @@ struct request_queue
         prep_rq_fn              *prep_rq_fn;
         unplug_fn               *unplug_fn;
         merge_bvec_fn           *merge_bvec_fn;
-        issue_flush_fn          *issue_flush_fn;
         prepare_flush_fn        *prepare_flush_fn;
         softirq_done_fn         *softirq_done_fn;
 
@@ -770,7 +768,6 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
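
The driver and md/dm hunks above are mechanical deletions; the substantive
change is the synchronous empty-barrier submission added to block/ll_rw_blk.c.
Condensed from those hunks as a reader's summary (not additional patch
content), the pattern is:

    /* Zero-segment bio: no payload, pure barrier/flush semantics. */
    DECLARE_COMPLETION_ONSTACK(wait);
    struct bio *bio = bio_alloc(GFP_KERNEL, 0);

    bio->bi_end_io = bio_end_empty_barrier;  /* calls complete(bio->bi_private) */
    bio->bi_private = &wait;
    bio->bi_bdev = bdev;
    submit_bio(1 << BIO_RW_BARRIER, bio);    /* empty barrier acts as a cache flush */

    wait_for_completion(&wait);              /* sleep until the device completes it */
    /* !bio_flagged(bio, BIO_UPTODATE) afterwards means the flush failed (-EIO). */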