[PATCH] md: set the unplug_fn and issue_flush_fn for md devices *after* committed to creation
If we set these too early, they may still be in place and possibly get called even though the array didn't get set up properly.

Signed-off-by: Neil Brown <neilb@cse.unsw.edu.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent 29ac8e056f
commit 7a5febe9ff
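To make the ordering problem concrete, here is a minimal user-space C sketch of the pattern the patch enforces: run every setup step that can fail first, and publish the callback pointers only once the array is definitely being created. The struct and function names below are hypothetical illustrations, not the md driver's real API.

#include <stdio.h>
#include <stdlib.h>

struct queue {
	/* Once set, this hook may be invoked at any time by other code. */
	void (*unplug_fn)(struct queue *q);
};

struct array {
	struct queue *queue;
	int *disks;			/* state the callback relies on */
};

static void array_unplug(struct queue *q)
{
	/* In the real driver this touches per-array state. */
	printf("unplug\n");
}

static int array_run(struct array *a, int ndisks)
{
	a->disks = calloc(ndisks, sizeof(*a->disks));
	if (!a->disks)
		return -1;		/* fail before any hook is visible */

	/* ... further setup that may also fail goes here ... */

	/* Everything is fine now: only commit the hooks at the end. */
	a->queue->unplug_fn = array_unplug;
	return 0;
}

int main(void)
{
	struct queue q = { 0 };
	struct array a = { .queue = &q };

	if (array_run(&a, 4) == 0 && q.unplug_fn)
		q.unplug_fn(&q);	/* safe: setup completed first */
	free(a.disks);
	return 0;
}

If the hooks were assigned before the fallible steps, a caller could invoke them against a half-initialized array; that is exactly the window this patch closes in each md personality below.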
drivers/md/multipath.c
@@ -462,10 +462,6 @@ static int multipath_run (mddev_t *mddev)
 	}
 	memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks);
 
-	mddev->queue->unplug_fn = multipath_unplug;
-
-	mddev->queue->issue_flush_fn = multipath_issue_flush;
-
 	conf->working_disks = 0;
 	ITERATE_RDEV(mddev,rdev,tmp) {
 		disk_idx = rdev->raid_disk;
@@ -528,6 +524,10 @@ static int multipath_run (mddev_t *mddev)
 	 * Ok, everything is just fine now
 	 */
 	mddev->array_size = mddev->size;
+
+	mddev->queue->unplug_fn = multipath_unplug;
+	mddev->queue->issue_flush_fn = multipath_issue_flush;
+
 	return 0;
 
 out_free_conf:
drivers/md/raid1.c
@@ -1197,10 +1197,6 @@ static int run(mddev_t *mddev)
 	if (!conf->r1bio_pool)
 		goto out_no_mem;
 
-	mddev->queue->unplug_fn = raid1_unplug;
-
-	mddev->queue->issue_flush_fn = raid1_issue_flush;
-
 	ITERATE_RDEV(mddev, rdev, tmp) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -1282,6 +1278,9 @@ static int run(mddev_t *mddev)
 	 */
 	mddev->array_size = mddev->size;
 
+	mddev->queue->unplug_fn = raid1_unplug;
+	mddev->queue->issue_flush_fn = raid1_issue_flush;
+
 	return 0;
 
 out_no_mem:
drivers/md/raid10.c
@@ -1639,9 +1639,6 @@ static int run(mddev_t *mddev)
 			mdname(mddev));
 		goto out_free_conf;
 	}
-	mddev->queue->unplug_fn = raid10_unplug;
-
-	mddev->queue->issue_flush_fn = raid10_issue_flush;
 
 	ITERATE_RDEV(mddev, rdev, tmp) {
 		disk_idx = rdev->raid_disk;
@@ -1713,6 +1710,9 @@ static int run(mddev_t *mddev)
 	mddev->array_size = size/2;
 	mddev->resync_max_sectors = size;
 
+	mddev->queue->unplug_fn = raid10_unplug;
+	mddev->queue->issue_flush_fn = raid10_issue_flush;
+
 	/* Calculate max read-ahead size.
 	 * We need to readahead at least twice a whole stripe....
 	 * maybe...
drivers/md/raid5.c
@@ -1620,9 +1620,6 @@ static int run (mddev_t *mddev)
 	atomic_set(&conf->active_stripes, 0);
 	atomic_set(&conf->preread_active_stripes, 0);
 
-	mddev->queue->unplug_fn = raid5_unplug_device;
-	mddev->queue->issue_flush_fn = raid5_issue_flush;
-
 	PRINTK("raid5: run(%s) called.\n", mdname(mddev));
 
 	ITERATE_RDEV(mddev,rdev,tmp) {
@@ -1728,6 +1725,10 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
 	}
 
 	/* Ok, everything is just fine now */
+
+	mddev->queue->unplug_fn = raid5_unplug_device;
+	mddev->queue->issue_flush_fn = raid5_issue_flush;
+
 	mddev->array_size = mddev->size * (mddev->raid_disks - 1);
 	return 0;
 abort:
drivers/md/raid6main.c
@@ -1779,9 +1779,6 @@ static int run (mddev_t *mddev)
 	atomic_set(&conf->active_stripes, 0);
 	atomic_set(&conf->preread_active_stripes, 0);
 
-	mddev->queue->unplug_fn = raid6_unplug_device;
-	mddev->queue->issue_flush_fn = raid6_issue_flush;
-
 	PRINTK("raid6: run(%s) called.\n", mdname(mddev));
 
 	ITERATE_RDEV(mddev,rdev,tmp) {
@@ -1895,6 +1892,9 @@ static int run (mddev_t *mddev)
 
 	/* Ok, everything is just fine now */
 	mddev->array_size = mddev->size * (mddev->raid_disks - 2);
+
+	mddev->queue->unplug_fn = raid6_unplug_device;
+	mddev->queue->issue_flush_fn = raid6_issue_flush;
 	return 0;
 abort:
 	if (conf) {