raid5: fix 2 bugs in ops_complete_biofill
1/ ops_complete_biofill tried to avoid calling handle_stripe since all the
   state necessary to return read completions is available. However, the
   process of determining whether more read requests are pending requires
   locking the stripe (to block add_stripe_bio from updating dev->toread).
   ops_complete_biofill can run in tasklet context, so rather than upgrading
   all the stripe locks from spin_lock to spin_lock_bh, this patch simply
   reschedules handle_stripe unconditionally after completing the read
   request.

2/ ops_complete_biofill needlessly qualified processing R5_Wantfill with
   dev->toread. As a result, the 'biofill' pending bit was cleared before
   the pending read-completions on dev->read were handled. R5_Wantfill can
   be handled unconditionally because the 'biofill' pending bit prevents
   new R5_Wantfill requests from being seen by ops_run_biofill and
   ops_complete_biofill.

Found-by: Yuri Tikhonov <yur@emcraft.com>
[neilb@suse.de: simpler fix for bug 1 than moving code]
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
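To make the bug-1 trade-off concrete, here is a minimal sketch (not patch
code; the function name is hypothetical, while set_bit(), STRIPE_HANDLE,
release_stripe(), and handle_stripe() are the real raid5/bitops symbols).
A tasklet runs in softirq context, so if it took a stripe lock that
process-context code holds with plain spin_lock(), it could spin forever on
the CPU whose lock holder it preempted. Converting every user to
spin_lock_bh() would close that window; the patch instead keeps the tasklet
lock-free and defers the dev->toread scan:

/* Hypothetical sketch of the completion path after the fix.  Rather than
 * taking the stripe lock in tasklet (softirq) context to look for more
 * pending reads, mark the stripe and let handle_stripe() -- which runs
 * with proper locking -- rescan dev->toread.
 */
static void biofill_complete_sketch(struct stripe_head *sh)
{
	/* ... return completed read bios, as ops_complete_biofill does ... */

	set_bit(STRIPE_HANDLE, &sh->state);	/* always reschedule handle_stripe */
	release_stripe(sh);			/* drop our reference; raid5d picks
						 * the stripe up and handles it */
}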
commit e4d84909dd
parent 6247cdc2cd
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	struct stripe_head *sh = stripe_head_ref;
 	struct bio *return_bi = NULL;
 	raid5_conf_t *conf = sh->raid_conf;
-	int i, more_to_read = 0;
+	int i;
 
 	pr_debug("%s: stripe %llu\n", __FUNCTION__,
 		(unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	/* clear completed biofills */
 	for (i = sh->disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
-		/* check if this stripe has new incoming reads */
-		if (dev->toread)
-			more_to_read++;
 
 		/* acknowledge completion of a biofill operation */
-		/* and check if we need to reply to a read request
-		 */
-		if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+		/* and check if we need to reply to a read request,
+		 * new R5_Wantfill requests are held off until
+		 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+		 */
+		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 			struct bio *rbi, *rbi2;
-			clear_bit(R5_Wantfill, &dev->flags);
 
 			/* The access to dev->read is outside of the
 			 * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
 	return_io(return_bi);
 
-	if (more_to_read)
-		set_bit(STRIPE_HANDLE, &sh->state);
+	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
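A design note on the bug-2 hunk (an observation, not additional patch code):
collapsing the test_bit()/clear_bit() pair into a single test_and_clear_bit()
turns "see the flag and take ownership of it" into one atomic
read-modify-write, so the path that wins the bit is the only one that drains
dev->read. Dropping the !dev->toread qualifier is safe because, per the new
comment, the STRIPE_OP_BIOFILL pending bit holds new R5_Wantfill requests
off until this handler is done. The resulting shape:

/* Atomic claim-and-handle, as the patched code does; a separate
 * test_bit() followed by clear_bit() would leave a window between
 * the test and the clear.
 */
if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
	/* sole owner of this completion: splice dev->read
	 * onto return_bi for return_io() */
}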