mirror of
https://github.com/torvalds/linux.git
synced 2024-11-05 03:21:32 +00:00
md/raid10: add rcu protection to rdev access during reshape.
mirrors[].rdev can become NULL at any point unless one of the following is held: a counted reference, ->reconfig_mutex, or rcu_read_lock(). Reshape isn't always suitably careful, because in the past an rdev could not be removed during reshape. It can be now, so add protection. Signed-off-by: NeilBrown <neilb@suse.com> Signed-off-by: Shaohua Li <shli@fb.com>
This commit is contained in:
parent
f90145f317
commit
d094d6860b
@ -4361,15 +4361,16 @@ read_more:
|
|||||||
blist = read_bio;
|
blist = read_bio;
|
||||||
read_bio->bi_next = NULL;
|
read_bio->bi_next = NULL;
|
||||||
|
|
||||||
|
rcu_read_lock();
|
||||||
for (s = 0; s < conf->copies*2; s++) {
|
for (s = 0; s < conf->copies*2; s++) {
|
||||||
struct bio *b;
|
struct bio *b;
|
||||||
int d = r10_bio->devs[s/2].devnum;
|
int d = r10_bio->devs[s/2].devnum;
|
||||||
struct md_rdev *rdev2;
|
struct md_rdev *rdev2;
|
||||||
if (s&1) {
|
if (s&1) {
|
||||||
rdev2 = conf->mirrors[d].replacement;
|
rdev2 = rcu_dereference(conf->mirrors[d].replacement);
|
||||||
b = r10_bio->devs[s/2].repl_bio;
|
b = r10_bio->devs[s/2].repl_bio;
|
||||||
} else {
|
} else {
|
||||||
rdev2 = conf->mirrors[d].rdev;
|
rdev2 = rcu_dereference(conf->mirrors[d].rdev);
|
||||||
b = r10_bio->devs[s/2].bio;
|
b = r10_bio->devs[s/2].bio;
|
||||||
}
|
}
|
||||||
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
|
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
|
||||||
@ -4414,6 +4415,7 @@ read_more:
|
|||||||
nr_sectors += len >> 9;
|
nr_sectors += len >> 9;
|
||||||
}
|
}
|
||||||
bio_full:
|
bio_full:
|
||||||
|
rcu_read_unlock();
|
||||||
r10_bio->sectors = nr_sectors;
|
r10_bio->sectors = nr_sectors;
|
||||||
|
|
||||||
/* Now submit the read */
|
/* Now submit the read */
|
||||||
@ -4465,16 +4467,20 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
|
|||||||
struct bio *b;
|
struct bio *b;
|
||||||
int d = r10_bio->devs[s/2].devnum;
|
int d = r10_bio->devs[s/2].devnum;
|
||||||
struct md_rdev *rdev;
|
struct md_rdev *rdev;
|
||||||
|
rcu_read_lock();
|
||||||
if (s&1) {
|
if (s&1) {
|
||||||
rdev = conf->mirrors[d].replacement;
|
rdev = rcu_dereference(conf->mirrors[d].replacement);
|
||||||
b = r10_bio->devs[s/2].repl_bio;
|
b = r10_bio->devs[s/2].repl_bio;
|
||||||
} else {
|
} else {
|
||||||
rdev = conf->mirrors[d].rdev;
|
rdev = rcu_dereference(conf->mirrors[d].rdev);
|
||||||
b = r10_bio->devs[s/2].bio;
|
b = r10_bio->devs[s/2].bio;
|
||||||
}
|
}
|
||||||
if (!rdev || test_bit(Faulty, &rdev->flags))
|
if (!rdev || test_bit(Faulty, &rdev->flags)) {
|
||||||
|
rcu_read_unlock();
|
||||||
continue;
|
continue;
|
||||||
|
}
|
||||||
atomic_inc(&rdev->nr_pending);
|
atomic_inc(&rdev->nr_pending);
|
||||||
|
rcu_read_unlock();
|
||||||
md_sync_acct(b->bi_bdev, r10_bio->sectors);
|
md_sync_acct(b->bi_bdev, r10_bio->sectors);
|
||||||
atomic_inc(&r10_bio->remaining);
|
atomic_inc(&r10_bio->remaining);
|
||||||
b->bi_next = NULL;
|
b->bi_next = NULL;
|
||||||
@ -4535,9 +4541,10 @@ static int handle_reshape_read_error(struct mddev *mddev,
|
|||||||
if (s > (PAGE_SIZE >> 9))
|
if (s > (PAGE_SIZE >> 9))
|
||||||
s = PAGE_SIZE >> 9;
|
s = PAGE_SIZE >> 9;
|
||||||
|
|
||||||
|
rcu_read_lock();
|
||||||
while (!success) {
|
while (!success) {
|
||||||
int d = r10b->devs[slot].devnum;
|
int d = r10b->devs[slot].devnum;
|
||||||
struct md_rdev *rdev = conf->mirrors[d].rdev;
|
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
|
||||||
sector_t addr;
|
sector_t addr;
|
||||||
if (rdev == NULL ||
|
if (rdev == NULL ||
|
||||||
test_bit(Faulty, &rdev->flags) ||
|
test_bit(Faulty, &rdev->flags) ||
|
||||||
@ -4545,11 +4552,15 @@ static int handle_reshape_read_error(struct mddev *mddev,
|
|||||||
goto failed;
|
goto failed;
|
||||||
|
|
||||||
addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
|
addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
|
||||||
|
atomic_inc(&rdev->nr_pending);
|
||||||
|
rcu_read_unlock();
|
||||||
success = sync_page_io(rdev,
|
success = sync_page_io(rdev,
|
||||||
addr,
|
addr,
|
||||||
s << 9,
|
s << 9,
|
||||||
bvec[idx].bv_page,
|
bvec[idx].bv_page,
|
||||||
READ, false);
|
READ, false);
|
||||||
|
rdev_dec_pending(rdev, mddev);
|
||||||
|
rcu_read_lock();
|
||||||
if (success)
|
if (success)
|
||||||
break;
|
break;
|
||||||
failed:
|
failed:
|
||||||
@ -4559,6 +4570,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
|
|||||||
if (slot == first_slot)
|
if (slot == first_slot)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
rcu_read_unlock();
|
||||||
if (!success) {
|
if (!success) {
|
||||||
/* couldn't read this block, must give up */
|
/* couldn't read this block, must give up */
|
||||||
set_bit(MD_RECOVERY_INTR,
|
set_bit(MD_RECOVERY_INTR,
|
||||||
@ -4628,16 +4640,18 @@ static void raid10_finish_reshape(struct mddev *mddev)
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
int d;
|
int d;
|
||||||
|
rcu_read_lock();
|
||||||
for (d = conf->geo.raid_disks ;
|
for (d = conf->geo.raid_disks ;
|
||||||
d < conf->geo.raid_disks - mddev->delta_disks;
|
d < conf->geo.raid_disks - mddev->delta_disks;
|
||||||
d++) {
|
d++) {
|
||||||
struct md_rdev *rdev = conf->mirrors[d].rdev;
|
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
|
||||||
if (rdev)
|
if (rdev)
|
||||||
clear_bit(In_sync, &rdev->flags);
|
clear_bit(In_sync, &rdev->flags);
|
||||||
rdev = conf->mirrors[d].replacement;
|
rdev = rcu_dereference(conf->mirrors[d].replacement);
|
||||||
if (rdev)
|
if (rdev)
|
||||||
clear_bit(In_sync, &rdev->flags);
|
clear_bit(In_sync, &rdev->flags);
|
||||||
}
|
}
|
||||||
|
rcu_read_unlock();
|
||||||
}
|
}
|
||||||
mddev->layout = mddev->new_layout;
|
mddev->layout = mddev->new_layout;
|
||||||
mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
|
mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
|
||||||
|
Loading…
Reference in New Issue
Block a user