md: add a new helper rdev_has_badblock()
The current API is_badblock() must be passed 'first_bad' and 'bad_sectors', but many callers only want to know whether there are badblocks or not, and each of those callers must define two local variables that are never used.

Add a new helper rdev_has_badblock() that only returns whether there are badblocks or not, remove the unnecessary local variables, and replace is_badblock() with the new helper in many places.

There are no functional changes, and the new helper will also be used later to refactor read_balance().

Co-developed-by: Paul Luse <paul.e.luse@linux.intel.com>
Signed-off-by: Paul Luse <paul.e.luse@linux.intel.com>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Xiao Ni <xni@redhat.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240229095714.926789-2-yukuai1@huaweicloud.com
parent dfd2bf4367
commit 3a0f007b69
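To illustrate the caller-side effect (a minimal sketch, not taken from the patch itself; rdev, sector, nr_sectors, and handle_bad_range() are placeholder names), a caller that only needs a yes/no answer goes from:

	/* Before: two out-parameters that are declared but never read. */
	sector_t first_bad;
	int bad_sectors;

	if (is_badblock(rdev, sector, nr_sectors, &first_bad, &bad_sectors))
		handle_bad_range();	/* hypothetical caller action */

to:

	/* After: the helper hides the unused out-parameters. */
	if (rdev_has_badblock(rdev, sector, nr_sectors))
		handle_bad_range();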
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -222,6 +222,16 @@ static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
 	}
 	return 0;
 }
+
+static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
+				    int sectors)
+{
+	sector_t first_bad;
+	int bad_sectors;
+
+	return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
+}
+
 extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 			      int is_new);
 extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
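One detail worth noting: because the helper simply forwards is_badblock()'s return value, it keeps the tri-state convention inherited from badblocks_check() — 0 when the range is clean, 1 when it overlaps only acknowledged bad blocks, and -1 when it overlaps unacknowledged ones (stated here from general kernel knowledge, not from this patch). That is why several call sites below compare the result against 0 or test for a negative value rather than treating it as a plain boolean. A sketch of the pattern, with placeholder arguments:

	int bad = rdev_has_badblock(rdev, sector, nr_sectors);

	if (bad < 0) {
		/* Unacknowledged bad blocks: writes must not proceed yet. */
	} else if (bad) {
		/* Bad blocks present, but all acknowledged. */
	} else {
		/* Range is clean. */
	}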
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -498,9 +498,6 @@ static void raid1_end_write_request(struct bio *bio)
 		 * to user-side. So if something waits for IO, then it
 		 * will wait for the 'master' bio.
 		 */
-		sector_t first_bad;
-		int bad_sectors;
-
 		r1_bio->bios[mirror] = NULL;
 		to_put = bio;
 		/*
@@ -516,8 +513,8 @@ static void raid1_end_write_request(struct bio *bio)
 			set_bit(R1BIO_Uptodate, &r1_bio->state);
 
 		/* Maybe we can clear some bad blocks. */
-		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
-				&first_bad, &bad_sectors) && !discard_error) {
+		if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+		    !discard_error) {
 			r1_bio->bios[mirror] = IO_MADE_GOOD;
 			set_bit(R1BIO_MadeGood, &r1_bio->state);
 		}
@@ -1944,8 +1941,6 @@ static void end_sync_write(struct bio *bio)
 	struct r1bio *r1_bio = get_resync_r1bio(bio);
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
-	sector_t first_bad;
-	int bad_sectors;
 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
 
 	if (!uptodate) {
@@ -1955,14 +1950,11 @@ static void end_sync_write(struct bio *bio)
 			set_bit(MD_RECOVERY_NEEDED, &
 				mddev->recovery);
 		set_bit(R1BIO_WriteError, &r1_bio->state);
-	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
-			       &first_bad, &bad_sectors) &&
-		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
-				r1_bio->sector,
-				r1_bio->sectors,
-				&first_bad, &bad_sectors)
-		)
+	} else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+		   !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
+				      r1_bio->sector, r1_bio->sectors)) {
 		set_bit(R1BIO_MadeGood, &r1_bio->state);
+	}
 
 	put_sync_write_buf(r1_bio, uptodate);
 }
@@ -2279,16 +2271,12 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			s = PAGE_SIZE >> 9;
 
 		do {
-			sector_t first_bad;
-			int bad_sectors;
-
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    (test_bit(In_sync, &rdev->flags) ||
 			     (!test_bit(Faulty, &rdev->flags) &&
 			      rdev->recovery_offset >= sect + s)) &&
-			    is_badblock(rdev, sect, s,
-					&first_bad, &bad_sectors) == 0) {
+			    rdev_has_badblock(rdev, sect, s) == 0) {
 				atomic_inc(&rdev->nr_pending);
 				if (sync_page_io(rdev, sect, s<<9,
 					 conf->tmppage, REQ_OP_READ, false))
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -518,11 +518,7 @@ static void raid10_end_write_request(struct bio *bio)
 		 * The 'master' represents the composite IO operation to
 		 * user-side. So if something waits for IO, then it will
 		 * wait for the 'master' bio.
 		 */
-		sector_t first_bad;
-		int bad_sectors;
-
 		/*
-		 *
 		 * Do not set R10BIO_Uptodate if the current device is
 		 * rebuilding or Faulty. This is because we cannot use
 		 * such device for properly reading the data back (we could
@@ -535,10 +531,9 @@ static void raid10_end_write_request(struct bio *bio)
 			set_bit(R10BIO_Uptodate, &r10_bio->state);
 
 		/* Maybe we can clear some bad blocks. */
-		if (is_badblock(rdev,
-				r10_bio->devs[slot].addr,
-				r10_bio->sectors,
-				&first_bad, &bad_sectors) && !discard_error) {
+		if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+				      r10_bio->sectors) &&
+		    !discard_error) {
 			bio_put(bio);
 			if (repl)
 				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
@@ -1330,10 +1325,7 @@ retry_wait:
 	}
 
 	if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
-		sector_t first_bad;
 		sector_t dev_sector = r10_bio->devs[i].addr;
-		int bad_sectors;
-		int is_bad;
 
 		/*
 		 * Discard request doesn't care the write result
@@ -1342,9 +1334,8 @@ retry_wait:
 		if (!r10_bio->sectors)
 			continue;
 
-		is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
-				     &first_bad, &bad_sectors);
-		if (is_bad < 0) {
+		if (rdev_has_badblock(rdev, dev_sector,
+				      r10_bio->sectors) < 0) {
 			/*
 			 * Mustn't write here until the bad block
 			 * is acknowledged
@@ -2290,8 +2281,6 @@ static void end_sync_write(struct bio *bio)
 	struct mddev *mddev = r10_bio->mddev;
 	struct r10conf *conf = mddev->private;
 	int d;
-	sector_t first_bad;
-	int bad_sectors;
 	int slot;
 	int repl;
 	struct md_rdev *rdev = NULL;
@@ -2312,11 +2301,10 @@ static void end_sync_write(struct bio *bio)
 				&rdev->mddev->recovery);
 			set_bit(R10BIO_WriteError, &r10_bio->state);
 		}
-	} else if (is_badblock(rdev,
-			       r10_bio->devs[slot].addr,
-			       r10_bio->sectors,
-			       &first_bad, &bad_sectors))
+	} else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+				     r10_bio->sectors)) {
 		set_bit(R10BIO_MadeGood, &r10_bio->state);
+	}
 
 	rdev_dec_pending(rdev, mddev);
 
@@ -2597,11 +2585,8 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
 			    int sectors, struct page *page, enum req_op op)
 {
-	sector_t first_bad;
-	int bad_sectors;
-
-	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
-	    && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
+	if (rdev_has_badblock(rdev, sector, sectors) &&
+	    (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
 		return -1;
 	if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
 		/* success */
@@ -2658,16 +2643,14 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 			s = PAGE_SIZE >> 9;
 
 		do {
-			sector_t first_bad;
-			int bad_sectors;
-
 			d = r10_bio->devs[sl].devnum;
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    test_bit(In_sync, &rdev->flags) &&
 			    !test_bit(Faulty, &rdev->flags) &&
-			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
-					&first_bad, &bad_sectors) == 0) {
+			    rdev_has_badblock(rdev,
+					      r10_bio->devs[sl].addr + sect,
+					      s) == 0) {
 				atomic_inc(&rdev->nr_pending);
 				success = sync_page_io(rdev,
 						       r10_bio->devs[sl].addr +
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1210,10 +1210,8 @@ again:
 		 */
 		while (op_is_write(op) && rdev &&
 		       test_bit(WriteErrorSeen, &rdev->flags)) {
-			sector_t first_bad;
-			int bad_sectors;
-			int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
-					      &first_bad, &bad_sectors);
+			int bad = rdev_has_badblock(rdev, sh->sector,
+						    RAID5_STRIPE_SECTORS(conf));
 			if (!bad)
 				break;
 
@@ -2855,8 +2853,6 @@ static void raid5_end_write_request(struct bio *bi)
 	struct r5conf *conf = sh->raid_conf;
 	int disks = sh->disks, i;
 	struct md_rdev *rdev;
-	sector_t first_bad;
-	int bad_sectors;
 	int replacement = 0;
 
 	for (i = 0 ; i < disks; i++) {
@@ -2888,9 +2884,8 @@ static void raid5_end_write_request(struct bio *bi)
 	if (replacement) {
 		if (bi->bi_status)
 			md_error(conf->mddev, rdev);
-		else if (is_badblock(rdev, sh->sector,
-				     RAID5_STRIPE_SECTORS(conf),
-				     &first_bad, &bad_sectors))
+		else if (rdev_has_badblock(rdev, sh->sector,
+					   RAID5_STRIPE_SECTORS(conf)))
 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
 	} else {
 		if (bi->bi_status) {
@@ -2900,9 +2895,8 @@ static void raid5_end_write_request(struct bio *bi)
 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
 				set_bit(MD_RECOVERY_NEEDED,
 					&rdev->mddev->recovery);
-		} else if (is_badblock(rdev, sh->sector,
-				       RAID5_STRIPE_SECTORS(conf),
-				       &first_bad, &bad_sectors)) {
+		} else if (rdev_has_badblock(rdev, sh->sector,
+					     RAID5_STRIPE_SECTORS(conf))) {
 			set_bit(R5_MadeGood, &sh->dev[i].flags);
 			if (test_bit(R5_ReadError, &sh->dev[i].flags))
 				/* That was a successful write so make
@@ -4674,8 +4668,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 	/* Now to look around and see what can be done */
 	for (i=disks; i--; ) {
 		struct md_rdev *rdev;
-		sector_t first_bad;
-		int bad_sectors;
 		int is_bad = 0;
 
 		dev = &sh->dev[i];
@@ -4719,8 +4711,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 		rdev = conf->disks[i].replacement;
 		if (rdev && !test_bit(Faulty, &rdev->flags) &&
 		    rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
-		    !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
-				 &first_bad, &bad_sectors))
+		    !rdev_has_badblock(rdev, sh->sector,
+				       RAID5_STRIPE_SECTORS(conf)))
 			set_bit(R5_ReadRepl, &dev->flags);
 		else {
 			if (rdev && !test_bit(Faulty, &rdev->flags))
@@ -4733,8 +4725,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 		if (rdev && test_bit(Faulty, &rdev->flags))
 			rdev = NULL;
 		if (rdev) {
-			is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
-					     &first_bad, &bad_sectors);
+			is_bad = rdev_has_badblock(rdev, sh->sector,
						   RAID5_STRIPE_SECTORS(conf));
 			if (s->blocked_rdev == NULL
 			    && (test_bit(Blocked, &rdev->flags)
 				|| is_bad < 0)) {
@@ -5463,8 +5455,8 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 	struct r5conf *conf = mddev->private;
 	struct bio *align_bio;
 	struct md_rdev *rdev;
-	sector_t sector, end_sector, first_bad;
-	int bad_sectors, dd_idx;
+	sector_t sector, end_sector;
+	int dd_idx;
 	bool did_inc;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
@@ -5493,8 +5485,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 
 	atomic_inc(&rdev->nr_pending);
 
-	if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
-			&bad_sectors)) {
+	if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) {
 		rdev_dec_pending(rdev, mddev);
 		return 0;
 	}