Merge tag 'md/4.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
Pull MD fixes from Shaohua Li:

 - Fix a locking issue for md-cluster (Guoqing)
 - Fix a sync crash for raid10 (Ni)
 - Fix a reshape bug with raid5 cache enabled (me)

* tag 'md/4.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  md-cluster: release RESYNC lock after the last resync message
  RAID10 BUG_ON in raise_barrier when force is true and conf->barrier is 0
  md/raid5-cache: disable reshape completely
commit 3d0e7a9e00

drivers/md/md-cluster.c

@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
 static int resync_finish(struct mddev *mddev)
 {
         struct md_cluster_info *cinfo = mddev->cluster_info;
+        int ret = 0;
 
         clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
-        dlm_unlock_sync(cinfo->resync_lockres);
 
         /*
          * If resync thread is interrupted so we can't say resync is finished,
          * another node will launch resync thread to continue.
          */
-        if (test_bit(MD_CLOSING, &mddev->flags))
-                return 0;
-        else
-                return resync_info_update(mddev, 0, 0);
+        if (!test_bit(MD_CLOSING, &mddev->flags))
+                ret = resync_info_update(mddev, 0, 0);
+        dlm_unlock_sync(cinfo->resync_lockres);
+        return ret;
 }
 
 static int area_resyncing(struct mddev *mddev, int direction,
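
The md-cluster change reorders the unlock: the RESYNC DLM lock is now released only after the final resync_info_update() message has been sent, where the old code dropped the lock before sending it. Reassembled from the new side of the hunk above (nothing added beyond comments marking the ordering), resync_finish() now reads:

static int resync_finish(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        int ret = 0;

        clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);

        /*
         * If resync thread is interrupted so we can't say resync is finished,
         * another node will launch resync thread to continue.
         */
        if (!test_bit(MD_CLOSING, &mddev->flags))
                ret = resync_info_update(mddev, 0, 0);  /* the last resync message */
        dlm_unlock_sync(cinfo->resync_lockres);          /* ...and only then unlock */
        return ret;
}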

drivers/md/raid10.c

@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                 allow_barrier(conf);
         }
 
+        raise_barrier(conf, 0);
 read_more:
         /* Now schedule reads for blocks from sector_nr to last */
         r10_bio = raid10_alloc_init_r10buf(conf);
         r10_bio->state = 0;
-        raise_barrier(conf, sectors_done != 0);
+        raise_barrier(conf, 1);
         atomic_set(&r10_bio->remaining, 0);
         r10_bio->mddev = mddev;
         r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
         if (sector_nr <= last)
                 goto read_more;
 
+        lower_barrier(conf);
+
         /* Now that we have done the whole section we can
          * update reshape_progress
          */
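
raise_barrier(conf, force) in raid10.c begins with BUG_ON(force && !conf->barrier), so a forced raise is only legal while a barrier is already held. Previously each pass of the read_more loop called raise_barrier(conf, sectors_done != 0), while the r10_bios submitted by earlier passes lower the barrier again from their completion path; if those completions ran ahead, conf->barrier could reach 0 between iterations and the next forced raise hit the BUG_ON. The two hunks above bracket the whole loop with one extra plain barrier so the forced raise always sees conf->barrier > 0. A minimal sketch of the resulting pairing (everything else in reshape_request() elided; raise_barrier()/lower_barrier() are the existing raid10 helpers):

        raise_barrier(conf, 0);         /* held across the whole read_more loop */
read_more:
        /* ... allocate the r10_bio and schedule reads ... */
        raise_barrier(conf, 1);         /* forced raise: a barrier is guaranteed held */
        /* ... submit bios; their completion drops this per-iteration barrier ... */
        if (sector_nr <= last)
                goto read_more;

        lower_barrier(conf);            /* drop the barrier taken before the loop */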

drivers/md/raid5-log.h

@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
 extern void ppl_quiesce(struct r5conf *conf, int quiesce);
 extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
 
+static inline bool raid5_has_log(struct r5conf *conf)
+{
+        return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+}
+
 static inline bool raid5_has_ppl(struct r5conf *conf)
 {
         return test_bit(MD_HAS_PPL, &conf->mddev->flags);
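
The new helper matters because of what it tests: conf->log is runtime state that is only set once the journal log has been initialised, while MD_HAS_JOURNAL is the array's persistent feature bit. The raid5.c hunks below switch their checks from the former to the latter, so stripe batching, raid5_resize() and check_reshape() now refuse to proceed whenever the array is marked as having a journal, even if the log device is missing or not yet set up; this is the "md/raid5-cache: disable reshape completely" part of the pull. The pattern applied at each call site below (stripe_can_batch() returns false rather than -EINVAL):

        /* before: only blocked while the journal log was actually loaded */
        if (conf->log || raid5_has_ppl(conf))
                return -EINVAL;

        /* after: blocked whenever the MD_HAS_JOURNAL feature bit is set */
        if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;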

drivers/md/raid5.c

@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
 {
         struct r5conf *conf = sh->raid_conf;
 
-        if (conf->log || raid5_has_ppl(conf))
+        if (raid5_has_log(conf) || raid5_has_ppl(conf))
                 return false;
         return test_bit(STRIPE_BATCH_READY, &sh->state) &&
                 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
         sector_t newsize;
         struct r5conf *conf = mddev->private;
 
-        if (conf->log || raid5_has_ppl(conf))
+        if (raid5_has_log(conf) || raid5_has_ppl(conf))
                 return -EINVAL;
         sectors &= ~((sector_t)conf->chunk_sectors - 1);
         newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
 {
         struct r5conf *conf = mddev->private;
 
-        if (conf->log || raid5_has_ppl(conf))
+        if (raid5_has_log(conf) || raid5_has_ppl(conf))
                 return -EINVAL;
         if (mddev->delta_disks == 0 &&
             mddev->new_layout == mddev->layout &&