Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md

Pull MD updates from Shaohua Li:
 "A few MD fixes for 4.19-rc1:

   - several md-cluster fixes from Guoqing

   - a data corruption fix from BingJing

   - other cleanups"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  md/raid5: fix data corruption of replacements after originals dropped
  drivers/md/raid5: Do not disable irq on release_inactive_stripe_list() call
  drivers/md/raid5: Use irqsave variant of atomic_dec_and_lock()
  md/r5cache: remove redundant pointer bio
  md-cluster: don't send msg if array is closing
  md-cluster: show array's status more accurate
  md-cluster: clear another node's suspend_area after the copy is finished
commit b219a1d2de
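
Two of the raid5 changes pulled in here, "drivers/md/raid5: Use irqsave variant of atomic_dec_and_lock()" and "drivers/md/raid5: Do not disable irq on release_inactive_stripe_list() call", replace an explicit local_irq_save()/local_irq_restore() region around atomic_dec_and_lock() with atomic_dec_and_lock_irqsave(), which disables interrupts only when the count actually drops to zero and the lock is taken. Below is a minimal kernel-style sketch of that before/after pattern; struct demo_ref and the demo_put_*() functions are hypothetical illustrations, not code from the patched raid5.c.

#include <linux/atomic.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object, for illustration only. */
struct demo_ref {
	atomic_t count;
	spinlock_t lock;
};

/*
 * Old pattern: interrupts are disabled for the whole slow path,
 * even when the count does not reach zero.
 */
static void demo_put_old(struct demo_ref *r)
{
	unsigned long flags;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&r->count, &r->lock)) {
		/* last reference: tear down under the lock */
		spin_unlock(&r->lock);
	}
	local_irq_restore(flags);
}

/*
 * New pattern: atomic_dec_and_lock_irqsave() disables interrupts
 * only when it actually takes the lock.
 */
static void demo_put_new(struct demo_ref *r)
{
	unsigned long flags;

	if (atomic_dec_and_lock_irqsave(&r->count, &r->lock, flags)) {
		/* last reference: tear down under the lock */
		spin_unlock_irqrestore(&r->lock, flags);
	}
}

The same shape is visible in the raid5_release_stripe() hunk further down, where release_inactive_stripe_list() ends up being called outside the interrupts-disabled region.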
@@ -304,15 +304,6 @@ static void recover_bitmaps(struct md_thread *thread)
 	while (cinfo->recovery_map) {
 		slot = fls64((u64)cinfo->recovery_map) - 1;
 
-		/* Clear suspend_area associated with the bitmap */
-		spin_lock_irq(&cinfo->suspend_lock);
-		list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
-			if (slot == s->slot) {
-				list_del(&s->list);
-				kfree(s);
-			}
-		spin_unlock_irq(&cinfo->suspend_lock);
-
 		snprintf(str, 64, "bitmap%04d", slot);
 		bm_lockres = lockres_init(mddev, str, NULL, 1);
 		if (!bm_lockres) {
@@ -331,14 +322,30 @@ static void recover_bitmaps(struct md_thread *thread)
 			pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
 			goto clear_bit;
 		}
+
+		/* Clear suspend_area associated with the bitmap */
+		spin_lock_irq(&cinfo->suspend_lock);
+		list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+			if (slot == s->slot) {
+				list_del(&s->list);
+				kfree(s);
+			}
+		spin_unlock_irq(&cinfo->suspend_lock);
+
 		if (hi > 0) {
 			if (lo < mddev->recovery_cp)
 				mddev->recovery_cp = lo;
 			/* wake up thread to continue resync in case resync
 			 * is not finished */
 			if (mddev->recovery_cp != MaxSector) {
-				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-				md_wakeup_thread(mddev->thread);
+				/*
+				 * clear the REMOTE flag since we will launch
+				 * resync thread in current node.
+				 */
+				clear_bit(MD_RESYNCING_REMOTE,
+						&mddev->recovery);
+				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+				md_wakeup_thread(mddev->thread);
 			}
 		}
 clear_bit:
@@ -457,6 +464,11 @@ static void process_suspend_info(struct mddev *mddev,
 	struct suspend_info *s;
 
 	if (!hi) {
+		/*
+		 * clear the REMOTE flag since resync or recovery is finished
+		 * in remote node.
+		 */
+		clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
 		remove_suspend_info(mddev, slot);
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
@@ -585,6 +597,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
 		revalidate_disk(mddev->gendisk);
 		break;
 	case RESYNCING:
+		set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
 		process_suspend_info(mddev, le32_to_cpu(msg->slot),
 				     le64_to_cpu(msg->low),
 				     le64_to_cpu(msg->high));
@@ -1265,8 +1278,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
 static int resync_finish(struct mddev *mddev)
 {
 	struct md_cluster_info *cinfo = mddev->cluster_info;
+
+	clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
 	dlm_unlock_sync(cinfo->resync_lockres);
-	return resync_info_update(mddev, 0, 0);
+
+	/*
+	 * If resync thread is interrupted so we can't say resync is finished,
+	 * another node will launch resync thread to continue.
+	 */
+	if (test_bit(MD_CLOSING, &mddev->flags))
+		return 0;
+	else
+		return resync_info_update(mddev, 0, 0);
 }
 
 static int area_resyncing(struct mddev *mddev, int direction,
@@ -7677,6 +7677,23 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 	resync -= atomic_read(&mddev->recovery_active);
 
 	if (resync == 0) {
+		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
+			struct md_rdev *rdev;
+
+			rdev_for_each(rdev, mddev)
+				if (rdev->raid_disk >= 0 &&
+				    !test_bit(Faulty, &rdev->flags) &&
+				    rdev->recovery_offset != MaxSector &&
+				    rdev->recovery_offset) {
+					seq_printf(seq, "\trecover=REMOTE");
+					return 1;
+				}
+			if (mddev->reshape_position != MaxSector)
+				seq_printf(seq, "\treshape=REMOTE");
+			else
+				seq_printf(seq, "\tresync=REMOTE");
+			return 1;
+		}
 		if (mddev->recovery_cp < MaxSector) {
 			seq_printf(seq, "\tresync=PENDING");
 			return 1;
@@ -496,6 +496,7 @@ enum recovery_flags {
 	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
 	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
 	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
+	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
 };
 
 static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -717,7 +717,6 @@ static void r5c_disable_writeback_async(struct work_struct *work)
 static void r5l_submit_current_io(struct r5l_log *log)
 {
 	struct r5l_io_unit *io = log->current_io;
-	struct bio *bio;
 	struct r5l_meta_block *block;
 	unsigned long flags;
 	u32 crc;
@@ -730,7 +729,6 @@ static void r5l_submit_current_io(struct r5l_log *log)
 	block->meta_size = cpu_to_le32(io->meta_offset);
 	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
 	block->checksum = cpu_to_le32(crc);
-	bio = io->current_bio;
 
 	log->current_io = NULL;
 	spin_lock_irqsave(&log->io_list_lock, flags);
@@ -409,16 +409,14 @@ void raid5_release_stripe(struct stripe_head *sh)
 		md_wakeup_thread(conf->mddev->thread);
 	return;
 slow_path:
-	local_irq_save(flags);
 	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
-	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
+	if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
 		INIT_LIST_HEAD(&list);
 		hash = sh->hash_lock_index;
 		do_release_stripe(conf, sh, &list);
-		spin_unlock(&conf->device_lock);
+		spin_unlock_irqrestore(&conf->device_lock, flags);
 		release_inactive_stripe_list(conf, &list, hash);
 	}
-	local_irq_restore(flags);
 }
 
 static inline void remove_hash(struct stripe_head *sh)
@@ -4521,6 +4519,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			s->failed++;
 			if (rdev && !test_bit(Faulty, &rdev->flags))
 				do_recovery = 1;
+			else if (!rdev) {
+				rdev = rcu_dereference(
+				    conf->disks[i].replacement);
+				if (rdev && !test_bit(Faulty, &rdev->flags))
+					do_recovery = 1;
+			}
 		}
 
 		if (test_bit(R5_InJournal, &dev->flags))