mirror of https://github.com/torvalds/linux.git
Merge tag 'md-6.11-20240612' of git://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.11/block
Pull MD updates from Song:

"The major changes in this PR are:

 - sync_action fix and refactoring, by Yu Kuai;
 - Various small fixes by Christoph Hellwig, Li Nan, and Ofir Gal."

* tag 'md-6.11-20240612' of git://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md/raid5: avoid BUG_ON() while continue reshape after reassembling
  md: pass in max_sectors for pers->sync_request()
  md: factor out helpers for different sync_action in md_do_sync()
  md: replace last_sync_action with new enum type
  md: use new helpers in md_do_sync()
  md: don't fail action_store() if sync_thread is not registered
  md: remove parameter check_seq for stop_sync_thread()
  md: replace sysfs api sync_action with new helpers
  md: factor out helper to start reshape from action_store()
  md: add new helpers for sync_action
  md: add a new enum type sync_action
  md: rearrange recovery_flags
  md/md-bitmap: fix writing non bitmap pages
  md/raid1: don't free conf on raid0_run failure
  md/raid0: don't free conf on raid0_run failure
  md: make md_flush_request() more readable
  md: fix deadlock between mddev_suspend and flush bio
  md: change the return value type of md_write_start to void
  md: do not delete safemode_timer in mddev_suspend
commit c2670cf789
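The centerpiece of the sync_action refactoring in this merge is a single table mapping the sysfs strings to the new enum sync_action. A minimal user-space sketch of that round-trip, mirroring the action_name[] table and md_sync_action_by_name() from the diff below; the strncmp-based matcher is only a stand-in for the kernel's cmd_match(), which likewise tolerates a trailing newline:

#include <stdio.h>
#include <string.h>

enum sync_action {
    ACTION_RESYNC,
    ACTION_RECOVER,
    ACTION_CHECK,
    ACTION_REPAIR,
    ACTION_RESHAPE,
    ACTION_FROZEN,
    ACTION_IDLE,
    NR_SYNC_ACTIONS,
};

static const char *action_name[NR_SYNC_ACTIONS] = {
    [ACTION_RESYNC]  = "resync",
    [ACTION_RECOVER] = "recover",
    [ACTION_CHECK]   = "check",
    [ACTION_REPAIR]  = "repair",
    [ACTION_RESHAPE] = "reshape",
    [ACTION_FROZEN]  = "frozen",
    [ACTION_IDLE]    = "idle",
};

/* stand-in for the kernel's cmd_match(): match with or without '\n' */
static int cmd_match(const char *page, const char *name)
{
    size_t len = strlen(name);

    return strncmp(page, name, len) == 0 &&
           (page[len] == '\0' || page[len] == '\n');
}

static enum sync_action sync_action_by_name(const char *page)
{
    enum sync_action action;

    for (action = 0; action < NR_SYNC_ACTIONS; ++action)
        if (cmd_match(page, action_name[action]))
            return action;
    return NR_SYNC_ACTIONS; /* no match */
}

int main(void)
{
    enum sync_action a = sync_action_by_name("check\n");

    if (a < NR_SYNC_ACTIONS)
        printf("parsed action: %s (%d)\n", action_name[a], a);
    return 0;
}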
drivers/md/dm-raid.c

@@ -3542,7 +3542,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 	recovery = rs->md.recovery;
 	state = decipher_sync_action(mddev, recovery);
 	progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
-	resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
+	resync_mismatches = mddev->last_sync_action == ACTION_CHECK ?
 			    atomic64_read(&mddev->resync_mismatches) : 0;

 	/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
drivers/md/md-bitmap.c

@@ -227,6 +227,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
 	struct block_device *bdev;
 	struct mddev *mddev = bitmap->mddev;
 	struct bitmap_storage *store = &bitmap->storage;
+	unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
+		PAGE_SHIFT;
 	loff_t sboff, offset = mddev->bitmap_info.offset;
 	sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
 	unsigned int size = PAGE_SIZE;

@@ -269,11 +271,9 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
 		if (size == 0)
 			/* bitmap runs in to data */
 			return -EINVAL;
-	} else {
-		/* DATA METADATA BITMAP - no problems */
 	}

-	md_super_write(mddev, rdev, sboff + ps, (int) size, page);
+	md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
 	return 0;
 }
516	drivers/md/md.c
@@ -69,6 +69,16 @@
 #include "md-bitmap.h"
 #include "md-cluster.h"

+static const char *action_name[NR_SYNC_ACTIONS] = {
+	[ACTION_RESYNC]		= "resync",
+	[ACTION_RECOVER]	= "recover",
+	[ACTION_CHECK]		= "check",
+	[ACTION_REPAIR]		= "repair",
+	[ACTION_RESHAPE]	= "reshape",
+	[ACTION_FROZEN]		= "frozen",
+	[ACTION_IDLE]		= "idle",
+};
+
 /* pers_list is a list of registered personalities protected by pers_lock. */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
@@ -479,7 +489,6 @@ int mddev_suspend(struct mddev *mddev, bool interruptible)
 	 */
 	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);

-	del_timer_sync(&mddev->safemode_timer);
 	/* restrict memory reclaim I/O during raid array is suspend */
 	mddev->noio_flag = memalloc_noio_save();

@@ -550,13 +559,9 @@ static void md_end_flush(struct bio *bio)

 	rdev_dec_pending(rdev, mddev);

-	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		/* The pair is percpu_ref_get() from md_flush_request() */
-		percpu_ref_put(&mddev->active_io);
-
+	if (atomic_dec_and_test(&mddev->flush_pending))
+		/* The pre-request flush has finished */
 		queue_work(md_wq, &mddev->flush_work);
-	}
 }

 static void md_submit_flush_data(struct work_struct *ws);
@@ -587,12 +592,8 @@ static void submit_flushes(struct work_struct *ws)
 			rcu_read_lock();
 		}
 	rcu_read_unlock();
-	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		/* The pair is percpu_ref_get() from md_flush_request() */
-		percpu_ref_put(&mddev->active_io);
-
+	if (atomic_dec_and_test(&mddev->flush_pending))
 		queue_work(md_wq, &mddev->flush_work);
-	}
 }

 static void md_submit_flush_data(struct work_struct *ws)
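Both md_end_flush() and submit_flushes() now share a bare last-reference idiom: flush_pending holds one count per in-flight per-device flush plus one for the submitter, and whichever path drops the count to zero queues flush_work. A compressed user-space sketch of the same idiom, with pthreads standing in for bio completions (names are illustrative, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int flush_pending;

/* stands in for queue_work(md_wq, &mddev->flush_work) */
static void queue_flush_work(void)
{
    printf("last flush completed, queueing flush_work\n");
}

/* stands in for md_end_flush(): the last completion queues the work */
static void *end_flush(void *arg)
{
    if (atomic_fetch_sub(&flush_pending, 1) == 1)
        queue_flush_work();
    return NULL;
}

int main(void)
{
    pthread_t t[4];
    int i;

    /* like submit_flushes(): one reference per rdev plus one for the
     * submitter, which is dropped after the submit loop */
    atomic_store(&flush_pending, 5);
    for (i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, end_flush, NULL);
    if (atomic_fetch_sub(&flush_pending, 1) == 1)
        queue_flush_work();
    for (i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    return 0;
}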
@@ -617,8 +618,20 @@ static void md_submit_flush_data(struct work_struct *ws)
 		bio_endio(bio);
 	} else {
 		bio->bi_opf &= ~REQ_PREFLUSH;
-		md_handle_request(mddev, bio);
+
+		/*
+		 * make_request() will never return error here, it only
+		 * returns error in raid5_make_request() by dm-raid.
+		 * Since dm always splits data and flush operation into
+		 * two separate io, io size of flush submitted by dm
+		 * always is 0, make_request() will not be called here.
+		 */
+		if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio)))
+			bio_io_error(bio);
 	}
+
+	/* The pair is percpu_ref_get() from md_flush_request() */
+	percpu_ref_put(&mddev->active_io);
 }

 /*
@@ -654,24 +667,22 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
 		WARN_ON(percpu_ref_is_zero(&mddev->active_io));
 		percpu_ref_get(&mddev->active_io);
 		mddev->flush_bio = bio;
 		bio = NULL;
 	}
-	spin_unlock_irq(&mddev->lock);

 	if (!bio) {
+		spin_unlock_irq(&mddev->lock);
 		INIT_WORK(&mddev->flush_work, submit_flushes);
 		queue_work(md_wq, &mddev->flush_work);
-	} else {
-		/* flush was performed for some other bio while we waited. */
-		if (bio->bi_iter.bi_size == 0)
-			/* an empty barrier - all done */
-			bio_endio(bio);
-		else {
-			bio->bi_opf &= ~REQ_PREFLUSH;
-			return false;
-		}
+		return true;
 	}
-	return true;
+
+	/* flush was performed for some other bio while we waited. */
+	spin_unlock_irq(&mddev->lock);
+	if (bio->bi_iter.bi_size == 0) {
+		/* pure flush without data - all done */
+		bio_endio(bio);
+		return true;
+	}
+
+	bio->bi_opf &= ~REQ_PREFLUSH;
+	return false;
 }
 EXPORT_SYMBOL(md_flush_request);
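The rewritten md_flush_request() keeps its old contract with callers: true means the bio was queued or completed here, false means the flush part is already done and the caller must submit the remaining data itself with REQ_PREFLUSH cleared. A reduced sketch of that contract from the caller's side (struct bio and the flag are simplified stand-ins, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

struct bio { unsigned int bi_opf; unsigned int bi_size; };
#define REQ_PREFLUSH (1u << 0)

/* stand-in for md_flush_request(): true = fully handled here */
static bool md_flush_request(struct bio *bio)
{
    if (bio->bi_size == 0)
        return true;              /* pure flush - completed */
    bio->bi_opf &= ~REQ_PREFLUSH; /* flush done, data remains */
    return false;
}

int main(void)
{
    struct bio bio = { .bi_opf = REQ_PREFLUSH, .bi_size = 4096 };

    if ((bio.bi_opf & REQ_PREFLUSH) && md_flush_request(&bio))
        return 0;                 /* nothing left to submit */
    printf("submit data part, bi_opf=%#x\n", bio.bi_opf);
    return 0;
}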
@@ -742,7 +753,6 @@ int mddev_init(struct mddev *mddev)

 	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
-	mutex_init(&mddev->sync_mutex);
 	mutex_init(&mddev->suspend_mutex);
 	mutex_init(&mddev->bitmap_info.mutex);
 	INIT_LIST_HEAD(&mddev->disks);
@@ -758,7 +768,7 @@ int mddev_init(struct mddev *mddev)
 	init_waitqueue_head(&mddev->recovery_wait);
 	mddev->reshape_position = MaxSector;
 	mddev->reshape_backwards = 0;
-	mddev->last_sync_action = "none";
+	mddev->last_sync_action = ACTION_IDLE;
 	mddev->resync_min = 0;
 	mddev->resync_max = MaxSector;
 	mddev->level = LEVEL_NONE;
@@ -4867,30 +4877,81 @@ out_unlock:
 static struct md_sysfs_entry md_metadata =
 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

+enum sync_action md_sync_action(struct mddev *mddev)
+{
+	unsigned long recovery = mddev->recovery;
+
+	/*
+	 * frozen has the highest priority, means running sync_thread will be
+	 * stopped immediately, and no new sync_thread can start.
+	 */
+	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
+		return ACTION_FROZEN;
+
+	/*
+	 * read-only array can't register sync_thread, and it can only
+	 * add/remove spares.
+	 */
+	if (!md_is_rdwr(mddev))
+		return ACTION_IDLE;
+
+	/*
+	 * idle means no sync_thread is running, and no new sync_thread is
+	 * requested.
+	 */
+	if (!test_bit(MD_RECOVERY_RUNNING, &recovery) &&
+	    !test_bit(MD_RECOVERY_NEEDED, &recovery))
+		return ACTION_IDLE;
+
+	if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+	    mddev->reshape_position != MaxSector)
+		return ACTION_RESHAPE;
+
+	if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+		return ACTION_RECOVER;
+
+	if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+		/*
+		 * MD_RECOVERY_CHECK must be paired with
+		 * MD_RECOVERY_REQUESTED.
+		 */
+		if (test_bit(MD_RECOVERY_CHECK, &recovery))
+			return ACTION_CHECK;
+		if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
+			return ACTION_REPAIR;
+		return ACTION_RESYNC;
+	}
+
+	/*
+	 * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no
+	 * sync_action is specified.
+	 */
+	return ACTION_IDLE;
+}
+
+enum sync_action md_sync_action_by_name(const char *page)
+{
+	enum sync_action action;
+
+	for (action = 0; action < NR_SYNC_ACTIONS; ++action) {
+		if (cmd_match(page, action_name[action]))
+			return action;
+	}
+
+	return NR_SYNC_ACTIONS;
+}
+
+const char *md_sync_action_name(enum sync_action action)
+{
+	return action_name[action];
+}
+
 static ssize_t
 action_show(struct mddev *mddev, char *page)
 {
-	char *type = "idle";
-	unsigned long recovery = mddev->recovery;
-	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
-		type = "frozen";
-	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
-		 (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
-		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
-			type = "reshape";
-		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
-			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
-				type = "resync";
-			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
-				type = "check";
-			else
-				type = "repair";
-		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
-			type = "recover";
-		else if (mddev->reshape_position != MaxSector)
-			type = "reshape";
-	}
-	return sprintf(page, "%s\n", type);
+	enum sync_action action = md_sync_action(mddev);
+
+	return sprintf(page, "%s\n", md_sync_action_name(action));
 }

 /**
@@ -4899,15 +4960,10 @@ action_show(struct mddev *mddev, char *page)
  * @locked: if set, reconfig_mutex will still be held after this function
  *          return; if not set, reconfig_mutex will be released after this
  *          function return.
- * @check_seq: if set, only wait for curent running sync_thread to stop, noted
- *             that new sync_thread can still start.
  */
-static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
+static void stop_sync_thread(struct mddev *mddev, bool locked)
 {
-	int sync_seq;
-
-	if (check_seq)
-		sync_seq = atomic_read(&mddev->sync_seq);
+	int sync_seq = atomic_read(&mddev->sync_seq);

 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
 		if (!locked)
@@ -4928,7 +4984,8 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)

 	wait_event(resync_wait,
 		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
-		   (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+		   (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
+		    sync_seq != atomic_read(&mddev->sync_seq)));

 	if (locked)
 		mddev_lock_nointr(mddev);
@@ -4939,7 +4996,7 @@ void md_idle_sync_thread(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);

 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, true);
+	stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_idle_sync_thread);

@@ -4948,7 +5005,7 @@ void md_frozen_sync_thread(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);

 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_frozen_sync_thread);

@@ -4963,100 +5020,127 @@ void md_unfrozen_sync_thread(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);

-static void idle_sync_thread(struct mddev *mddev)
+static int mddev_start_reshape(struct mddev *mddev)
 {
-	mutex_lock(&mddev->sync_mutex);
-	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	int ret;

-	if (mddev_lock(mddev)) {
-		mutex_unlock(&mddev->sync_mutex);
-		return;
+	if (mddev->pers->start_reshape == NULL)
+		return -EINVAL;
+
+	if (mddev->reshape_position == MaxSector ||
+	    mddev->pers->check_reshape == NULL ||
+	    mddev->pers->check_reshape(mddev)) {
+		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+		ret = mddev->pers->start_reshape(mddev);
+		if (ret)
+			return ret;
+	} else {
+		/*
+		 * If reshape is still in progress, and md_check_recovery() can
+		 * continue to reshape, don't restart reshape because data can
+		 * be corrupted for raid456.
+		 */
+		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}

-	stop_sync_thread(mddev, false, true);
-	mutex_unlock(&mddev->sync_mutex);
-}
-
-static void frozen_sync_thread(struct mddev *mddev)
-{
-	mutex_lock(&mddev->sync_mutex);
-	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
-	if (mddev_lock(mddev)) {
-		mutex_unlock(&mddev->sync_mutex);
-		return;
-	}
-
-	stop_sync_thread(mddev, false, false);
-	mutex_unlock(&mddev->sync_mutex);
+	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
+	return 0;
 }

 static ssize_t
 action_store(struct mddev *mddev, const char *page, size_t len)
 {
+	int ret;
+	enum sync_action action;
+
 	if (!mddev->pers || !mddev->pers->sync_request)
 		return -EINVAL;

+retry:
 	if (work_busy(&mddev->sync_work))
 		flush_work(&mddev->sync_work);

-	if (cmd_match(page, "idle"))
-		idle_sync_thread(mddev);
-	else if (cmd_match(page, "frozen"))
-		frozen_sync_thread(mddev);
-	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-		return -EBUSY;
-	else if (cmd_match(page, "resync"))
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	else if (cmd_match(page, "recover")) {
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
-	} else if (cmd_match(page, "reshape")) {
-		int err;
-		if (mddev->pers->start_reshape == NULL)
-			return -EINVAL;
-		err = mddev_lock(mddev);
-		if (!err) {
-			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-				err = -EBUSY;
-			} else if (mddev->reshape_position == MaxSector ||
-				   mddev->pers->check_reshape == NULL ||
-				   mddev->pers->check_reshape(mddev)) {
-				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-				err = mddev->pers->start_reshape(mddev);
-			} else {
-				/*
-				 * If reshape is still in progress, and
-				 * md_check_recovery() can continue to reshape,
-				 * don't restart reshape because data can be
-				 * corrupted for raid456.
-				 */
-				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-			}
-			mddev_unlock(mddev);
-		}
-		if (err)
-			return err;
-		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
-	} else {
-		if (cmd_match(page, "check"))
-			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-		else if (!cmd_match(page, "repair"))
-			return -EINVAL;
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
-		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+	ret = mddev_lock(mddev);
+	if (ret)
+		return ret;
+
+	if (work_busy(&mddev->sync_work)) {
+		mddev_unlock(mddev);
+		goto retry;
+	}
+
+	action = md_sync_action_by_name(page);
+
+	/* TODO: mdadm rely on "idle" to start sync_thread. */
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+		switch (action) {
+		case ACTION_FROZEN:
+			md_frozen_sync_thread(mddev);
+			ret = len;
+			goto out;
+		case ACTION_IDLE:
+			md_idle_sync_thread(mddev);
+			break;
+		case ACTION_RESHAPE:
+		case ACTION_RECOVER:
+		case ACTION_CHECK:
+		case ACTION_REPAIR:
+		case ACTION_RESYNC:
+			ret = -EBUSY;
+			goto out;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
+	} else {
+		switch (action) {
+		case ACTION_FROZEN:
+			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			ret = len;
+			goto out;
+		case ACTION_RESHAPE:
+			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			ret = mddev_start_reshape(mddev);
+			if (ret)
+				goto out;
+			break;
+		case ACTION_RECOVER:
+			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+			break;
+		case ACTION_CHECK:
+			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+			fallthrough;
+		case ACTION_REPAIR:
+			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+			fallthrough;
+		case ACTION_RESYNC:
+		case ACTION_IDLE:
+			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			break;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
 	}

 	if (mddev->ro == MD_AUTO_READ) {
 		/* A write to sync_action is enough to justify
 		 * canceling read-auto mode
 		 */
 		flush_work(&mddev->sync_work);
 		mddev->ro = MD_RDWR;
 		md_wakeup_thread(mddev->sync_thread);
 	}
+
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
-	return len;
+	ret = len;
+
+out:
+	mddev_unlock(mddev);
+	return ret;
 }

 static struct md_sysfs_entry md_scan_mode =
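Worth noting in the new action_store(): it flushes any pending sync_work before taking reconfig_mutex, then rechecks under the lock and retries if new work slipped in during the window. The shape of that retry dance, reduced to a user-space sketch (a mutex and a plain flag stand in for the kernel primitives):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool sync_work_busy; /* stands in for work_busy(&mddev->sync_work) */

static void flush_sync_work(void) /* stands in for flush_work() */
{
    sync_work_busy = false;
}

static int action_store_locking(void)
{
retry:
    if (sync_work_busy)
        flush_sync_work();

    pthread_mutex_lock(&reconfig_mutex);
    if (sync_work_busy) {
        /* new work was queued while we waited for the lock */
        pthread_mutex_unlock(&reconfig_mutex);
        goto retry;
    }
    /* ... parse the action and update mddev->recovery here ... */
    pthread_mutex_unlock(&reconfig_mutex);
    return 0;
}

int main(void)
{
    sync_work_busy = true;
    return action_store_locking();
}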
@@ -5065,7 +5149,8 @@ __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
 static ssize_t
 last_sync_action_show(struct mddev *mddev, char *page)
 {
-	return sprintf(page, "%s\n", mddev->last_sync_action);
+	return sprintf(page, "%s\n",
+		       md_sync_action_name(mddev->last_sync_action));
 }

 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
@@ -6437,7 +6522,7 @@ void md_stop_writes(struct mddev *mddev)
 {
 	mddev_lock_nointr(mddev);
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 	__md_stop_writes(mddev);
 	mddev_unlock(mddev);
 }
@@ -6505,7 +6590,7 @@ static int md_set_readonly(struct mddev *mddev)
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}

-	stop_sync_thread(mddev, false, false);
+	stop_sync_thread(mddev, false);
 	wait_event(mddev->sb_wait,
 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 	mddev_lock_nointr(mddev);
@@ -6551,7 +6636,7 @@ static int do_md_stop(struct mddev *mddev, int mode)
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}

-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);

 	if (mddev->sysfs_active ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
@@ -8641,12 +8726,12 @@ EXPORT_SYMBOL(md_done_sync);
  * A return value of 'false' means that the write wasn't recorded
  * and cannot proceed as the array is being suspend.
  */
-bool md_write_start(struct mddev *mddev, struct bio *bi)
+void md_write_start(struct mddev *mddev, struct bio *bi)
 {
 	int did_change = 0;

 	if (bio_data_dir(bi) != WRITE)
-		return true;
+		return;

 	BUG_ON(mddev->ro == MD_RDONLY);
 	if (mddev->ro == MD_AUTO_READ) {
@@ -8679,15 +8764,9 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
 	if (did_change)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	if (!mddev->has_superblocks)
-		return true;
+		return;
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
-		   is_md_suspended(mddev));
-	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
-		percpu_ref_put(&mddev->writes_pending);
-		return false;
-	}
-	return true;
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 }
 EXPORT_SYMBOL(md_write_start);

@@ -8835,6 +8914,77 @@ void md_allow_write(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);

+static sector_t md_sync_max_sectors(struct mddev *mddev,
+				    enum sync_action action)
+{
+	switch (action) {
+	case ACTION_RESYNC:
+	case ACTION_CHECK:
+	case ACTION_REPAIR:
+		atomic64_set(&mddev->resync_mismatches, 0);
+		fallthrough;
+	case ACTION_RESHAPE:
+		return mddev->resync_max_sectors;
+	case ACTION_RECOVER:
+		return mddev->dev_sectors;
+	default:
+		return 0;
+	}
+}
+
+static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
+{
+	sector_t start = 0;
+	struct md_rdev *rdev;
+
+	switch (action) {
+	case ACTION_CHECK:
+	case ACTION_REPAIR:
+		return mddev->resync_min;
+	case ACTION_RESYNC:
+		if (!mddev->bitmap)
+			return mddev->recovery_cp;
+		return 0;
+	case ACTION_RESHAPE:
+		/*
+		 * If the original node aborts reshaping then we continue the
+		 * reshaping, so set again to avoid restart reshape from the
+		 * first beginning
+		 */
+		if (mddev_is_clustered(mddev) &&
+		    mddev->reshape_position != MaxSector)
+			return mddev->reshape_position;
+		return 0;
+	case ACTION_RECOVER:
+		start = MaxSector;
+		rcu_read_lock();
+		rdev_for_each_rcu(rdev, mddev)
+			if (rdev->raid_disk >= 0 &&
+			    !test_bit(Journal, &rdev->flags) &&
+			    !test_bit(Faulty, &rdev->flags) &&
+			    !test_bit(In_sync, &rdev->flags) &&
+			    rdev->recovery_offset < start)
+				start = rdev->recovery_offset;
+		rcu_read_unlock();
+
+		/* If there is a bitmap, we need to make sure all
+		 * writes that started before we added a spare
+		 * complete before we start doing a recovery.
+		 * Otherwise the write might complete and (via
+		 * bitmap_endwrite) set a bit in the bitmap after the
+		 * recovery has checked that bit and skipped that
+		 * region.
+		 */
+		if (mddev->bitmap) {
+			mddev->pers->quiesce(mddev, 1);
+			mddev->pers->quiesce(mddev, 0);
+		}
+		return start;
+	default:
+		return MaxSector;
+	}
+}
+
 #define SYNC_MARKS	10
 #define	SYNC_MARK_STEP	(3*HZ)
 #define UPDATE_FREQUENCY (5*60*HZ)
@@ -8851,7 +9001,8 @@ void md_do_sync(struct md_thread *thread)
 	sector_t last_check;
 	int skipped = 0;
 	struct md_rdev *rdev;
-	char *desc, *action = NULL;
+	enum sync_action action;
+	const char *desc;
 	struct blk_plug plug;
 	int ret;

@@ -8882,21 +9033,9 @@ void md_do_sync(struct md_thread *thread)
 		goto skip;
 	}

-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
-			desc = "data-check";
-			action = "check";
-		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
-			desc = "requested-resync";
-			action = "repair";
-		} else
-			desc = "resync";
-	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-		desc = "reshape";
-	else
-		desc = "recovery";
-
-	mddev->last_sync_action = action ?: desc;
+	action = md_sync_action(mddev);
+	desc = md_sync_action_name(action);
+	mddev->last_sync_action = action;

 	/*
 	 * Before starting a resync we must have set curr_resync to
@@ -8964,56 +9103,8 @@ void md_do_sync(struct md_thread *thread)
 		spin_unlock(&all_mddevs_lock);
 	} while (mddev->curr_resync < MD_RESYNC_DELAYED);

-	j = 0;
-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		/* resync follows the size requested by the personality,
-		 * which defaults to physical size, but can be virtual size
-		 */
-		max_sectors = mddev->resync_max_sectors;
-		atomic64_set(&mddev->resync_mismatches, 0);
-		/* we don't use the checkpoint if there's a bitmap */
-		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
-			j = mddev->resync_min;
-		else if (!mddev->bitmap)
-			j = mddev->recovery_cp;
-
-	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
-		max_sectors = mddev->resync_max_sectors;
-		/*
-		 * If the original node aborts reshaping then we continue the
-		 * reshaping, so set j again to avoid restart reshape from the
-		 * first beginning
-		 */
-		if (mddev_is_clustered(mddev) &&
-		    mddev->reshape_position != MaxSector)
-			j = mddev->reshape_position;
-	} else {
-		/* recovery follows the physical size of devices */
-		max_sectors = mddev->dev_sectors;
-		j = MaxSector;
-		rcu_read_lock();
-		rdev_for_each_rcu(rdev, mddev)
-			if (rdev->raid_disk >= 0 &&
-			    !test_bit(Journal, &rdev->flags) &&
-			    !test_bit(Faulty, &rdev->flags) &&
-			    !test_bit(In_sync, &rdev->flags) &&
-			    rdev->recovery_offset < j)
-				j = rdev->recovery_offset;
-		rcu_read_unlock();
-
-		/* If there is a bitmap, we need to make sure all
-		 * writes that started before we added a spare
-		 * complete before we start doing a recovery.
-		 * Otherwise the write might complete and (via
-		 * bitmap_endwrite) set a bit in the bitmap after the
-		 * recovery has checked that bit and skipped that
-		 * region.
-		 */
-		if (mddev->bitmap) {
-			mddev->pers->quiesce(mddev, 1);
-			mddev->pers->quiesce(mddev, 0);
-		}
-	}
+	max_sectors = md_sync_max_sectors(mddev, action);
+	j = md_sync_position(mddev, action);

 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
 	pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
@@ -9095,7 +9186,8 @@ void md_do_sync(struct md_thread *thread)
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			break;

-		sectors = mddev->pers->sync_request(mddev, j, &skipped);
+		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
+						    &skipped);
 		if (sectors == 0) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			break;
@@ -9185,7 +9277,7 @@ void md_do_sync(struct md_thread *thread)
 		mddev->curr_resync_completed = mddev->curr_resync;
 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	}
-	mddev->pers->sync_request(mddev, max_sectors, &skipped);
+	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);

 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
 	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
126	drivers/md/md.h
@@ -34,6 +34,61 @@
  */
 #define MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)

+/* Status of sync thread. */
+enum sync_action {
+	/*
+	 * Represented by MD_RECOVERY_SYNC, start when:
+	 * 1) after assemble, sync data from first rdev to other copies, this
+	 * must be done first before other sync actions and will only execute
+	 * once;
+	 * 2) resize the array (notice that this is not reshape), sync data for
+	 * the new range;
+	 */
+	ACTION_RESYNC,
+	/*
+	 * Represented by MD_RECOVERY_RECOVER, start when:
+	 * 1) for new replacement, sync data based on the replace rdev or
+	 * available copies from other rdev;
+	 * 2) for new member disk while the array is degraded, sync data from
+	 * other rdev;
+	 * 3) reassemble after power failure or re-add a hot removed rdev, sync
+	 * data from first rdev to other copies based on bitmap;
+	 */
+	ACTION_RECOVER,
+	/*
+	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
+	 * MD_RECOVERY_CHECK, start when user echoes "check" to the sysfs api
+	 * sync_action, used to check if data copies from different rdevs are
+	 * the same. The number of mismatch sectors will be exported to user
+	 * space by the sysfs api mismatch_cnt;
+	 */
+	ACTION_CHECK,
+	/*
+	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED, start when
+	 * user echoes "repair" to the sysfs api sync_action, usually paired
+	 * with ACTION_CHECK, used to force syncing data once user found that
+	 * there are inconsistent data.
+	 */
+	ACTION_REPAIR,
+	/*
+	 * Represented by MD_RECOVERY_RESHAPE, start when a new member disk is
+	 * added to the conf; notice that this is different from spares or
+	 * replacement;
+	 */
+	ACTION_RESHAPE,
+	/*
+	 * Represented by MD_RECOVERY_FROZEN, can be set by the sysfs api
+	 * sync_action or internal usage like setting the array read-only,
+	 * will forbid above actions.
+	 */
+	ACTION_FROZEN,
+	/*
+	 * All above actions don't match.
+	 */
+	ACTION_IDLE,
+	NR_SYNC_ACTIONS,
+};
+
 /*
  * The struct embedded in rdev is used to serialize IO.
  */
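These enum values are exactly the strings user space already exchanges through the md sysfs interface. For instance, a scrub can be requested and observed with plain file I/O; md0 is an assumed array name here, adjust for your system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* assumed array name; the attribute path is the standard md sysfs one */
    const char *path = "/sys/block/md0/md/sync_action";
    char buf[32];
    ssize_t n;
    int fd = open(path, O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* request a scrub; the kernel maps "check" to ACTION_CHECK */
    if (write(fd, "check\n", 6) != 6)
        perror("write");
    if (lseek(fd, 0, SEEK_SET) == 0 &&
        (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
        buf[n] = '\0';
        printf("sync_action: %s", buf);
    }
    close(fd);
    return 0;
}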
@@ -371,13 +426,12 @@ struct mddev {
 	struct md_thread __rcu *thread;	/* management thread */
 	struct md_thread __rcu *sync_thread; /* doing resync or reconstruct */

-	/* 'last_sync_action' is initialized to "none".  It is set when a
-	 * sync operation (i.e "data-check", "requested-resync", "resync",
-	 * "recovery", or "reshape") is started.  It holds this value even
+	/*
+	 * Set when a sync operation is started. It holds this value even
 	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
-	 * or finished).  It is overwritten when a new sync operation is begun.
+	 * or finished). It is overwritten when a new sync operation is begun.
 	 */
-	char *last_sync_action;
+	enum sync_action last_sync_action;
 	sector_t curr_resync;	/* last block scheduled */
 	/* As resync requests can complete out of order, we cannot easily track
 	 * how much resync has been completed. So we occasionally pause until
@@ -540,8 +594,6 @@ struct mddev {
 	 */
 	struct list_head	deleting;

-	/* Used to synchronize idle and frozen for action_store() */
-	struct mutex		sync_mutex;
 	/* The sequence number for sync thread */
 	atomic_t sync_seq;

@@ -551,22 +603,46 @@ struct mddev {
 };

 enum recovery_flags {
-	/*
-	 * If neither SYNC or RESHAPE are set, then it is a recovery.
-	 */
-	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
-	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
-	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
-	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
-	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
-	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
-	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
-	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
-	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
-	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
-	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
-	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
-	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
+	/* flags for sync thread running status */
+
+	/*
+	 * set when one of sync action is set and new sync thread need to be
+	 * registered, or just add/remove spares from conf.
+	 */
+	MD_RECOVERY_NEEDED,
+	/* sync thread is running, or about to be started */
+	MD_RECOVERY_RUNNING,
+	/* sync thread needs to be aborted for some reason */
+	MD_RECOVERY_INTR,
+	/* sync thread is done and is waiting to be unregistered */
+	MD_RECOVERY_DONE,
+	/* running sync thread must abort immediately, and not restart */
+	MD_RECOVERY_FROZEN,
+	/* waiting for pers->start() to finish */
+	MD_RECOVERY_WAIT,
+	/* interrupted because io-error */
+	MD_RECOVERY_ERROR,
+
+	/* flags determines sync action, see details in enum sync_action */
+
+	/* if just this flag is set, action is resync. */
+	MD_RECOVERY_SYNC,
+	/*
+	 * paired with MD_RECOVERY_SYNC, if MD_RECOVERY_CHECK is not set,
+	 * action is repair, means user requested resync.
+	 */
+	MD_RECOVERY_REQUESTED,
+	/*
+	 * paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is
+	 * check.
+	 */
+	MD_RECOVERY_CHECK,
+	/* recovery, or need to try it */
+	MD_RECOVERY_RECOVER,
+	/* reshape */
+	MD_RECOVERY_RESHAPE,
+	/* remote node is running resync thread */
+	MD_RESYNCING_REMOTE,
 };

 enum md_ro_state {
@@ -653,7 +729,8 @@ struct md_personality
 	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*spare_active) (struct mddev *mddev);
-	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
+	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
+				 sector_t max_sector, int *skipped);
 	int (*resize) (struct mddev *mddev, sector_t sectors);
 	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
 	int (*check_reshape) (struct mddev *mddev);
@@ -785,7 +862,10 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t
 extern void md_wakeup_thread(struct md_thread __rcu *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
-extern bool md_write_start(struct mddev *mddev, struct bio *bi);
+extern enum sync_action md_sync_action(struct mddev *mddev);
+extern enum sync_action md_sync_action_by_name(const char *page);
+extern const char *md_sync_action_name(enum sync_action action);
+extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
drivers/md/raid0.c

@@ -365,18 +365,13 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
 	return array_sectors;
 }

-static void free_conf(struct mddev *mddev, struct r0conf *conf)
-{
-	kfree(conf->strip_zone);
-	kfree(conf->devlist);
-	kfree(conf);
-}
-
 static void raid0_free(struct mddev *mddev, void *priv)
 {
 	struct r0conf *conf = priv;

-	free_conf(mddev, conf);
+	kfree(conf->strip_zone);
+	kfree(conf->devlist);
+	kfree(conf);
 }

 static int raid0_set_limits(struct mddev *mddev)
@@ -415,7 +410,7 @@ static int raid0_run(struct mddev *mddev)
 	if (!mddev_is_dm(mddev)) {
 		ret = raid0_set_limits(mddev);
 		if (ret)
-			goto out_free_conf;
+			return ret;
 	}

 	/* calculate array device size */
@@ -427,13 +422,7 @@ static int raid0_run(struct mddev *mddev)

 	dump_zones(mddev);

-	ret = md_integrity_register(mddev);
-	if (ret)
-		goto out_free_conf;
-	return 0;
-out_free_conf:
-	free_conf(mddev, conf);
-	return ret;
+	return md_integrity_register(mddev);
 }

 /*
drivers/md/raid1.c

@@ -1687,8 +1687,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
 	if (bio_data_dir(bio) == READ)
 		raid1_read_request(mddev, bio, sectors, NULL);
 	else {
-		if (!md_write_start(mddev,bio))
-			return false;
+		md_write_start(mddev,bio);
 		raid1_write_request(mddev, bio, sectors);
 	}
 	return true;
@@ -2757,12 +2756,12 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
  */

 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
-				   int *skipped)
+				   sector_t max_sector, int *skipped)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
 	struct bio *bio;
-	sector_t max_sector, nr_sectors;
+	sector_t nr_sectors;
 	int disk = -1;
 	int i;
 	int wonly = -1;
@@ -2778,7 +2777,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	if (init_resync(conf))
 		return 0;

-	max_sector = mddev->dev_sectors;
 	if (sector_nr >= max_sector) {
 		/* If we aborted, we need to abort the
 		 * sync on the 'current' bitmap chunk (there will
@@ -3204,7 +3202,6 @@ static int raid1_set_limits(struct mddev *mddev)
 	return queue_limits_set(mddev->gendisk->queue, &lim);
 }

-static void raid1_free(struct mddev *mddev, void *priv);
 static int raid1_run(struct mddev *mddev)
 {
 	struct r1conf *conf;
|
||||
if (!mddev_is_dm(mddev)) {
|
||||
ret = raid1_set_limits(mddev);
|
||||
if (ret)
|
||||
goto abort;
|
||||
return ret;
|
||||
}
|
||||
|
||||
mddev->degraded = 0;
|
||||
@@ -3252,8 +3249,7 @@ static int raid1_run(struct mddev *mddev)
 	 */
 	if (conf->raid_disks - mddev->degraded < 1) {
 		md_unregister_thread(mddev, &conf->thread);
-		ret = -EINVAL;
-		goto abort;
+		return -EINVAL;
 	}

 	if (conf->raid_disks - mddev->degraded == 1)
@@ -3277,14 +3273,8 @@ static int raid1_run(struct mddev *mddev)
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

 	ret = md_integrity_register(mddev);
-	if (ret) {
+	if (ret)
 		md_unregister_thread(mddev, &mddev->thread);
-		goto abort;
-	}
-	return 0;
-
-abort:
-	raid1_free(mddev, conf);
 	return ret;
 }
drivers/md/raid10.c

@@ -1836,8 +1836,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
 	    && md_flush_request(mddev, bio))
 		return true;

-	if (!md_write_start(mddev, bio))
-		return false;
+	md_write_start(mddev, bio);

 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
 		if (!raid10_handle_discard(mddev, bio))
@@ -3140,12 +3139,12 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
  */

 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
-				    int *skipped)
+				    sector_t max_sector, int *skipped)
 {
 	struct r10conf *conf = mddev->private;
 	struct r10bio *r10_bio;
 	struct bio *biolist = NULL, *bio;
-	sector_t max_sector, nr_sectors;
+	sector_t nr_sectors;
 	int i;
 	int max_sync;
 	sector_t sync_blocks;
@@ -3175,10 +3174,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		return 0;

  skipped:
-	max_sector = mddev->dev_sectors;
-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
-	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-		max_sector = mddev->resync_max_sectors;
 	if (sector_nr >= max_sector) {
 		conf->cluster_sync_low = 0;
 		conf->cluster_sync_high = 0;
drivers/md/raid5.c

@@ -6078,8 +6078,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 		ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
 	}

-	if (!md_write_start(mddev, bi))
-		return false;
+	md_write_start(mddev, bi);
 	/*
 	 * If array is degraded, better not do chunk aligned read because
 	 * later we might have to read it again in order to reconstruct
|
||||
safepos = conf->reshape_safe;
|
||||
sector_div(safepos, data_disks);
|
||||
if (mddev->reshape_backwards) {
|
||||
BUG_ON(writepos < reshape_sectors);
|
||||
if (WARN_ON(writepos < reshape_sectors))
|
||||
return MaxSector;
|
||||
|
||||
writepos -= reshape_sectors;
|
||||
readpos += reshape_sectors;
|
||||
safepos += reshape_sectors;
|
||||
@@ -6273,14 +6274,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 	 * to set 'stripe_addr' which is where we will write to.
 	 */
 	if (mddev->reshape_backwards) {
-		BUG_ON(conf->reshape_progress == 0);
+		if (WARN_ON(conf->reshape_progress == 0))
+			return MaxSector;
+
 		stripe_addr = writepos;
-		BUG_ON((mddev->dev_sectors &
-			~((sector_t)reshape_sectors - 1))
-		       - reshape_sectors - stripe_addr
-		       != sector_nr);
+		if (WARN_ON((mddev->dev_sectors &
+			     ~((sector_t)reshape_sectors - 1)) -
+			    reshape_sectors - stripe_addr != sector_nr))
+			return MaxSector;
 	} else {
-		BUG_ON(writepos != sector_nr + reshape_sectors);
+		if (WARN_ON(writepos != sector_nr + reshape_sectors))
+			return MaxSector;
+
 		stripe_addr = sector_nr;
 	}
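The raid5 change above demotes the reshape-position BUG_ON() asserts to WARN_ON() plus an early MaxSector return, so an inconsistent reshape geometry (e.g. after reassembling) aborts the sync instead of crashing the machine. The same defensive pattern in miniature, as a user-space approximation; WARN_ON here is a hypothetical macro built on a GCC statement expression, loosely modeled on the kernel's:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
#define MaxSector (~(sector_t)0)

/* user-space stand-in for WARN_ON(): report, but keep running */
#define WARN_ON(cond) \
    ({ int __c = !!(cond); \
       if (__c) fprintf(stderr, "WARN: %s\n", #cond); __c; })

static sector_t reshape_step(sector_t writepos, sector_t reshape_sectors)
{
    /* the old code crashed here with BUG_ON(writepos < reshape_sectors) */
    if (WARN_ON(writepos < reshape_sectors))
        return MaxSector; /* abort the sync gracefully */
    return writepos - reshape_sectors;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)reshape_step(1024, 64));
    printf("%llu\n", (unsigned long long)reshape_step(32, 64));
    return 0;
}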
@@ -6458,11 +6463,10 @@ ret:
 }

 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
-					  int *skipped)
+					  sector_t max_sector, int *skipped)
 {
 	struct r5conf *conf = mddev->private;
 	struct stripe_head *sh;
-	sector_t max_sector = mddev->dev_sectors;
 	sector_t sync_blocks;
 	int still_degraded = 0;
 	int i;