[PATCH] device-mapper multipath: Fix pg initialisation races

Prevent more than one priority group initialisation function from being
outstanding at once.  Otherwise the completion functions interfere with each
other.  Also, reloading the table could reference a freed pointer.
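
For illustration only, a minimal user-space sketch of the kind of guard this introduces: a flag tested and set under the same lock, so only the first trigger actually starts an initialisation and later triggers back off until it completes. A pthread mutex stands in for m->lock, and the names (mpath_state, claim_init, init_complete) are invented for the sketch; this is not the dm-mpath code itself.

	/*
	 * Sketch only: user-space analogue of a pg_init_in_progress-style guard.
	 * Not kernel code; names are illustrative.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct mpath_state {
		pthread_mutex_t lock;
		int init_required;	/* like m->pg_init_required */
		int init_in_progress;	/* like m->pg_init_in_progress */
	};

	/* Returns 1 only for the caller that gets to start the initialisation. */
	static int claim_init(struct mpath_state *s)
	{
		int start = 0;

		pthread_mutex_lock(&s->lock);
		if (s->init_required && !s->init_in_progress) {
			s->init_required = 0;
			s->init_in_progress = 1;	/* block further attempts */
			start = 1;
		}
		pthread_mutex_unlock(&s->lock);

		return start;
	}

	/* Completion path: drop the guard so a later initialisation can run. */
	static void init_complete(struct mpath_state *s)
	{
		pthread_mutex_lock(&s->lock);
		s->init_in_progress = 0;
		pthread_mutex_unlock(&s->lock);
	}

	int main(void)
	{
		struct mpath_state s = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.init_required = 1,
		};

		printf("first claim:  %d\n", claim_init(&s));	/* 1: starts init */
		printf("second claim: %d\n", claim_init(&s));	/* 0: one already outstanding */
		init_complete(&s);
		s.init_required = 1;
		printf("after completion: %d\n", claim_init(&s));	/* 1: may start again */
		return 0;
	}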

Only reset queue_io in pg_init_complete if another pg_init isn't required.
Skip process_queued_ios if the queue is empty so that we only trigger a
pg_init if there's I/O.
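
Again purely as a sketch (same user-space stand-ins; the fields queue_io, init_required, init_in_progress and queue_size mirror the multipath fields, but handle_init_completion and should_run_worker are made-up names, not kernel APIs): on completion, queue_io is cleared only when no further initialisation is pending, and the queued-I/O worker is only worth scheduling when something is actually queued.

	/* Sketch only: completion-side rules, user-space analogue. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct mp {
		pthread_mutex_t lock;
		bool init_required;	/* another init still needed?        */
		bool init_in_progress;	/* guard from the previous sketch    */
		bool queue_io;		/* keep queueing I/O until settled   */
		unsigned queue_size;	/* number of queued bios             */
	};

	/*
	 * Clear queue_io only if no further init is required, then drop the
	 * in-progress guard so the next initialisation can be issued.
	 */
	static void handle_init_completion(struct mp *m, int err)
	{
		pthread_mutex_lock(&m->lock);
		if (!err && !m->init_required)
			m->queue_io = false;
		m->init_in_progress = false;
		pthread_mutex_unlock(&m->lock);
	}

	/* Worker trigger: skip scheduling work when nothing is queued. */
	static bool should_run_worker(const struct mp *m)
	{
		return m->queue_size > 0;
	}

	int main(void)
	{
		struct mp m = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.init_required = true,	/* a retry is still pending */
			.init_in_progress = true,
			.queue_io = true,
			.queue_size = 3,
		};

		handle_init_completion(&m, 0);
		printf("queue_io with retry pending: %d\n", m.queue_io);	/* stays 1 */
		printf("schedule worker: %d\n", should_run_worker(&m));	/* 1: I/O queued */
		return 0;
	}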

Signed-off-by: Lars Marowsky-Bree <lmb@suse.de>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author: Alasdair G Kergon, 2005-07-12 15:53:04 -07:00 (committed by Linus Torvalds)
parent 436d41087d
commit c3cd4f6b27

--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
+	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -308,7 +309,8 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
 		/* Queue for the daemon to resubmit */
 		bio_list_add(&m->queued_ios, bio);
 		m->queue_size++;
-		if (m->pg_init_required || !m->queue_io)
+		if ((m->pg_init_required && !m->pg_init_in_progress) ||
+		    !m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = 0;
@@ -335,7 +337,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
 
 	m->saved_queue_if_no_path = m->queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
+	if (!m->queue_if_no_path && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -380,25 +382,31 @@ static void process_queued_ios(void *data)
 {
 	struct multipath *m = (struct multipath *) data;
 	struct hw_handler *hwh = &m->hw_handler;
-	struct pgpath *pgpath;
-	unsigned init_required, must_queue = 0;
+	struct pgpath *pgpath = NULL;
+	unsigned init_required = 0, must_queue = 1;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	if (!m->queue_size)
+		goto out;
+
 	if (!m->current_pgpath)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
 
-	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path))
-		must_queue = 1;
+	if ((pgpath && !m->queue_io) ||
+	    (!pgpath && !m->queue_if_no_path))
+		must_queue = 0;
 
-	init_required = m->pg_init_required;
-	if (init_required)
+	if (m->pg_init_required && !m->pg_init_in_progress) {
 		m->pg_init_required = 0;
+		m->pg_init_in_progress = 1;
+		init_required = 1;
+	}
 
+out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (init_required)
@@ -843,7 +851,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	pgpath->path.is_active = 1;
 
 	m->current_pgpath = NULL;
-	if (!m->nr_valid_paths++)
+	if (!m->nr_valid_paths++ && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	queue_work(kmultipathd, &m->trigger_event);
@@ -969,12 +977,13 @@ void dm_pg_init_complete(struct path *path, unsigned err_flags)
 		bypass_pg(m, pg, 1);
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (!err_flags)
-		m->queue_io = 0;
-	else {
+	if (err_flags) {
 		m->current_pgpath = NULL;
 		m->current_pg = NULL;
-	}
+	} else if (!m->pg_init_required)
+		m->queue_io = 0;
+
+	m->pg_init_in_progress = 0;
 	queue_work(kmultipathd, &m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 }