Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm: sysfs revert add empty release function to avoid debug warning
  dm mpath: fix stall when requeueing io
  dm raid1: fix null pointer dereference in suspend
  dm raid1: fail writes if errors are not handled and log fails
  dm log: userspace fix overhead_size calculations
  dm snapshot: persistent annotate work_queue as on stack
  dm stripe: avoid divide by zero with invalid stripe count
commit 8862627254
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 {
 	int r = 0;
 	size_t dummy = 0;
-	int overhead_size =
-		sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
+	int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
 	struct dm_ulog_request *tfr = prealloced_ulog_tfr;
 	struct receiving_pkg pkg;
 
+	/*
+	 * Given the space needed to hold the 'struct cn_msg' and
+	 * 'struct dm_ulog_request' - do we have enough payload
+	 * space remaining?
+	 */
 	if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
 		DMINFO("Size of tfr exceeds preallocated size");
 		return -EINVAL;
@@ -191,7 +195,7 @@ resend:
 	 */
 	mutex_lock(&dm_ulog_lock);
 
-	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
+	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
 	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;
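The two hunks above fix a classic sizeof slip: sizeof(struct dm_ulog_request *)
measures a pointer (4 or 8 bytes), not the request header itself, so the old
overhead_size badly under-counted the header space carved out of the
preallocated buffer. A minimal standalone sketch of the difference, with a
hypothetical struct demo standing in for struct dm_ulog_request (this is not
kernel code):

#include <stdio.h>

/* Hypothetical stand-in for 'struct dm_ulog_request'. */
struct demo {
	char uuid[129];
	unsigned long long luid;
	int request_type;
};

int main(void)
{
	/* What the buggy code reserved: the size of a pointer. */
	printf("sizeof(struct demo *) = %zu\n", sizeof(struct demo *));
	/* What actually precedes the payload: the whole header. */
	printf("sizeof(struct demo)   = %zu\n", sizeof(struct demo));
	return 0;
}

On a typical 64-bit build this prints 8 for the pointer and well over 100 for
the struct, which is why the payload-space check had to be corrected.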
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	/*
 	 * Dispatch io.
 	 */
-	if (unlikely(ms->log_failure)) {
+	if (unlikely(ms->log_failure) && errors_handled(ms)) {
 		spin_lock_irq(&ms->lock);
 		bio_list_merge(&ms->failures, &sync);
 		spin_unlock_irq(&ms->lock);
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
 	spin_lock_irq(&rh->region_lock);
 	if (success)
 		list_add(&reg->list, &reg->rh->recovered_regions);
-	else {
-		reg->state = DM_RH_NOSYNC;
+	else
 		list_add(&reg->list, &reg->rh->failed_recovered_regions);
-	}
+
 	spin_unlock_irq(&rh->region_lock);
 
 	rh->wakeup_workers(rh->context);
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
 	 * Issue the synchronous I/O from a different thread
 	 * to avoid generic_make_request recursion.
 	 */
-	INIT_WORK(&req.work, do_metadata);
+	INIT_WORK_ON_STACK(&req.work, do_metadata);
 	queue_work(ps->metadata_wq, &req.work);
 	flush_workqueue(ps->metadata_wq);
 
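The INIT_WORK_ON_STACK change matters because req.work here lives on the
caller's stack: with CONFIG_DEBUG_OBJECTS_WORK enabled, the debug-objects code
must be told a work item is stack-allocated or it warns. A kernel-style sketch
of the pattern, with hypothetical names (not compilable outside a kernel tree):

#include <linux/workqueue.h>

struct my_req {
	struct work_struct work;	/* lives on the stack below */
	int result;
};

static void my_worker(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	req->result = 0;	/* ... perform the synchronous I/O ... */
}

static void run_sync_work(struct workqueue_struct *wq)
{
	struct my_req req;

	/* _ON_STACK variant: annotates the object for debugobjects. */
	INIT_WORK_ON_STACK(&req.work, my_worker);
	queue_work(wq, &req.work);
	/* Work must complete before 'req' goes out of scope. */
	flush_workqueue(wq);
}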
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	stripes = simple_strtoul(argv[0], &end, 10);
-	if (*end) {
+	if (!stripes || *end) {
 		ti->error = "Invalid stripe count";
 		return -EINVAL;
 	}
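The added !stripes test closes the actual divide-by-zero: the constructor goes
on to divide the target size by the stripe count, and simple_strtoul happily
returns 0 for the string "0". A userspace sketch of the same check, using a
hypothetical parse_stripes helper with strtoul playing the role of
simple_strtoul:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_stripes(const char *arg, unsigned long *stripes)
{
	char *end;

	*stripes = strtoul(arg, &end, 10);
	/* Reject trailing junk and a zero count - the zero would later
	 * surface as a division by zero when sizing the stripes. */
	if (!*stripes || *end)
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long n;

	printf("\"4\"  -> %d\n", parse_stripes("4", &n));	/* 0, ok   */
	printf("\"0\"  -> %d\n", parse_stripes("0", &n));	/* -EINVAL */
	printf("\"4x\" -> %d\n", parse_stripes("4x", &n));	/* -EINVAL */
	return 0;
}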
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -79,13 +79,6 @@ static struct sysfs_ops dm_sysfs_ops = {
 	.show	= dm_attr_show,
 };
 
-/*
- * The sysfs structure is embedded in md struct, nothing to do here
- */
-static void dm_sysfs_release(struct kobject *kobj)
-{
-}
-
 /*
  * dm kobject is embedded in mapped_device structure
  * no need to define release function here
@@ -93,7 +86,6 @@ static void dm_sysfs_release(struct kobject *kobj)
 static struct kobj_type dm_ktype = {
 	.sysfs_ops	= &dm_sysfs_ops,
 	.default_attrs	= dm_attrs,
-	.release	= dm_sysfs_release
 };
 
 /*
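For context on the revert: a kobject's ktype->release is invoked by
kobject_put() when the last reference drops, and it normally frees the
containing object. dm's kobject is embedded in the long-lived struct
mapped_device, so there is nothing to free; the empty stub only silenced a
debug warning and is taken back out here. A kernel-style sketch of the usual
pattern the stub was imitating, with a hypothetical my_obj type:

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_obj {
	struct kobject kobj;
	int payload;
};

static void my_obj_release(struct kobject *kobj)
{
	/* Runs once the last kobject_put() drops the refcount. */
	kfree(container_of(kobj, struct my_obj, kobj));
}

static struct kobj_type my_ktype = {
	.release = my_obj_release,
};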
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
 	return BLKPREP_OK;
 }
 
-static void map_request(struct dm_target *ti, struct request *clone,
+/*
+ * Returns:
+ * 0  : the request has been processed (not requeued)
+ * !0 : the request has been requeued
+ */
+static int map_request(struct dm_target *ti, struct request *clone,
 			struct mapped_device *md)
 {
-	int r;
+	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	/*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
 		dm_requeue_unmapped_request(clone);
+		requeued = 1;
 		break;
 	default:
 		if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
 		dm_kill_unmapped_request(clone, r);
 		break;
 	}
+
+	return requeued;
 }
 
 /*
||||
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
 		atomic_inc(&md->pending[rq_data_dir(clone)]);
 
 		spin_unlock(q->queue_lock);
-		map_request(ti, clone, md);
+		if (map_request(ti, clone, md))
+			goto requeued;
+
 		spin_lock_irq(q->queue_lock);
 	}
 
 	goto out;
 
+requeued:
+	spin_lock_irq(q->queue_lock);
+
 plug_and_out:
 	if (!elv_queue_empty(q))
 		/* Some requests still remain, retry later */
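The dm.c changes give map_request() a return-value contract so that
dm_request_fn() can tell a requeued clone apart from a dispatched one and back
off rather than keep pulling work. A compact standalone sketch of that
caller/callee contract, with hypothetical names:

#include <stdio.h>

/* Returns 0 if the request was dispatched, nonzero if it was requeued. */
static int map_one(int i)
{
	return i == 2;	/* pretend the third request must be requeued */
}

static int dispatch_loop(int nr)
{
	int i, dispatched = 0;

	for (i = 0; i < nr; i++) {
		if (map_one(i))
			break;	/* stop dispatching; retry later */
		dispatched++;
	}
	return dispatched;
}

int main(void)
{
	printf("dispatched %d of 5\n", dispatch_loop(5));	/* 2 of 5 */
	return 0;
}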