dm: simplify dm_submit_bio_remap interface
Remove the from_wq argument from dm_submit_bio_remap(). This eliminates the need for dm_submit_bio_remap() callers to know whether they are calling from a workqueue or from the original dm_submit_bio().

Add a map_task member to struct dm_io, record the mapping task in alloc_io(), and clear it after all target ->map() calls have completed. Update dm_submit_bio_remap() to check whether 'current' matches io->map_task rather than rely on a passed 'from_wq' argument.

This simplifies the chore of porting each DM target to dm_submit_bio_remap() because there is no longer a risk of programming error from not knowing every context in which a method that calls dm_submit_bio_remap() might run.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit b7f8dff098 (parent a92512819b)
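For illustration, here is a hedged sketch of what a target-side caller can look like after this change. The target name and the helpers needs_deferral(), defer_bio_to_worker() and bio_from_work() are invented for the example and are not part of this patch; only dm_submit_bio_remap() and DM_MAPIO_SUBMITTED are real DM interfaces.

/* Illustrative sketch only: hypothetical 'example' target and helpers. */
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>

/* Hypothetical helpers, not part of this patch: */
static bool needs_deferral(struct bio *bio);
static void defer_bio_to_worker(struct dm_target *ti, struct bio *bio);
static struct bio *bio_from_work(struct work_struct *ws);

static int example_map(struct dm_target *ti, struct bio *bio)
{
        if (!needs_deferral(bio)) {
                /* Submitting from ->map() context (previously from_wq=false). */
                dm_submit_bio_remap(bio, NULL);
                return DM_MAPIO_SUBMITTED;
        }

        /* Hand the bio to a workqueue owned by the target. */
        defer_bio_to_worker(ti, bio);
        return DM_MAPIO_SUBMITTED;
}

static void example_worker_fn(struct work_struct *ws)
{
        struct bio *bio = bio_from_work(ws);    /* hypothetical helper */

        /* Same call from a worker thread (previously from_wq=true). */
        dm_submit_bio_remap(bio, NULL);
}

As the dm.c comment in the diff below notes, a target that submits bios this way should also enable ti->accounts_remapped_io.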
@@ -237,6 +237,7 @@ struct dm_io {
         unsigned long start_time;
         void *data;
         struct hlist_node node;
+        struct task_struct *map_task;
         spinlock_t endio_lock;
         struct dm_stats_aux stats_aux;
         /* last member of dm_target_io is 'struct bio' */

@@ -1857,7 +1857,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
                 return 1;
         }

-        dm_submit_bio_remap(io->base_bio, clone, (gfp != CRYPT_MAP_READ_GFP));
+        dm_submit_bio_remap(io->base_bio, clone);
         return 0;
 }

@@ -1883,7 +1883,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
 {
         struct bio *clone = io->ctx.bio_out;

-        dm_submit_bio_remap(io->base_bio, clone, true);
+        dm_submit_bio_remap(io->base_bio, clone);
 }

 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)

@@ -1962,7 +1962,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)

         if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
             test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
-                dm_submit_bio_remap(io->base_bio, clone, true);
+                dm_submit_bio_remap(io->base_bio, clone);
                 return;
         }

@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
         while (bio) {
                 n = bio->bi_next;
                 bio->bi_next = NULL;
-                dm_submit_bio_remap(bio, NULL, true);
+                dm_submit_bio_remap(bio, NULL);
                 bio = n;
         }
 }

@@ -755,7 +755,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
         struct pool *pool = tc->pool;

         if (!bio_triggers_commit(tc, bio)) {
-                dm_submit_bio_remap(bio, NULL, true);
+                dm_submit_bio_remap(bio, NULL);
                 return;
         }

@@ -2383,7 +2383,7 @@ static void process_deferred_bios(struct pool *pool)
                 if (bio->bi_opf & REQ_PREFLUSH)
                         bio_endio(bio);
                 else
-                        dm_submit_bio_remap(bio, NULL, true);
+                        dm_submit_bio_remap(bio, NULL);
         }
 }

@@ -574,6 +574,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
         this_cpu_inc(*md->pending_io);
         io->orig_bio = NULL;
         io->md = md;
+        io->map_task = current;
         spin_lock_init(&io->endio_lock);

         io->start_time = jiffies;

@@ -1189,15 +1190,13 @@ static inline void __dm_submit_bio_remap(struct bio *clone,
 /*
  * @clone: clone bio that DM core passed to target's .map function
  * @tgt_clone: clone of @clone bio that target needs submitted
- * @from_wq: caller is a workqueue thread managed by DM target
  *
  * Targets should use this interface to submit bios they take
  * ownership of when returning DM_MAPIO_SUBMITTED.
  *
  * Target should also enable ti->accounts_remapped_io
  */
-void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone,
-                         bool from_wq)
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
 {
         struct dm_target_io *tio = clone_to_tio(clone);
         struct dm_io *io = tio->io;

@@ -1212,7 +1211,7 @@ void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone,
          * Account io->origin_bio to DM dev on behalf of target
          * that took ownership of IO with DM_MAPIO_SUBMITTED.
          */
-        if (!from_wq) {
+        if (io->map_task == current) {
                 /* Still in target's map function */
                 io->start_io_acct = true;
         } else {

@@ -1568,6 +1567,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
         }

         error = __split_and_process_bio(&ci);
+        ci.io->map_task = NULL;
         if (error || !ci.sector_count)
                 goto out;

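Taken together, the dm.c hunks give io->map_task a short lifecycle. The following is a condensed, paraphrased sketch of the patched code above, not a literal excerpt:

/* Condensed paraphrase of the dm.c changes; not a literal excerpt. */

/* alloc_io(): remember which task is about to run the target ->map() calls. */
io->map_task = current;

/* dm_submit_bio_remap(): infer the caller's context from the task pointer. */
if (io->map_task == current) {
        /* Still inside the target's ->map(); defer io accounting. */
        io->start_io_acct = true;
} else {
        /* Submitted later, e.g. from a target-owned workqueue: account now. */
}

/* dm_split_and_process_bio(): every ->map() call has returned. */
ci.io->map_task = NULL;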
@@ -471,7 +471,7 @@ int dm_suspended(struct dm_target *ti);
 int dm_post_suspending(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, bool from_wq);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
 union map_info *dm_get_rq_mapinfo(struct request *rq);

 #ifdef CONFIG_BLK_DEV_ZONED