dm: support REQ_OP_WRITE_ZEROES
Copy & paste from the REQ_OP_WRITE_SAME code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
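For context: REQ_OP_WRITE_ZEROES asks a device to zero a block range without carrying a data payload, which is why the bios built below need no bvecs. Once this patch is applied, the path can be exercised end to end from user space through the BLKZEROOUT ioctl, which the block layer services with WRITE ZEROES bios when the stacked queue limits allow it (and by writing zero pages otherwise). A minimal sketch, not part of this commit; the device path and length are made up for illustration:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>		/* BLKZEROOUT */

	int main(void)
	{
		/* range[0] = byte offset, range[1] = byte length; both must
		 * be aligned to the device's logical block size. */
		uint64_t range[2] = { 0, 1024 * 1024 };
		int fd = open("/dev/mapper/example", O_WRONLY);

		if (fd < 0 || ioctl(fd, BLKZEROOUT, &range) < 0) {
			perror("BLKZEROOUT");
			return 1;
		}
		close(fd);
		return 0;
	}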
commit ac62d6208a
parent 0f5d690f7b
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -132,6 +132,7 @@ void dm_init_md_queue(struct mapped_device *md);
 void dm_init_normal_md_queue(struct mapped_device *md);
 int md_in_flight(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
+void disable_write_zeroes(struct mapped_device *md);
 
 static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
 {
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -312,9 +312,12 @@ static void do_region(int op, int op_flags, unsigned region,
 	 */
 	if (op == REQ_OP_DISCARD)
 		special_cmd_max_sectors = q->limits.max_discard_sectors;
+	else if (op == REQ_OP_WRITE_ZEROES)
+		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
 	else if (op == REQ_OP_WRITE_SAME)
 		special_cmd_max_sectors = q->limits.max_write_same_sectors;
-	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
+	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
+	     op == REQ_OP_WRITE_SAME) &&
 	    special_cmd_max_sectors == 0) {
 		dec_count(io, region, -EOPNOTSUPP);
 		return;
@@ -330,6 +333,7 @@ static void do_region(int op, int op_flags, unsigned region,
 		 */
 		switch (op) {
 		case REQ_OP_DISCARD:
+		case REQ_OP_WRITE_ZEROES:
 			num_bvecs = 0;
 			break;
 		case REQ_OP_WRITE_SAME:
@@ -347,7 +351,7 @@ static void do_region(int op, int op_flags, unsigned region,
 		bio_set_op_attrs(bio, op, op_flags);
 		store_io_and_region_in_bio(bio, io, region);
 
-		if (op == REQ_OP_DISCARD) {
+		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
 			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 			remaining -= num_sectors;
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -59,6 +59,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
+	ti->num_write_zeroes_bios = 1;
 	ti->private = lc;
 	return 0;
 
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1103,6 +1103,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
+	ti->num_write_zeroes_bios = 1;
 	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
 	else
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -298,9 +298,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 			r = rq_end_io(tio->ti, clone, error, &tio->info);
 	}
 
-	if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
-		     !clone->q->limits.max_write_same_sectors))
-		disable_write_same(tio->md);
+	if (unlikely(r == -EREMOTEIO)) {
+		if (req_op(clone) == REQ_OP_WRITE_SAME &&
+		    !clone->q->limits.max_write_same_sectors)
+			disable_write_same(tio->md);
+		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+		    !clone->q->limits.max_write_zeroes_sectors)
+			disable_write_zeroes(tio->md);
+	}
 
 	if (r <= 0)
 		/* The target wants to complete the I/O */
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -169,6 +169,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ti->num_flush_bios = stripes;
 	ti->num_discard_bios = stripes;
 	ti->num_write_same_bios = stripes;
+	ti->num_write_zeroes_bios = stripes;
 
 	sc->chunk_size = chunk_size;
 	if (chunk_size & (chunk_size - 1))
@@ -293,6 +294,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
+	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
 	    unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
 		BUG_ON(target_bio_nr >= sc->stripes);
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1533,6 +1533,34 @@ static bool dm_table_supports_write_same(struct dm_table *t)
 	return true;
 }
 
+static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
+					   sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !q->limits.max_write_zeroes_sectors;
+}
+
+static bool dm_table_supports_write_zeroes(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (!ti->num_write_zeroes_bios)
+			return false;
+
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
+			return false;
+	}
+
+	return true;
+}
+
+
 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
@@ -1603,6 +1631,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
+	if (!dm_table_supports_write_zeroes(t))
+		q->limits.max_write_zeroes_sectors = 0;
 
 	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
 		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -824,6 +824,14 @@ void disable_write_same(struct mapped_device *md)
 	limits->max_write_same_sectors = 0;
 }
 
+void disable_write_zeroes(struct mapped_device *md)
+{
+	struct queue_limits *limits = dm_get_queue_limits(md);
+
+	/* device doesn't really support WRITE ZEROES, disable it */
+	limits->max_write_zeroes_sectors = 0;
+}
+
 static void clone_endio(struct bio *bio)
 {
 	int error = bio->bi_error;
@@ -850,9 +858,14 @@ static void clone_endio(struct bio *bio)
 		}
 	}
 
-	if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
-		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
-		disable_write_same(md);
+	if (unlikely(r == -EREMOTEIO)) {
+		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+			disable_write_same(md);
+		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+			disable_write_zeroes(md);
+	}
 
 	free_tio(tio);
 	dec_pending(io, error);
@@ -1201,6 +1214,11 @@ static unsigned get_num_write_same_bios(struct dm_target *ti)
 	return ti->num_write_same_bios;
 }
 
+static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
+{
+	return ti->num_write_zeroes_bios;
+}
+
 typedef bool (*is_split_required_fn)(struct dm_target *ti);
 
 static bool is_split_required_for_discard(struct dm_target *ti)
@@ -1255,6 +1273,11 @@ static int __send_write_same(struct clone_info *ci)
 	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
+static int __send_write_zeroes(struct clone_info *ci)
+{
+	return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
+}
+
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
@@ -1269,6 +1292,8 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 		return __send_discard(ci);
 	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
 		return __send_write_same(ci);
+	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
+		return __send_write_zeroes(ci);
 
 	ti = dm_table_find_target(ci->map, ci->sector);
 	if (!dm_target_is_valid(ti))
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -254,6 +254,12 @@ struct dm_target {
 	 */
 	unsigned num_write_same_bios;
 
+	/*
+	 * The number of WRITE ZEROES bios that will be submitted to the target.
+	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
+	 */
+	unsigned num_write_zeroes_bios;
+
 	/*
	 * The minimum number of extra bytes allocated in each io for the
 	 * target to use.
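Taken together, the per-target changes follow one opt-in pattern: a target advertises WRITE ZEROES support by setting num_write_zeroes_bios in its constructor, dm-table.c keeps max_write_zeroes_sectors non-zero only when every target and every underlying device is capable, and a -EREMOTEIO completion clears the limit again via disable_write_zeroes(). A minimal sketch of the opt-in for a hypothetical out-of-tree target, mirroring the dm-linear/dm-stripe/dm-mpath hunks above (example_ctr is an invented name):

	#include <linux/device-mapper.h>

	/* Hypothetical target constructor; only the last assignment is
	 * new with this commit. */
	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* ... parse arguments and set up ti->private here ... */

		ti->num_flush_bios = 1;
		ti->num_discard_bios = 1;
		ti->num_write_same_bios = 1;
		/* Opt in: dm core will clone and remap WRITE ZEROES bios to
		 * this target; without it, dm-table clears
		 * max_write_zeroes_sectors for the whole mapped device. */
		ti->num_write_zeroes_bios = 1;
		return 0;
	}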