block: extend functionality to map bvec iterator

Extend blk_rq_map_user_iov so that it can handle a bvec iterator,
using the new blk_rq_map_user_bvec function. That helper maps the
pages from the bvec iterator into a bio and places the bio into the
request.

This helper will be used by nvme for the uring-passthrough path when
IO is done using pre-mapped buffers.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220930062749.152261-11-anuj20.g@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
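
For orientation, here is a minimal sketch (not part of the patch) of how a
passthrough driver might hand an already-pinned buffer to
blk_rq_map_user_iov() as an ITER_BVEC iterator. The helper name
map_premapped_buf and its caller-supplied bvec array are hypothetical; the
real consumer is the nvme uring-passthrough code added later in this series.

#include <linux/blk-mq.h>
#include <linux/uio.h>

/*
 * Hypothetical caller, for illustration only: assumes @bvecs already
 * describes pinned pages (e.g. an io_uring fixed buffer) covering
 * @len bytes in @nr_segs segments.
 */
static int map_premapped_buf(struct request *rq, struct bio_vec *bvecs,
			     unsigned int nr_segs, size_t len)
{
	struct iov_iter iter;

	/* Build an ITER_BVEC iterator over the pre-mapped pages. */
	iov_iter_bvec(&iter, rq_data_dir(rq), bvecs, nr_segs, len);

	/*
	 * With this patch, blk_rq_map_user_iov() spots the bvec
	 * iterator and takes the zero-copy blk_rq_map_user_bvec()
	 * path; it copies only if queue limits force a fallback.
	 */
	return blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
}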
@@ -548,6 +548,62 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_append_bio);
 
+/* Prepare bio for passthrough IO given ITER_BVEC iter */
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+{
+	struct request_queue *q = rq->q;
+	size_t nr_iter = iov_iter_count(iter);
+	size_t nr_segs = iter->nr_segs;
+	struct bio_vec *bvecs, *bvprvp = NULL;
+	struct queue_limits *lim = &q->limits;
+	unsigned int nsegs = 0, bytes = 0;
+	struct bio *bio;
+	size_t i;
+
+	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+		return -EINVAL;
+	if (nr_segs > queue_max_segments(q))
+		return -EINVAL;
+
+	/* no iovecs to alloc, as we already have a BVEC iterator */
+	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+	if (bio == NULL)
+		return -ENOMEM;
+
+	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+	blk_rq_bio_prep(rq, bio, nr_segs);
+
+	/* loop to perform a bunch of sanity checks */
+	bvecs = (struct bio_vec *)iter->bvec;
+	for (i = 0; i < nr_segs; i++) {
+		struct bio_vec *bv = &bvecs[i];
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, fallback to copy.
+		 */
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+			blk_mq_map_bio_put(bio);
+			return -EREMOTEIO;
+		}
+
+		/* check full condition */
+		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+			goto put_bio;
+		if (bytes + bv->bv_len > nr_iter)
+			goto put_bio;
+		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+			goto put_bio;
+
+		nsegs++;
+		bytes += bv->bv_len;
+		bvprvp = bv;
+	}
+	return 0;
+put_bio:
+	blk_mq_map_bio_put(bio);
+	return -EINVAL;
+}
+
 /**
  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q: request queue where request should be inserted
@@ -567,24 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		struct rq_map_data *map_data,
 		const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	bool copy = false;
+	bool copy = false, map_bvec = false;
 	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret = -EINVAL;
 
-	if (!iter_is_iovec(iter))
-		goto fail;
-
 	if (map_data)
 		copy = true;
 	else if (blk_queue_may_bounce(q))
 		copy = true;
 	else if (iov_iter_alignment(iter) & align)
 		copy = true;
+	else if (iov_iter_is_bvec(iter))
+		map_bvec = true;
+	else if (!iter_is_iovec(iter))
+		copy = true;
 	else if (queue_virt_boundary(q))
 		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
+	if (map_bvec) {
+		ret = blk_rq_map_user_bvec(rq, iter);
+		if (!ret)
+			return 0;
+		if (ret != -EREMOTEIO)
+			goto fail;
+		/* fall back to copying the data on limits mismatches */
+		copy = true;
+	}
+
 	i = *iter;
 	do {
 		if (copy)
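
A design choice worth noting in the hunks above: blk_rq_map_user_bvec()
reserves -EREMOTEIO for the one failure that copying can actually fix, a
segment layout that violates the queue's virt-boundary (SG gap) limit.
blk_rq_map_user_iov() therefore retries only that case through the copy
path, while every other validation failure is treated as a hard error and
propagated to the caller via the fail label.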