block: fix queue limits checks in blk_rq_map_user_bvec for real
blk_rq_map_user_bvec currently only has ad-hoc checks for the queue
limits, and the last fix to it enabled valid NVMe I/O to pass, but also
allowed invalid I/O for drivers that set a max_segment_size or
seg_boundary limit.

Fix it once and for all by using the bio_split_rw_at helper from the
I/O path, which indicates if and where a bio would have to be split to
adhere to the queue limits. If it returns a positive value, turn that
into -EREMOTEIO to retry using the copy path.
Fixes: 2ff9494418 ("block: fix sanity checks in blk_rq_map_user_bvec")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2ff9494418
commit be0e822bb3
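For context on the -EREMOTEIO convention used here: the existing caller,
blk_rq_map_user_iov(), already treats that error from the zero-copy bvec
path as "the data layout does not fit the queue limits" and retries with
a bounce-buffer copy rather than failing the request. Roughly, paraphrased
from block/blk-map.c (for illustration only, not part of this patch):

        if (iov_iter_is_bvec(iter)) {
                ret = blk_rq_map_user_bvec(rq, iter);
                if (!ret)
                        return 0;
                if (ret != -EREMOTEIO)
                        goto fail;
                /* fall back to copying the data on limits mismatches */
                copy = true;
        }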
@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
 /* Prepare bio for passthrough IO given ITER_BVEC iter */
 static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
 {
-        struct request_queue *q = rq->q;
-        size_t nr_iter = iov_iter_count(iter);
-        size_t nr_segs = iter->nr_segs;
-        struct bio_vec *bvecs, *bvprvp = NULL;
-        const struct queue_limits *lim = &q->limits;
-        unsigned int nsegs = 0, bytes = 0;
+        const struct queue_limits *lim = &rq->q->limits;
+        unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
+        unsigned int nsegs;
         struct bio *bio;
-        size_t i;
+        int ret;
 
-        if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
-                return -EINVAL;
-        if (nr_segs > queue_max_segments(q))
+        if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
                 return -EINVAL;
 
-        /* no iovecs to alloc, as we already have a BVEC iterator */
+        /* reuse the bvecs from the iterator instead of allocating new ones */
         bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
-        if (bio == NULL)
+        if (!bio)
                 return -ENOMEM;
-
         bio_iov_bvec_set(bio, (struct iov_iter *)iter);
-        blk_rq_bio_prep(rq, bio, nr_segs);
 
-        /* loop to perform a bunch of sanity checks */
-        bvecs = (struct bio_vec *)iter->bvec;
-        for (i = 0; i < nr_segs; i++) {
-                struct bio_vec *bv = &bvecs[i];
-
-                /*
-                 * If the queue doesn't support SG gaps and adding this
-                 * offset would create a gap, fallback to copy.
-                 */
-                if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
-                        blk_mq_map_bio_put(bio);
-                        return -EREMOTEIO;
-                }
-                /* check full condition */
-                if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
-                        goto put_bio;
-                if (bytes + bv->bv_len > nr_iter)
-                        break;
-
-                nsegs++;
-                bytes += bv->bv_len;
-                bvprvp = bv;
-        }
+        /* check that the data layout matches the hardware restrictions */
+        ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
+        if (ret) {
+                /* if we would have to split the bio, copy instead */
+                if (ret > 0)
+                        ret = -EREMOTEIO;
+                blk_mq_map_bio_put(bio);
+                return ret;
+        }
 
+        blk_rq_bio_prep(rq, bio, nsegs);
         return 0;
-put_bio:
-        blk_mq_map_bio_put(bio);
-        return -EINVAL;
 }
 
 /**
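For reference, the return-value contract of bio_split_rw_at() that the
new code relies on, summarized from the helper's kerneldoc in
block/blk-merge.c (not part of this patch):

        /*
         * ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
         *
         * ret == 0: the bio fits the queue limits as-is; nsegs is set
         *           to the number of segments the bio maps to.
         * ret  > 0: the number of bytes at which the bio would have to
         *           be split; blk_rq_map_user_bvec() turns this into
         *           -EREMOTEIO so the caller retries via the copy path.
         * ret  < 0: the bio cannot be split at all; the error is
         *           returned to the caller unchanged.
         */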