[PATCH] block: support larger block pc requests
This patch modifies blk_rq_map_user()/blk_rq_unmap_user() and their cdrom and scsi_ioctl.c users so that requests larger than a single bio are supported, by chaining bios together.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 0e75f9063f
parent ad2d722570
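For orientation, here is a minimal sketch of how a BLOCK_PC submitter drives the reworked interface after this patch. The function name and its parameters are hypothetical; the call sequence mirrors the updated sg_io() and cdrom_read_cdda_bpc() paths in the hunks below.

/*
 * Hypothetical submitter, for illustration only: maps a (possibly
 * multi-bio) user buffer, executes the request, then unmaps via the
 * new request-based interface.
 */
static int example_submit_pc(request_queue_t *q, struct gendisk *disk,
                             unsigned char *cdb, unsigned int cdb_len,
                             void __user *ubuf, unsigned long len)
{
        struct request *rq;
        struct bio *bio;
        int ret = 0;

        rq = blk_get_request(q, READ, __GFP_WAIT);
        if (!rq)
                return -ENOMEM;

        rq->cmd_len = cdb_len;
        memset(rq->cmd, 0, BLK_MAX_CDB);
        memcpy(rq->cmd, cdb, cdb_len);
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->timeout = 60 * HZ;

        /* may now build a chain of bios instead of failing on big buffers */
        ret = blk_rq_map_user(q, rq, ubuf, len);
        if (ret)
                goto out;

        /* remember the head of the chain; unmapping needs it intact */
        bio = rq->bio;

        if (blk_execute_rq(q, disk, rq, 0))
                ret = -EIO;

        rq->bio = bio;
        if (blk_rq_unmap_user(rq))
                ret = -EFAULT;
out:
        blk_put_request(rq);
        return ret;
}

Note that rq->bio is saved before execution and restored before unmapping, since blk_rq_unmap_user() now walks the chain from the original head.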
block/ll_rw_blk.c
@@ -2322,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 
 EXPORT_SYMBOL(blk_insert_request);
 
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+        int ret = 0;
+
+        if (bio) {
+                if (bio_flagged(bio, BIO_USER_MAPPED))
+                        bio_unmap_user(bio);
+                else
+                        ret = bio_uncopy_user(bio);
+        }
+
+        return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+                             void __user *ubuf, unsigned int len)
+{
+        unsigned long uaddr;
+        struct bio *bio, *orig_bio;
+        int reading, ret;
+
+        reading = rq_data_dir(rq) == READ;
+
+        /*
+         * if alignment requirement is satisfied, map in user pages for
+         * direct dma. else, set up kernel bounce buffers
+         */
+        uaddr = (unsigned long) ubuf;
+        if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+                bio = bio_map_user(q, NULL, uaddr, len, reading);
+        else
+                bio = bio_copy_user(q, uaddr, len, reading);
+
+        if (IS_ERR(bio)) {
+                return PTR_ERR(bio);
+        }
+
+        orig_bio = bio;
+        blk_queue_bounce(q, &bio);
+        /*
+         * We link the bounce buffer in and could have to traverse it
+         * later so we have to get a ref to prevent it from being freed
+         */
+        bio_get(bio);
+
+        /*
+         * for most (all? don't know of any) queues we could
+         * skip grabbing the queue lock here. only drivers with
+         * funky private ->back_merge_fn() function could be
+         * problematic.
+         */
+        spin_lock_irq(q->queue_lock);
+        if (!rq->bio)
+                blk_rq_bio_prep(q, rq, bio);
+        else if (!q->back_merge_fn(q, rq, bio)) {
+                ret = -EINVAL;
+                spin_unlock_irq(q->queue_lock);
+                goto unmap_bio;
+        } else {
+                rq->biotail->bi_next = bio;
+                rq->biotail = bio;
+
+                rq->nr_sectors += bio_sectors(bio);
+                rq->hard_nr_sectors = rq->nr_sectors;
+                rq->data_len += bio->bi_size;
+        }
+        spin_unlock_irq(q->queue_lock);
+
+        return bio->bi_size;
+
+unmap_bio:
+        /* if it was bounced we must call the end io function */
+        bio_endio(bio, bio->bi_size, 0);
+        __blk_rq_unmap_user(orig_bio);
+        bio_put(bio);
+        return ret;
+}
+
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request structure to fill
@@ -2343,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
  *    unmapping.
  */
 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-                    unsigned int len)
+                    unsigned long len)
 {
-        unsigned long uaddr;
-        struct bio *bio;
-        int reading;
+        unsigned long bytes_read = 0;
+        int ret;
 
         if (len > (q->max_hw_sectors << 9))
                 return -EINVAL;
         if (!len || !ubuf)
                 return -EINVAL;
 
-        reading = rq_data_dir(rq) == READ;
+        while (bytes_read != len) {
+                unsigned long map_len, end, start;
 
-        /*
-         * if alignment requirement is satisfied, map in user pages for
-         * direct dma. else, set up kernel bounce buffers
-         */
-        uaddr = (unsigned long) ubuf;
-        if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-                bio = bio_map_user(q, NULL, uaddr, len, reading);
-        else
-                bio = bio_copy_user(q, uaddr, len, reading);
+                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+                                                        >> PAGE_SHIFT;
+                start = (unsigned long)ubuf >> PAGE_SHIFT;
 
-        if (!IS_ERR(bio)) {
-                rq->bio = rq->biotail = bio;
-                blk_rq_bio_prep(q, rq, bio);
+                /*
+                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
+                 * pages. If this happens we just lower the requested
+                 * mapping len by a page so that we can fit
+                 */
+                if (end - start > BIO_MAX_PAGES)
+                        map_len -= PAGE_SIZE;
 
-                rq->buffer = rq->data = NULL;
-                rq->data_len = len;
-                return 0;
+                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+                if (ret < 0)
+                        goto unmap_rq;
+                bytes_read += ret;
+                ubuf += ret;
         }
 
-        /*
-         * bio is the err-ptr
-         */
-        return PTR_ERR(bio);
+        rq->buffer = rq->data = NULL;
+        return 0;
+unmap_rq:
+        blk_rq_unmap_user(rq);
+        return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
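The loop above chops the user buffer into BIO_MAX_SIZE chunks and, when a misaligned address would make a chunk span BIO_MAX_PAGES + 1 pages, trims the chunk by one page. A standalone illustration of that arithmetic (plain user-space C, assuming 4 KiB pages and BIO_MAX_PAGES = 256, i.e. BIO_MAX_SIZE = 1 MiB):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_SHIFT      12
#define BIO_MAX_PAGES   256UL
#define BIO_MAX_SIZE    (BIO_MAX_PAGES * PAGE_SIZE)

int main(void)
{
        unsigned long ubuf = 0x1000200;        /* misaligned user address */
        unsigned long len = 3 * BIO_MAX_SIZE, bytes_read = 0;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = len - bytes_read < BIO_MAX_SIZE ?
                                len - bytes_read : BIO_MAX_SIZE;
                end = (ubuf + map_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                start = ubuf >> PAGE_SHIFT;

                /* a bad offset can touch BIO_MAX_PAGES + 1 pages */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                printf("chunk at %#lx: %lu bytes (%lu pages)\n",
                       ubuf, map_len,
                       ((ubuf + map_len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                       (ubuf >> PAGE_SHIFT));
                bytes_read += map_len;
                ubuf += map_len;
        }
        return 0;
}

With the misaligned start address above, every full-size chunk shrinks to 1 MiB minus one page so that it still fits within a single bio.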
@@ -2404,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-                        struct sg_iovec *iov, int iov_count)
+                        struct sg_iovec *iov, int iov_count, unsigned int len)
 {
         struct bio *bio;
 
@@ -2418,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
-        rq->bio = rq->biotail = bio;
+        if (bio->bi_size != len) {
+                bio_endio(bio, bio->bi_size, 0);
+                bio_unmap_user(bio);
+                return -EINVAL;
+        }
+
+        bio_get(bio);
         blk_rq_bio_prep(q, rq, bio);
         rq->buffer = rq->data = NULL;
-        rq->data_len = bio->bi_size;
         return 0;
 }
 
@@ -2429,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @bio:	bio to be unmapped
- * @ulen:	length of user buffer
+ * @rq:		rq to be unmapped
  *
  * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
+ *    Unmap a rq previously mapped by blk_rq_map_user().
+ *    rq->bio must be set to the original head of the request.
  */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
 {
-        int ret = 0;
+        struct bio *bio, *mapped_bio;
 
-        if (bio) {
-                if (bio_flagged(bio, BIO_USER_MAPPED))
-                        bio_unmap_user(bio);
+        while ((bio = rq->bio)) {
+                if (bio_flagged(bio, BIO_BOUNCED))
+                        mapped_bio = bio->bi_private;
                 else
-                        ret = bio_uncopy_user(bio);
-        }
+                        mapped_bio = bio;
 
-        return ret;
+                __blk_rq_unmap_user(mapped_bio);
+                rq->bio = bio->bi_next;
+                bio_put(bio);
+        }
+        return 0;
 }
 
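The resulting request carries one bio per mapped chunk, linked through bi_next, with a bounced chunk keeping the original user-mapped bio in bi_private. A hypothetical read-only debug helper (not part of the patch) that walks the same links the new blk_rq_unmap_user() follows:

/*
 * Hypothetical debug helper illustrating the chain the new
 * blk_rq_unmap_user() walks: one bio per mapped chunk, linked via
 * bi_next; a bounced bio carries the original in bi_private.
 */
static void dump_pc_chain(struct request *rq)
{
        struct bio *bio;

        for (bio = rq->bio; bio; bio = bio->bi_next)
                printk(KERN_DEBUG "bio %p: %u bytes%s%s\n",
                       bio, bio->bi_size,
                       bio_flagged(bio, BIO_BOUNCED) ? " (bounced)" : "",
                       bio_flagged(bio, BIO_USER_MAPPED) ? " (user-mapped)" : "");
}

Unlike blk_rq_unmap_user(), this leaves rq->bio and the bio references untouched.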
@@ -2476,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
         if (rq_data_dir(rq) == WRITE)
                 bio->bi_rw |= (1 << BIO_RW);
 
-        rq->bio = rq->biotail = bio;
         blk_rq_bio_prep(q, rq, bio);
-
         rq->buffer = rq->data = NULL;
-        rq->data_len = len;
         return 0;
 }
 
@@ -3495,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
         rq->hard_cur_sectors = rq->current_nr_sectors;
         rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
         rq->buffer = bio_data(bio);
         rq->data_len = bio->bi_size;
 
+        rq->bio = rq->biotail = bio;
 }
block/scsi_ioctl.c
@@ -226,7 +226,6 @@ static int sg_io(struct file *file, request_queue_t *q,
         unsigned long start_time;
         int writing = 0, ret = 0;
         struct request *rq;
-        struct bio *bio;
         char sense[SCSI_SENSE_BUFFERSIZE];
         unsigned char cmd[BLK_MAX_CDB];
 
@@ -258,6 +257,32 @@ static int sg_io(struct file *file, request_queue_t *q,
         if (!rq)
                 return -ENOMEM;
 
+        /*
+         * fill in request structure
+         */
+        rq->cmd_len = hdr->cmd_len;
+        memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+        memcpy(rq->cmd, cmd, hdr->cmd_len);
+
+        memset(sense, 0, sizeof(sense));
+        rq->sense = sense;
+        rq->sense_len = 0;
+
+        rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+        /*
+         * bounce this after holding a reference to the original bio, it's
+         * needed for proper unmapping
+         */
+        if (rq->bio)
+                blk_queue_bounce(q, &rq->bio);
+
+        rq->timeout = (hdr->timeout * HZ) / 1000;
+        if (!rq->timeout)
+                rq->timeout = q->sg_timeout;
+        if (!rq->timeout)
+                rq->timeout = BLK_DEFAULT_TIMEOUT;
+
         if (hdr->iovec_count) {
                 const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
                 struct sg_iovec *iov;
@@ -274,7 +299,8 @@ static int sg_io(struct file *file, request_queue_t *q,
                         goto out;
                 }
 
-                ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+                ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
+                                          hdr->dxfer_len);
                 kfree(iov);
         } else if (hdr->dxfer_len)
                 ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
@@ -282,33 +308,6 @@ static int sg_io(struct file *file, request_queue_t *q,
         if (ret)
                 goto out;
 
-        /*
-         * fill in request structure
-         */
-        rq->cmd_len = hdr->cmd_len;
-        memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-        memcpy(rq->cmd, cmd, hdr->cmd_len);
-
-        memset(sense, 0, sizeof(sense));
-        rq->sense = sense;
-        rq->sense_len = 0;
-
-        rq->cmd_type = REQ_TYPE_BLOCK_PC;
-        bio = rq->bio;
-
-        /*
-         * bounce this after holding a reference to the original bio, it's
-         * needed for proper unmapping
-         */
-        if (rq->bio)
-                blk_queue_bounce(q, &rq->bio);
-
-        rq->timeout = (hdr->timeout * HZ) / 1000;
-        if (!rq->timeout)
-                rq->timeout = q->sg_timeout;
-        if (!rq->timeout)
-                rq->timeout = BLK_DEFAULT_TIMEOUT;
-
         rq->retries = 0;
 
         start_time = jiffies;
@@ -339,7 +338,7 @@ static int sg_io(struct file *file, request_queue_t *q,
                 hdr->sb_len_wr = len;
         }
 
-        if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+        if (blk_rq_unmap_user(rq))
                 ret = -EFAULT;
 
         /* may not have succeeded, but output values written to control
drivers/cdrom/cdrom.c
@@ -2133,16 +2133,14 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                 rq->timeout = 60 * HZ;
                 bio = rq->bio;
 
-                if (rq->bio)
-                        blk_queue_bounce(q, &rq->bio);
-
                 if (blk_execute_rq(q, cdi->disk, rq, 0)) {
                         struct request_sense *s = rq->sense;
                         ret = -EIO;
                         cdi->last_sense = s->sense_key;
                 }
 
-                if (blk_rq_unmap_user(bio, len))
+                rq->bio = bio;
+                if (blk_rq_unmap_user(rq))
                         ret = -EFAULT;
 
                 if (ret)
fs/bio.c
@@ -560,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
                         break;
                 }
 
-                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-                        ret = -EINVAL;
+                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                         break;
-                }
 
                 len -= bytes;
         }
@@ -750,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
                              int write_to_vm)
 {
         struct bio *bio;
-        int len = 0, i;
 
         bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -765,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
          */
         bio_get(bio);
 
-        for (i = 0; i < iov_count; i++)
-                len += iov[i].iov_len;
-
-        if (bio->bi_size == len)
-                return bio;
-
-        /*
-         * don't support partial mappings
-         */
-        bio_endio(bio, bio->bi_size, 0);
-        bio_unmap_user(bio);
-        return ERR_PTR(-EINVAL);
+        return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)
include/linux/blkdev.h
@@ -678,10 +678,11 @@ extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern int blk_rq_unmap_user(struct request *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+                               struct sg_iovec *, int, unsigned int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
                           struct request *, int);
 extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,