block: Make request operation type argument declarations consistent
Instead of declaring the second argument of blk_*_get_request() as int and passing it to functions that expect an unsigned int, declare that second argument as unsigned int. Also, for consistency, rename that second argument from 'rw' to 'op'. This patch does not change any functionality.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit cd6ce1482f
parent 0731967877
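For illustration only, here is a minimal, hedged sketch of what a call site looks like with the updated blk_get_request() prototype described in the message above. The helper name example_issue_pt_request(), the REQ_OP_DRV_OUT operation, and GFP_KERNEL are assumptions made up for this sketch, not taken from the patch.

#include <linux/blkdev.h>
#include <linux/err.h>

/*
 * Hypothetical caller: the second argument is now an unsigned int carrying a
 * REQ_OP_* value (here a driver-private passthrough write) rather than an int.
 */
static int example_issue_pt_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... set up the passthrough payload and issue the request ... */

	blk_put_request(rq);
	return 0;
}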
@@ -1347,8 +1347,8 @@ retry:
 		goto retry;
 }
 
-static struct request *blk_old_get_request(struct request_queue *q, int rw,
-		gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q,
+					   unsigned int op, gfp_t gfp_mask)
 {
 	struct request *rq;
 
@@ -1356,7 +1356,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	create_io_context(gfp_mask, q->node);
 
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, op, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
 		return rq;
@@ -1369,14 +1369,15 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	return rq;
 }
 
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+				gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw,
+		return blk_mq_alloc_request(q, op,
 			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
 			0 : BLK_MQ_REQ_NOWAIT);
 	else
-		return blk_old_get_request(q, rw, gfp_mask);
+		return blk_old_get_request(q, op, gfp_mask);
 }
 EXPORT_SYMBOL(blk_get_request);
 
@@ -328,7 +328,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		unsigned int flags)
 {
 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
@@ -339,7 +339,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = blk_mq_get_request(q, NULL, rw, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
 	blk_mq_put_ctx(alloc_data.ctx);
 	blk_queue_exit(q);
@@ -354,8 +354,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
-		unsigned int flags, unsigned int hctx_idx)
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+		unsigned int op, unsigned int flags, unsigned int hctx_idx)
 {
 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
 	struct request *rq;
@@ -390,7 +390,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	cpu = cpumask_first(alloc_data.hctx->cpumask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	rq = blk_mq_get_request(q, NULL, rw, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
 	blk_queue_exit(q);
 
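For illustration only, a short hedged sketch of the updated blk_mq_alloc_request() prototype as seen from a caller of the blk-mq allocation path changed above; the REQ_OP_DRV_IN operation, the BLK_MQ_REQ_NOWAIT flag, and the example_alloc_driver_request() helper are assumptions chosen for this sketch.

#include <linux/blk-mq.h>

/*
 * Hypothetical call site: the operation is passed as an unsigned int REQ_OP_*
 * value, and BLK_MQ_REQ_NOWAIT asks the allocator not to sleep for a tag.
 */
static struct request *example_alloc_driver_request(struct request_queue *q)
{
	return blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
}

Since blk_mq_alloc_request() returns an ERR_PTR() on failure, a real caller would check the result with IS_ERR() before using it.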
@@ -202,10 +202,10 @@ enum {
 	BLK_MQ_REQ_INTERNAL	= (1 << 2), /* allocate internal/sched tag */
 };
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		unsigned int flags);
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
-		unsigned int flags, unsigned int hctx_idx);
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+		unsigned int op, unsigned int flags, unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 enum {
@@ -935,7 +935,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_get_request(struct request_queue *, unsigned int op,
+				       gfp_t gfp_mask);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,