block, cfq: move io_cq lookup to blk-ioc.c
Now that all io_cq related data structures are in the block core layer, io_cq lookup can be moved from cfq-iosched.c to blk-ioc.c. Lookup logic from cfq_cic_lookup() is moved to ioc_lookup_icq() with parameter and return type changes (cfqd -> request_queue, cfq_io_cq -> io_cq), and cfq_cic_lookup() becomes a thin wrapper around ioc_lookup_icq(). Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
a612fddf0d
commit
47fdd4ca96
@ -266,6 +266,42 @@ struct io_context *get_task_io_context(struct task_struct *task,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(get_task_io_context);
|
EXPORT_SYMBOL(get_task_io_context);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ioc_lookup_icq - lookup io_cq from ioc
|
||||||
|
* @ioc: the associated io_context
|
||||||
|
* @q: the associated request_queue
|
||||||
|
*
|
||||||
|
* Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
|
||||||
|
* with @q->queue_lock held.
|
||||||
|
*/
|
||||||
|
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
|
||||||
|
{
|
||||||
|
struct io_cq *icq;
|
||||||
|
|
||||||
|
lockdep_assert_held(q->queue_lock);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* icq's are indexed from @ioc using radix tree and hint pointer,
|
||||||
|
* both of which are protected with RCU. All removals are done
|
||||||
|
* holding both q and ioc locks, and we're holding q lock - if we
|
||||||
|
* find a icq which points to us, it's guaranteed to be valid.
|
||||||
|
*/
|
||||||
|
rcu_read_lock();
|
||||||
|
icq = rcu_dereference(ioc->icq_hint);
|
||||||
|
if (icq && icq->q == q)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
icq = radix_tree_lookup(&ioc->icq_tree, q->id);
|
||||||
|
if (icq && icq->q == q)
|
||||||
|
rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
|
||||||
|
else
|
||||||
|
icq = NULL;
|
||||||
|
out:
|
||||||
|
rcu_read_unlock();
|
||||||
|
return icq;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ioc_lookup_icq);
|
||||||
|
|
||||||
void ioc_set_changed(struct io_context *ioc, int which)
|
void ioc_set_changed(struct io_context *ioc, int which)
|
||||||
{
|
{
|
||||||
struct io_cq *icq;
|
struct io_cq *icq;
|
||||||
|
@ -199,6 +199,7 @@ static inline int blk_do_io_stat(struct request *rq)
|
|||||||
* Internal io_context interface
|
* Internal io_context interface
|
||||||
*/
|
*/
|
||||||
void get_io_context(struct io_context *ioc);
|
void get_io_context(struct io_context *ioc);
|
||||||
|
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
|
||||||
|
|
||||||
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
|
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
|
||||||
int node);
|
int node);
|
||||||
|
@ -468,7 +468,6 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
|
|||||||
static void cfq_dispatch_insert(struct request_queue *, struct request *);
|
static void cfq_dispatch_insert(struct request_queue *, struct request *);
|
||||||
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
|
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
|
||||||
struct io_context *, gfp_t);
|
struct io_context *, gfp_t);
|
||||||
static struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *, struct io_context *);
|
|
||||||
|
|
||||||
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
|
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
|
||||||
{
|
{
|
||||||
@ -476,6 +475,14 @@ static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
|
|||||||
return container_of(icq, struct cfq_io_cq, icq);
|
return container_of(icq, struct cfq_io_cq, icq);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
|
||||||
|
struct io_context *ioc)
|
||||||
|
{
|
||||||
|
if (ioc)
|
||||||
|
return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
|
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
|
||||||
{
|
{
|
||||||
return cic->cfqq[is_sync];
|
return cic->cfqq[is_sync];
|
||||||
@ -2970,45 +2977,6 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
|
|||||||
return cfqq;
|
return cfqq;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* cfq_cic_lookup - lookup cfq_io_cq
|
|
||||||
* @cfqd: the associated cfq_data
|
|
||||||
* @ioc: the associated io_context
|
|
||||||
*
|
|
||||||
* Look up cfq_io_cq associated with @cfqd - @ioc pair. Must be called
|
|
||||||
* with queue_lock held.
|
|
||||||
*/
|
|
||||||
static struct cfq_io_cq *
|
|
||||||
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
|
|
||||||
{
|
|
||||||
struct request_queue *q = cfqd->queue;
|
|
||||||
struct io_cq *icq;
|
|
||||||
|
|
||||||
lockdep_assert_held(cfqd->queue->queue_lock);
|
|
||||||
if (unlikely(!ioc))
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* icq's are indexed from @ioc using radix tree and hint pointer,
|
|
||||||
* both of which are protected with RCU. All removals are done
|
|
||||||
* holding both q and ioc locks, and we're holding q lock - if we
|
|
||||||
* find a icq which points to us, it's guaranteed to be valid.
|
|
||||||
*/
|
|
||||||
rcu_read_lock();
|
|
||||||
icq = rcu_dereference(ioc->icq_hint);
|
|
||||||
if (icq && icq->q == q)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
icq = radix_tree_lookup(&ioc->icq_tree, cfqd->queue->id);
|
|
||||||
if (icq && icq->q == q)
|
|
||||||
rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
|
|
||||||
else
|
|
||||||
icq = NULL;
|
|
||||||
out:
|
|
||||||
rcu_read_unlock();
|
|
||||||
return icq_to_cic(icq);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cfq_create_cic - create and link a cfq_io_cq
|
* cfq_create_cic - create and link a cfq_io_cq
|
||||||
* @cfqd: cfqd of interest
|
* @cfqd: cfqd of interest
|
||||||
|
Loading…
Reference in New Issue
Block a user