block: only build the icq tracking code when needed
Only bfq needs the code to track icqs, so make it conditional.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20211209063131.18537-12-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 5ef1630586, parent 90b627f542
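In outline (summarizing the diff below): a new hidden Kconfig symbol BLK_ICQ is selected only by IOSCHED_BFQ, the icq tracking code in blk-ioc.c and the icq fields of struct io_context are wrapped in #ifdef CONFIG_BLK_ICQ, and empty inline stubs keep callers building unchanged when the symbol is off. With CONFIG_BLK_ICQ disabled, for example, alloc_io_context() effectively reduces to the following (a sketch of the preprocessed result; the unchanged allocation lines above the hunk are elided):

    static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
    {
    	struct io_context *ioc;

    	/* ... allocation from iocontext_cachep unchanged and elided ... */

    	/* only the refcounts and ioprio survive; icq state is compiled out */
    	atomic_long_set(&ioc->refcount, 1);
    	atomic_set(&ioc->active_ref, 1);
    	return ioc;
    }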
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -35,6 +35,9 @@ config BLK_CGROUP_RWSTAT
 config BLK_DEV_BSG_COMMON
 	tristate
 
+config BLK_ICQ
+	bool
+
 config BLK_DEV_BSGLIB
 	bool "Block layer SG support v4 helper lib"
 	select BLK_DEV_BSG_COMMON
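Note that BLK_ICQ is a hidden symbol: a bool with no prompt string never appears in menuconfig and can only be switched on through select, which the IOSCHED_BFQ entry below does.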
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -18,6 +18,7 @@ config MQ_IOSCHED_KYBER
 
 config IOSCHED_BFQ
 	tristate "BFQ I/O scheduler"
+	select BLK_ICQ
 	help
 	  BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
 	  of the device among all processes according to their weights,
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -19,6 +19,7 @@
  */
 static struct kmem_cache *iocontext_cachep;
 
+#ifdef CONFIG_BLK_ICQ
 /**
  * get_io_context - increment reference count to io_context
  * @ioc: io_context to get
@@ -162,6 +163,42 @@ static bool ioc_delay_free(struct io_context *ioc)
 	return false;
 }
 
+/**
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
+ *
+ * Walk @q->icq_list and exit all io_cq's.
+ */
+void ioc_clear_queue(struct request_queue *q)
+{
+	LIST_HEAD(icq_list);
+
+	spin_lock_irq(&q->queue_lock);
+	list_splice_init(&q->icq_list, &icq_list);
+	spin_unlock_irq(&q->queue_lock);
+
+	rcu_read_lock();
+	while (!list_empty(&icq_list)) {
+		struct io_cq *icq =
+			list_entry(icq_list.next, struct io_cq, q_node);
+
+		spin_lock_irq(&icq->ioc->lock);
+		if (!(icq->flags & ICQ_DESTROYED))
+			ioc_destroy_icq(icq);
+		spin_unlock_irq(&icq->ioc->lock);
+	}
+	rcu_read_unlock();
+}
+#else /* CONFIG_BLK_ICQ */
+static inline void ioc_exit_icqs(struct io_context *ioc)
+{
+}
+static inline bool ioc_delay_free(struct io_context *ioc)
+{
+	return false;
+}
+#endif /* CONFIG_BLK_ICQ */
+
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
@@ -193,33 +230,6 @@ void exit_io_context(struct task_struct *task)
 	}
 }
 
-/**
- * ioc_clear_queue - break any ioc association with the specified queue
- * @q: request_queue being cleared
- *
- * Walk @q->icq_list and exit all io_cq's.
- */
-void ioc_clear_queue(struct request_queue *q)
-{
-	LIST_HEAD(icq_list);
-
-	spin_lock_irq(&q->queue_lock);
-	list_splice_init(&q->icq_list, &icq_list);
-	spin_unlock_irq(&q->queue_lock);
-
-	rcu_read_lock();
-	while (!list_empty(&icq_list)) {
-		struct io_cq *icq =
-			list_entry(icq_list.next, struct io_cq, q_node);
-
-		spin_lock_irq(&icq->ioc->lock);
-		if (!(icq->flags & ICQ_DESTROYED))
-			ioc_destroy_icq(icq);
-		spin_unlock_irq(&icq->ioc->lock);
-	}
-	rcu_read_unlock();
-}
-
 static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
@@ -231,10 +241,12 @@ static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 
 	atomic_long_set(&ioc->refcount, 1);
 	atomic_set(&ioc->active_ref, 1);
+#ifdef CONFIG_BLK_ICQ
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
 	INIT_HLIST_HEAD(&ioc->icq_list);
 	INIT_WORK(&ioc->release_work, ioc_release_fn);
+#endif
 	return ioc;
 }
 
@@ -300,6 +312,7 @@ int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
 	return 0;
 }
 
+#ifdef CONFIG_BLK_ICQ
 /**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
@@ -428,6 +441,7 @@ struct io_cq *ioc_find_get_icq(struct request_queue *q)
 	return icq;
 }
 EXPORT_SYMBOL_GPL(ioc_find_get_icq);
+#endif /* CONFIG_BLK_ICQ */
 
 static int __init blk_ioc_init(void)
 {
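With CONFIG_BLK_ICQ disabled, the stubbed ioc_delay_free() always returns false, so put_io_context() frees the io_context synchronously rather than deferring to release_work, and ioc_exit_icqs() becomes a no-op since there are no icqs to walk.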
--- a/block/blk.h
+++ b/block/blk.h
@@ -366,7 +366,13 @@ static inline unsigned int bio_aligned_discard_max_sectors(
  */
 struct io_cq *ioc_find_get_icq(struct request_queue *q);
 struct io_cq *ioc_lookup_icq(struct request_queue *q);
+#ifdef CONFIG_BLK_ICQ
 void ioc_clear_queue(struct request_queue *q);
+#else
+static inline void ioc_clear_queue(struct request_queue *q)
+{
+}
+#endif /* CONFIG_BLK_ICQ */
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -100,16 +100,18 @@ struct io_context {
 	atomic_long_t refcount;
 	atomic_t active_ref;
 
+	unsigned short ioprio;
+
+#ifdef CONFIG_BLK_ICQ
 	/* all the fields below are protected by this lock */
 	spinlock_t lock;
 
-	unsigned short ioprio;
-
 	struct radix_tree_root icq_tree;
 	struct io_cq __rcu *icq_hint;
 	struct hlist_head icq_list;
 
 	struct work_struct release_work;
+#endif /* CONFIG_BLK_ICQ */
 };
 
 struct task_struct;
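The pattern throughout is plain conditional compilation with static inline fallbacks, so call sites need no #ifdef of their own. A standalone sketch of the idiom outside the kernel (hypothetical stand-in type and body, not kernel code) that builds both with and without -DCONFIG_BLK_ICQ:

    #include <stdio.h>

    struct request_queue { int nr_icqs; };	/* stand-in for the kernel type */

    #ifdef CONFIG_BLK_ICQ
    /* "real" implementation, built only when the feature is configured in */
    static void ioc_clear_queue(struct request_queue *q)
    {
    	printf("tearing down %d icqs\n", q->nr_icqs);
    	q->nr_icqs = 0;
    }
    #else
    /* empty stub: callers compile identically, the call optimizes away */
    static inline void ioc_clear_queue(struct request_queue *q)
    {
    	(void)q;	/* icq tracking not built in: nothing to do */
    }
    #endif

    int main(void)
    {
    	struct request_queue q = { .nr_icqs = 3 };

    	ioc_clear_queue(&q);	/* valid in both configurations */
    	return 0;
    }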