blk-cgroup: pass a gendisk to blkcg_schedule_throttle

Pass the gendisk to blkcg_schedule_throttle as part of moving the
blk-cgroup infrastructure to be gendisk based.  Remove the unused
!BLK_CGROUP stub while we're at it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220921180501.1539876-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit de185b56e8 (parent 00ad6991bb)
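
The conversion at the call sites is mechanical; the hunks that follow update the definition and kernel-doc in block/blk-cgroup.c, the callers in block/blk-iocost.c, block/blk-iolatency.c and mm/swapfile.c, and the declarations in include/linux/blk-cgroup.h. As a quick illustration (a sketch only, not part of the commit; every identifier is taken from the hunks below):

        /* before: throttling keyed on the request_queue */
        void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
        blkcg_schedule_throttle(rqos->q, use_memdelay);
        blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);

        /* after: callers hand over the gendisk instead */
        void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
        blkcg_schedule_throttle(rqos->q->disk, use_memdelay);
        blkcg_schedule_throttle(si->bdev->bd_disk, true);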
@@ -1792,13 +1792,13 @@ out:
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
- * @q: the request queue IO was submitted on
+ * @gendisk: disk to throttle
  * @use_memdelay: do we charge this to memory delay for PSI
  *
  * This is called by the IO controller when we know there's delay accumulated
  * for the blkg for this task. We do not pass the blkg because there are places
  * we call this that may not have that information, the swapping code for
- * instance will only have a request_queue at that point. This set's the
+ * instance will only have a block_device at that point. This set's the
  * notify_resume for the task to check and see if it requires throttling before
  * returning to user space.
  *
@@ -1807,8 +1807,10 @@ out:
  * throttle once. If the task needs to be throttled again it'll need to be
  * re-set at the next time we see the task.
  */
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
 {
+        struct request_queue *q = disk->queue;
+
         if (unlikely(current->flags & PF_KTHREAD))
                 return;
 
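The kernel-doc above spells out the mechanism: blkcg_schedule_throttle() only records that the current task owes delay and sets notify_resume; the throttle is applied at most once per syscall when the task returns to user space, via blkcg_maybe_throttle_current() (declared in the header hunk further down). A rough sketch of that pairing; the resume-path call site is an assumption for illustration, not something this commit touches:

        /* IO controller side: flag the task (sets notify_resume). */
        blkcg_schedule_throttle(disk, use_memdelay);

        /*
         * Return-to-user-space side, assumed to run from the resume-to-user
         * work: check the flag and sleep off the accumulated delay once.
         */
        blkcg_maybe_throttle_current();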
@@ -2636,7 +2636,7 @@ retry_lock:
         if (use_debt) {
                 iocg_incur_debt(iocg, abs_cost, &now);
                 if (iocg_kick_delay(iocg, &now))
-                        blkcg_schedule_throttle(rqos->q,
+                        blkcg_schedule_throttle(rqos->q->disk,
                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                 iocg_unlock(iocg, ioc_locked, &flags);
                 return;
@@ -2737,7 +2737,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
         if (likely(!list_empty(&iocg->active_list))) {
                 iocg_incur_debt(iocg, abs_cost, &now);
                 if (iocg_kick_delay(iocg, &now))
-                        blkcg_schedule_throttle(rqos->q,
+                        blkcg_schedule_throttle(rqos->q->disk,
                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
         } else {
                 iocg_commit_bio(iocg, bio, abs_cost, cost);
@@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
         unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
 
         if (use_delay)
-                blkcg_schedule_throttle(rqos->q, use_memdelay);
+                blkcg_schedule_throttle(rqos->q->disk, use_memdelay);
 
         /*
          * To avoid priority inversions we want to just take a slot if we are
@@ -18,14 +18,14 @@
 
 struct bio;
 struct cgroup_subsys_state;
-struct request_queue;
+struct gendisk;
 
 #define FC_APPID_LEN 129
 
 #ifdef CONFIG_BLK_CGROUP
 extern struct cgroup_subsys_state * const blkcg_root_css;
 
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
 void blkcg_maybe_throttle_current(void);
 bool blk_cgroup_congested(void);
 void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
@@ -39,7 +39,6 @@ struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
 
 static inline void blkcg_maybe_throttle_current(void) { }
 static inline bool blk_cgroup_congested(void) { return false; }
-static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
 static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
 {
         return NULL;
@@ -3655,7 +3655,7 @@ void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
         plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
                                   avail_lists[nid]) {
                 if (si->bdev) {
-                        blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
+                        blkcg_schedule_throttle(si->bdev->bd_disk, true);
                         break;
                 }
         }