Mirror of https://github.com/torvalds/linux.git, synced 2024-11-22 12:11:40 +00:00
bfq-iosched: remove unused variable
bfqd->sb_shift was intended as a cache for the sbitmap queue shift, but we don't need it, as the shift never changes. Kill it with fire.

Acked-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f0635b8a41
commit bd7d4ef6a4
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5085,26 +5085,24 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
  */
 static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
 {
-	bfqd->sb_shift = bt->sb.shift;
-
 	/*
 	 * In-word depths if no bfq_queue is being weight-raised:
 	 * leaving 25% of tags only for sync reads.
 	 *
 	 * In next formulas, right-shift the value
-	 * (1U<<bfqd->sb_shift), instead of computing directly
-	 * (1U<<(bfqd->sb_shift - something)), to be robust against
-	 * any possible value of bfqd->sb_shift, without having to
+	 * (1U<<bt->sb.shift), instead of computing directly
+	 * (1U<<(bt->sb.shift - something)), to be robust against
+	 * any possible value of bt->sb.shift, without having to
 	 * limit 'something'.
 	 */
 	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
+	bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
 	/*
 	 * no more than 75% of tags for sync writes (25% extra tags
 	 * w.r.t. async I/O, to prevent async I/O from starving sync
 	 * writes)
 	 */
-	bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
+	bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
 
 	/*
 	 * In-word depths in case some bfq_queue is being weight-
@@ -5114,9 +5112,9 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
 	 * shortage.
 	 */
 	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
+	bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
 	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
+	bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
 }
 
 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -635,12 +635,6 @@ struct bfq_data {
 	/* bfqq associated with the task issuing current bio for merging */
 	struct bfq_queue *bio_bfqq;
 
-	/*
-	 * Cached sbitmap shift, used to compute depth limits in
-	 * bfq_update_depths.
-	 */
-	unsigned int sb_shift;
-
 	/*
 	 * Depth limits used in bfq_limit_depth (see comments on the
 	 * function)
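
For readers who want to sanity-check the depth formulas above, here is a minimal standalone sketch (not part of the kernel change): it assumes a 64-bit sbitmap word, i.e. bt->sb.shift == 6, and uses a plain stand-in for the kernel's max() macro.

#include <stdio.h>

/* Stand-in for the kernel's max() macro, for this illustration only. */
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Assumed shift: a 64-bit sbitmap word holds 1U << 6 == 64 tags. */
	unsigned int shift = 6;
	unsigned int word_depths[2][2];

	/* No queue weight-raised: 50% of tags for async I/O, 75% for sync writes. */
	word_depths[0][0] = max((1U << shift) >> 1, 1U);       /* 32 */
	word_depths[0][1] = max(((1U << shift) * 3) >> 2, 1U); /* 48 */

	/* Some queue weight-raised: ~18% for async I/O, ~37% for sync writes. */
	word_depths[1][0] = max(((1U << shift) * 3) >> 4, 1U); /* 12 */
	word_depths[1][1] = max(((1U << shift) * 6) >> 4, 1U); /* 24 */

	printf("%u %u %u %u\n", word_depths[0][0], word_depths[0][1],
	       word_depths[1][0], word_depths[1][1]);
	return 0;
}

This prints "32 48 12 24", matching the 50%/75% and ~18%/~37% shares of the 64 tags per word described in the diff's comments.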