Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes that should go into the next -rc. This contains:

   - A use-after-free in the request_list exit for the legacy IO path,
     from Bart.

   - A fix for CFQ, fixing a recent regression with the conversion to
     higher resolution timing for iops mode. From Hou Tao.

   - A single fix for nbd, split into two patches, fixing a leak of a data
     structure.

   - A regression fix from Keith, ensuring that callers of
     blk_mq_update_nr_hw_queues() hold the right lock"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: Avoid that blk_exit_rl() triggers a use-after-free
  cfq-iosched: fix the delay of cfq_group's vdisktime under iops mode
  blk-mq: Take tagset lock when updating hw queues
  nbd: don't leak nbd_config
  nbd: nbd_reset() call in nbd_dev_add() is redundant
Linus Torvalds 2017-06-02 11:44:46 -07:00
commit bb329859ef
7 changed files with 40 additions and 18 deletions

block/blk-cgroup.c

@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->blkcg != &blkcg_root)
-		blk_exit_rl(&blkg->rl);
+		blk_exit_rl(blkg->q, &blkg->rl);
 
 	blkg_rwstat_exit(&blkg->stat_ios);
 	blkg_rwstat_exit(&blkg->stat_bytes);

block/blk-core.c

@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
+	if (rl != &q->root_rl)
+		WARN_ON_ONCE(!blk_get_queue(q));
+
 	return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-	if (rl->rq_pool)
+	if (rl->rq_pool) {
 		mempool_destroy(rl->rq_pool);
+		if (rl != &q->root_rl)
+			blk_put_queue(q);
+	}
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
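For readers less familiar with the legacy request path: the change above pins the owning queue for any non-root request_list (blk_get_queue() on init, blk_put_queue() on exit), so the queue cannot be freed while the list is still being torn down. A minimal userspace sketch of that reference-counting idea; the struct and function names below are invented for illustration and are not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for request_queue / request_list. */
struct queue {
	int refcount;
};

struct rlist {
	struct queue *q;	/* owning queue, pinned while the list lives */
};

static struct queue *queue_get(struct queue *q)
{
	q->refcount++;		/* like blk_get_queue(): take a reference */
	return q;
}

static void queue_put(struct queue *q)
{
	if (--q->refcount == 0) {	/* last reference frees the queue */
		printf("queue freed\n");
		free(q);
	}
}

static void rlist_init(struct rlist *rl, struct queue *q)
{
	rl->q = queue_get(q);	/* pin the queue for the list's lifetime */
}

static void rlist_exit(struct rlist *rl)
{
	queue_put(rl->q);	/* release the pin; the queue may now go away */
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));
	struct rlist rl;

	q->refcount = 1;	/* creator's reference */
	rlist_init(&rl, q);
	queue_put(q);		/* creator drops its reference early... */
	rlist_exit(&rl);	/* ...but the queue survives until here */
	return 0;
}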

block/blk-mq.c

@@ -2641,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+							int nr_hw_queues)
 {
 	struct request_queue *q;
 
@@ -2665,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+	mutex_lock(&set->tag_list_lock);
+	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+	mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
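The split above follows the usual kernel convention for locked helpers: the __-prefixed function assumes the caller already holds set->tag_list_lock, and the exported entry point takes the lock itself before delegating. A rough userspace analogue of the same wrapper pattern, with made-up names and a pthread mutex standing in for the tagset lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tag_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_hw_queues;

/* Caller must hold tag_list_lock (the convention signalled by the __ prefix). */
static void __update_nr_hw_queues(int nr)
{
	nr_hw_queues = nr;	/* the actual update, done under the lock */
}

/* Public entry point: takes the lock, then calls the locked helper. */
static void update_nr_hw_queues(int nr)
{
	pthread_mutex_lock(&tag_list_lock);
	__update_nr_hw_queues(nr);
	pthread_mutex_unlock(&tag_list_lock);
}

int main(void)
{
	update_nr_hw_queues(4);
	printf("nr_hw_queues = %d\n", nr_hw_queues);
	return 0;
}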

block/blk-sysfs.c

@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	blk_exit_rl(&q->root_rl);
+	blk_exit_rl(q, &q->root_rl);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);

block/blk.h

@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);

block/cfq-iosched.c

@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+	if (!iops_mode(cfqd))
+		return CFQ_SLICE_MODE_GROUP_DELAY;
+	else
+		return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	n = rb_last(&st->rb);
 	if (n) {
 		__cfqg = rb_entry_cfqg(n);
-		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+		cfqg->vdisktime = __cfqg->vdisktime +
+			cfq_get_cfqg_vdisktime_delay(cfqd);
 	} else
 		cfqg->vdisktime = st->min_vdisktime;
 	cfq_group_service_tree_add(st, cfqg);
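The new helper matters because vdisktime is accounted in different units depending on the mode: nanosecond-based charges under time-slice mode, but much smaller jiffy-scale charges under iops mode, so reusing the nanosecond CFQ_IDLE_DELAY there pushed a newly added group's vdisktime far into the future and delayed its scheduling. A tiny standalone calculation of the two delay constants; HZ=250 here is just an example, since the real value is a kernel configuration choice:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250ULL	/* example value only */

int main(void)
{
	unsigned long long slice_delay = NSEC_PER_SEC / 5;	/* CFQ_SLICE_MODE_GROUP_DELAY: 200,000,000 */
	unsigned long long iops_delay  = HZ / 5;		/* CFQ_IOPS_MODE_GROUP_DELAY: 50 with HZ=250 */

	printf("slice mode delay: %llu\n", slice_delay);
	printf("iops mode delay:  %llu\n", iops_delay);
	printf("old delay / iops-mode delay: %llux\n", slice_delay / iops_delay);
	return 0;
}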

drivers/block/nbd.c

@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 	return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-	nbd->config = NULL;
-	nbd->tag_set.timeout = 0;
-	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
 	if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
 		}
 		kfree(config->socks);
 	}
-	nbd_reset(nbd);
+	kfree(nbd->config);
+	nbd->config = NULL;
+
+	nbd->tag_set.timeout = 0;
+	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 	mutex_unlock(&nbd->config_lock);
 	nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
 	disk->fops = &nbd_fops;
 	disk->private_data = nbd;
 	sprintf(disk->disk_name, "nbd%d", index);
-	nbd_reset(nbd);
 	add_disk(disk);
 	nbd_total_devices++;
 	return index;
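Two things happen above: nbd_reset() cleared nbd->config without freeing it, leaking the allocation, and its remaining call in nbd_dev_add() was dropped as redundant. The open-coded replacement frees the config and then clears the pointer so later teardown cannot touch stale memory. The same free-then-clear idiom in a tiny standalone form; the struct and function names are invented for illustration:

#include <stdlib.h>

struct config {
	int num_connections;
};

struct device {
	struct config *config;
};

/* Drop the device's config: free it AND clear the pointer, so the
 * allocation is neither leaked nor left dangling for a later release. */
static void config_put(struct device *dev)
{
	free(dev->config);
	dev->config = NULL;
}

int main(void)
{
	struct device dev;

	dev.config = calloc(1, sizeof(*dev.config));
	config_put(&dev);
	config_put(&dev);	/* safe: free(NULL) is a no-op */
	return 0;
}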