blk-mq: change gfp flags to GFP_NOIO in blk_mq_realloc_hw_ctxs
blk_mq_realloc_hw_ctxs() can be invoked while the hardware queues are being updated. At that moment, IO is blocked. Change the gfp flags from GFP_KERNEL to GFP_NOIO to avoid hanging forever on memory allocation in blk_mq_realloc_hw_ctxs().

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 5b202853ff
parent 477e19dedc
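For illustration, a hedged sketch (not part of the patch) of the allocation-context rule the change relies on; the helper name alloc_while_queues_frozen is hypothetical:

/*
 * Hedged sketch, not from the patch.  While the nr_hw_queues update path
 * has frozen the queues, a GFP_KERNEL allocation may enter direct reclaim
 * and wait on writeback IO that has to pass through those same frozen
 * queues, so it can hang forever.  GFP_NOIO forbids the allocator from
 * issuing IO; __GFP_NOWARN | __GFP_NORETRY make a failure quiet and quick
 * so the caller's NULL check can handle it.
 */
#include <linux/slab.h>
#include <linux/gfp.h>

static void *alloc_while_queues_frozen(size_t size, int node)
{
	/* safe while IO is blocked: reclaim will not recurse into the block layer */
	return kzalloc_node(size, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
}

The hunks below apply exactly this flag combination on the frozen-queue path, and pass GFP_KERNEL explicitly where IO is not blocked (blk_init_allocated_queue()).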
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1163,7 +1163,7 @@ int blk_init_allocated_queue(struct request_queue *q)
 {
 	WARN_ON_ONCE(q->mq_ops);
 
-	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
 	if (!q->fq)
 		return -ENOMEM;
 
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -566,12 +566,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 EXPORT_SYMBOL(blkdev_issue_flush);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-		int node, int cmd_size)
+		int node, int cmd_size, gfp_t flags)
 {
 	struct blk_flush_queue *fq;
 	int rq_sz = sizeof(struct request);
 
-	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+	fq = kzalloc_node(sizeof(*fq), flags, node);
 	if (!fq)
 		goto fail;
 
@@ -579,7 +579,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	spin_lock_init(&fq->mq_flush_lock);
 
 	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
 	if (!fq->flush_rq)
 		goto fail_rq;
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2210,12 +2210,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	 * runtime
 	 */
 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
-			GFP_KERNEL, node);
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
 	if (!hctx->ctxs)
 		goto unregister_cpu_notifier;
 
-	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
-			node))
+	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
 		goto free_ctxs;
 
 	hctx->nr_ctx = 0;
@@ -2228,7 +2228,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
-	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
 	if (!hctx->fq)
 		goto exit_hctx;
 
@@ -2536,12 +2537,14 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
 		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
-				GFP_KERNEL, node);
+				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+				node);
 		if (!hctxs[i])
 			break;
 
-		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
-					node)) {
+		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask,
+					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+					node)) {
 			kfree(hctxs[i]);
 			hctxs[i] = NULL;
 			break;
--- a/block/blk.h
+++ b/block/blk.h
@@ -125,7 +125,7 @@ static inline void __blk_get_queue(struct request_queue *q)
 }
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-		int node, int cmd_size);
+		int node, int cmd_size, gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
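For reference, a hedged usage sketch of the widened blk_alloc_flush_queue() prototype above; demo_alloc_fq and io_may_be_blocked are hypothetical names, not from the patch:

/*
 * Hedged sketch: callers now choose the gfp flags themselves.  Paths where
 * IO may be blocked (the hctx init done under a hw queue update) pass
 * GFP_NOIO, while blk_init_allocated_queue() keeps GFP_KERNEL.
 */
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include "blk.h"

static struct blk_flush_queue *demo_alloc_fq(struct request_queue *q, int node,
					     int cmd_size, bool io_may_be_blocked)
{
	gfp_t gfp = io_may_be_blocked ?
			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY : GFP_KERNEL;

	return blk_alloc_flush_queue(q, node, cmd_size, gfp);
}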