blk-mq: clearing flush request reference in tags->rqs[]
Before we free the request queue, clear the flush request reference in
tags->rqs[] so that a potential use-after-free (UAF) can be avoided.

Based on one patch written by David Jeffery.

Tested-by: John Garry <john.garry@huawei.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: David Jeffery <djeffery@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20210511152236.763464-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 364b61818f
parent bd63141d58
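For context, the reader side this patch synchronizes against looks roughly
like the helper below, modeled on blk_mq_find_and_get_req() added by the
parent commit bd63141d58. This is an illustrative sketch, not necessarily
the verbatim mainline code: tag iteration dereferences tags->rqs[] only
under tags->lock and only hands out requests whose reference is still live.

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	/* tags->lock pairs with the lock/unlock in the clearing path */
	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	/* skip freed slots and requests whose reference already dropped to 0 */
	if (!rq || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}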
@@ -2643,16 +2643,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 					    &hctx->cpuhp_dead);
 }
 
+/*
+ * Before freeing hw queue, clearing the flush request reference in
+ * tags->rqs[] for avoiding potential UAF.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+		unsigned int queue_depth, struct request *flush_rq)
+{
+	int i;
+	unsigned long flags;
+
+	/* The hw queue may not be mapped yet */
+	if (!tags)
+		return;
+
+	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
+	for (i = 0; i < queue_depth; i++)
+		cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	spin_lock_irqsave(&tags->lock, flags);
+	spin_unlock_irqrestore(&tags->lock, flags);
+}
+
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	struct request *flush_rq = hctx->fq->flush_rq;
+
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
+	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+			set->queue_depth, flush_rq);
 	if (set->ops->exit_request)
-		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+		set->ops->exit_request(set, flush_rq, hctx_idx);
 
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
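Note the empty critical section at the end of blk_mq_clear_flush_rq_mapping():
acquiring and immediately releasing tags->lock after the cmpxchg() loop acts
as a barrier. Any iterator that fetched the stale flush_rq pointer did so
while holding tags->lock, so once the clearing path has taken and dropped the
same lock, every such iterator is guaranteed to have either observed the
cleared reference or finished its critical section before the flush request
is freed.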