BACKPORT: blk-mq: clearing flush request reference in tags->rqs[]

Before the request queue is freed, clear the flush request reference in
tags->rqs[] so that a potential use-after-free (UAF) can be avoided.

Based on one patch written by David Jeffery.
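
For illustration only, here is a minimal user-space sketch of the same
clear-then-wait pattern, assuming C11 atomics and a pthread mutex in place
of the kernel's cmpxchg() and tags->lock. The names used below (tag_table,
clear_stale_refs, req) are made up for the example and are not kernel APIs.

/*
 * Stand-alone illustration (not kernel code): clear every table slot that
 * still points at an object before freeing it, then take and release the
 * iterators' lock once so that any walker which grabbed a stale pointer
 * under that lock has finished before the memory goes away.
 *
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct req { int tag; };

struct tag_table {
	pthread_mutex_t lock;		/* iterators hold this while walking rqs[] */
	unsigned int depth;
	_Atomic(struct req *) rqs[];	/* may keep stale references after completion */
};

/* Analogue of blk_mq_clear_flush_rq_mapping(): NULL out stale slots, then
 * use a lock/unlock pair purely as a barrier to drain in-flight iterations. */
static void clear_stale_refs(struct tag_table *tags, struct req *victim)
{
	for (unsigned int i = 0; i < tags->depth; i++) {
		struct req *expected = victim;

		/* Only swing slots that still point at the victim. */
		atomic_compare_exchange_strong(&tags->rqs[i], &expected, NULL);
	}

	pthread_mutex_lock(&tags->lock);
	pthread_mutex_unlock(&tags->lock);
}

int main(void)
{
	unsigned int depth = 4;
	struct tag_table *tags =
		calloc(1, sizeof(*tags) + depth * sizeof(tags->rqs[0]));
	struct req *flush = malloc(sizeof(*flush));

	pthread_mutex_init(&tags->lock, NULL);
	tags->depth = depth;
	for (unsigned int i = 0; i < depth; i++)
		atomic_init(&tags->rqs[i], NULL);
	atomic_store(&tags->rqs[2], flush);	/* a lingering reference */

	clear_stale_refs(tags, flush);
	free(flush);				/* safe: no slot points at it any more */

	for (unsigned int i = 0; i < depth; i++)
		printf("rqs[%u] = %p\n", i, (void *)atomic_load(&tags->rqs[i]));

	pthread_mutex_destroy(&tags->lock);
	free(tags);
	return 0;
}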

Tested-by: John Garry <john.garry@huawei.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: David Jeffery <djeffery@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>

Bug: 197804811
Change-Id: I9600626e807a4eed546c21be808fabed2a9db9b1
[Upstream: cherry picked from commit 364b61818f65045479e42e76ed8dd6f051778280]
[Todd: refactored to avoid breaking KMI]
Signed-off-by: Pradeep P V K <pragalla@codeaurora.org>
Signed-off-by: Todd Kjos <tkjos@google.com>
Git-commit: c9a3b51b07a03d515e15e0f79d1d1185e341b8f8
Git-repo: https://android.googlesource.com/kernel/common/
Signed-off-by: Pradeep P V K <quic_pragalla@quicinc.com>

@@ -2198,18 +2198,51 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 					    &hctx->cpuhp_dead);
 }
 
+/*
+ * Before freeing hw queue, clearing the flush request reference in
+ * tags->rqs[] for avoiding potential UAF.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+		unsigned int queue_depth, struct request *flush_rq)
+{
+	int i;
+	unsigned long flags;
+
+	/* The hw queue may not be mapped yet */
+	if (!tags)
+		return;
+
+	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
+	for (i = 0; i < queue_depth; i++)
+		cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	spin_lock_irqsave(&tags->lock, flags);
+	spin_unlock_irqrestore(&tags->lock, flags);
+}
+
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	struct request *flush_rq = hctx->fq->flush_rq;
+
 	blk_mq_debugfs_unregister_hctx(hctx);
 
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
+	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+			set->queue_depth, flush_rq);
+
 	if (set->ops->exit_request)
-		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+		set->ops->exit_request(set, flush_rq, hctx_idx);
 
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);