BACKPORT: blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter

Grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter();
holding the reference prevents the request from being re-used while ->fn
is running. The approach is the same as the one used in timeout handling.

This fixes request use-after-free (UAF) caused by a completion race or
by queue release:

- If a request is referenced before rq->q is frozen, the queue won't be
frozen before the request is released during iteration.

- If a request is referenced after rq->q is frozen, refcount_inc_not_zero()
will return false, and we won't iterate over this request.

However, one request UAF is still not covered: refcount_inc_not_zero() may
read an already-freed request; that case will be handled in the next patch.
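
For illustration, the following is a minimal userspace sketch of the
grab-before-iterate pattern described above. It is not kernel code: the
toy_* names are invented stand-ins, and C11 atomics model the
refcount_inc_not_zero()/put semantics used on rq->ref:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct request; ref == 0 means "being freed". */
struct toy_request {
	atomic_int ref;
	int tag;
};

/* Models refcount_inc_not_zero(): take a reference only if the request
 * is still live; fail if the count has already dropped to zero. */
static bool toy_ref_get_unless_zero(struct toy_request *rq)
{
	int old = atomic_load(&rq->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&rq->ref, &old, old + 1))
			return true;
	}
	return false;
}

/* Models the put side: free on the last reference. */
static void toy_ref_put(struct toy_request *rq)
{
	if (atomic_fetch_sub(&rq->ref, 1) == 1)
		printf("tag %d: last reference dropped, freeing\n", rq->tag);
}

/* The shape bt_iter() takes after this patch: grab a reference before
 * calling the callback, and skip the slot entirely if the grab fails. */
static void iterate_one(struct toy_request *rq,
			void (*fn)(struct toy_request *))
{
	if (!rq || !toy_ref_get_unless_zero(rq))
		return;		/* empty slot or dying request: skip */
	fn(rq);			/* rq cannot be re-used while we hold a ref */
	toy_ref_put(rq);
}

static void show(struct toy_request *rq)
{
	printf("visiting tag %d\n", rq->tag);
}

int main(void)
{
	struct toy_request live = { .tag = 1 };
	struct toy_request dying = { .tag = 2 };

	atomic_init(&live.ref, 1);	/* owner still holds its reference */
	atomic_init(&dying.ref, 0);	/* completion already dropped its ref */

	iterate_one(&live, show);	/* visited */
	iterate_one(&dying, show);	/* skipped: reference grab fails */
	iterate_one(NULL, show);	/* skipped: empty slot */
	return 0;
}

With the reference held across ->fn, a concurrent completion can at most
drop the count to one; it can never free the request underneath the
callback.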

Tested-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>

Bug: 197804811
Change-Id: Ib80fe8b9f1d76d2489e41f3365fbd12d68a3b097
[Pradeep: Resolved conflicts in block/blk-mq-tag.c]
Git-commit: a5d38e7c26ca21a544635732a711176017663168
Git-repo: https://android.googlesource.com/kernel/common/
Signed-off-by: Pradeep P V K <quic_pragalla@quicinc.com>
commit f3b26aea99 (parent 06b23a9ca4)
Author:     Ming Lei <ming.lei@redhat.com>
AuthorDate: 2021-05-11 23:22:34 +08:00
Commit:     Pradeep P V K <quic_pragalla@quicinc.com>

3 files changed, 39 insertions(+), 11 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -218,6 +218,16 @@ struct bt_iter_data {
 	bool reserved;
 };
 
+static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
+		unsigned int bitnr)
+{
+	struct request *rq = tags->rqs[bitnr];
+
+	if (!rq || !refcount_inc_not_zero(&rq->ref))
+		return NULL;
+	return rq;
+}
+
 static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 {
 	struct bt_iter_data *iter_data = data;
@@ -225,18 +235,23 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = hctx->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
+	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
-	rq = tags->rqs[bitnr];
 
 	/*
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assining ->rqs[].
 	 */
-	if (rq && rq->q == hctx->queue)
+	rq = blk_mq_find_and_get_req(tags, bitnr);
+	if (!rq)
+		return true;
+
+	if (rq->q == hctx->queue)
 		iter_data->fn(hctx, rq, iter_data->data, reserved);
-	return true;
+	blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
@@ -265,6 +280,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = iter_data->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
+	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
@@ -273,11 +289,13 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assining ->rqs[].
 	 */
-	rq = tags->rqs[bitnr];
-	if (rq && blk_mq_request_started(rq))
+	rq = blk_mq_find_and_get_req(tags, bitnr);
+	if (!rq)
+		return true;
+	if (blk_mq_request_started(rq))
 		iter_data->fn(rq, iter_data->data, reserved);
-
-	return true;
+	blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,

diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -812,6 +812,17 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	return false;
 }
 
+void blk_mq_put_rq_ref(struct request *rq)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+	if (is_flush_rq(rq, hctx))
+		rq->end_io(rq, 0);
+	else if (refcount_dec_and_test(&rq->ref))
+		__blk_mq_free_request(rq);
+}
+
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
@@ -844,11 +855,9 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	 */
 	if (blk_mq_req_expired(rq, next))
 		blk_mq_rq_timed_out(rq, reserved);
+	blk_mq_put_rq_ref(rq);
+	return;
 
-	if (is_flush_rq(rq, hctx))
-		rq->end_io(rq, 0);
-	else if (refcount_dec_and_test(&rq->ref))
-		__blk_mq_free_request(rq);
 }
 
 static void blk_mq_timeout_work(struct work_struct *work)

diff --git a/block/blk-mq.h b/block/blk-mq.h
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -39,6 +39,7 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 		struct blk_mq_ctx *start);
+void blk_mq_put_rq_ref(struct request *rq);
 
 /*
  * Internal helpers for allocating/freeing the request map
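
One subtlety in the blk-mq.c hunk above: blk_mq_put_rq_ref() does not
drop the refcount unconditionally. A flush request is released through
its ->end_io() handler (the is_flush_rq() branch), while an ordinary
request is freed on the final put. A toy model of that asymmetric
release, with invented toy_* names rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	bool is_flush;			/* models is_flush_rq() */
	int ref;			/* models rq->ref */
	void (*end_io)(struct toy_rq *, int);
};

static void toy_flush_end_io(struct toy_rq *rq, int error)
{
	printf("flush request released via end_io(error=%d)\n", error);
}

/* Models blk_mq_put_rq_ref(): flush requests hand their reference back
 * through the completion handler; others are freed on the last put. */
static void toy_put_rq_ref(struct toy_rq *rq)
{
	if (rq->is_flush)
		rq->end_io(rq, 0);
	else if (--rq->ref == 0)
		printf("normal request freed on last put\n");
}

int main(void)
{
	struct toy_rq flush = { .is_flush = true, .end_io = toy_flush_end_io };
	struct toy_rq normal = { .is_flush = false, .ref = 1 };

	toy_put_rq_ref(&flush);
	toy_put_rq_ref(&normal);
	return 0;
}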