Squashed revert of recent blk-mq changes

Revert "BACKPORT: blk-mq: fix is_flush_rq"

This reverts commit 09de4ca5ab.

Revert "BACKPORT: blk-mq: clearing flush request reference in tags->rqs[]"

This reverts commit bcf59d6f28.

Revert "BACKPORT: blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter"

This reverts commit 66b99c5dad.

Reason for revert: According to the following links, those changes are
not needed. Our kernel already has the proper version of "BACKPORT:
blk-mq: clear stale request in tags->rq[] before freeing one request pool"
and the remaining changes break compilation, so just revert them.
https://android-review.googlesource.com/c/kernel/common/+/2502118/
https://android-review.googlesource.com/c/kernel/common/+/2502120/
https://android-review.googlesource.com/c/kernel/common/+/2502121/

Change-Id: I368ab4b975089b5efc08bd7c8c19976b3934412a
Michael Bestas 2023-12-11 21:12:00 +02:00
parent 930923679e
commit 693180eebb
5 changed files with 17 additions and 80 deletions

block/blk-flush.c

@@ -291,11 +291,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
-bool is_flush_rq(struct request *rq)
-{
-	return rq->end_io == flush_end_io;
-}
-
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked

block/blk-mq-tag.c

@@ -218,20 +218,6 @@ struct bt_iter_data {
 	bool reserved;
 };
 
-static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
-		unsigned int bitnr)
-{
-	struct request *rq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&tags->lock, flags);
-	rq = tags->rqs[bitnr];
-	if (!rq || !refcount_inc_not_zero(&rq->ref))
-		rq = NULL;
-	spin_unlock_irqrestore(&tags->lock, flags);
-	return rq;
-}
-
 static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 {
 	struct bt_iter_data *iter_data = data;
@@ -239,23 +225,18 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = hctx->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
-	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
+	rq = tags->rqs[bitnr];
 
 	/*
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assining ->rqs[].
 	 */
-	rq = blk_mq_find_and_get_req(tags, bitnr);
-	if (!rq)
-		return true;
-
-	if (rq->q == hctx->queue)
+	if (rq && rq->q == hctx->queue)
 		iter_data->fn(hctx, rq, iter_data->data, reserved);
-	blk_mq_put_rq_ref(rq);
-	return ret;
+	return true;
 }
 
 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
@@ -284,7 +265,6 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = iter_data->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
-	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
@@ -293,13 +273,11 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assining ->rqs[].
 	 */
-	rq = blk_mq_find_and_get_req(tags, bitnr);
-	if (!rq)
-		return true;
-	if (blk_mq_request_started(rq))
+	rq = tags->rqs[bitnr];
+	if (rq && blk_mq_request_started(rq))
 		iter_data->fn(rq, iter_data->data, reserved);
-	blk_mq_put_rq_ref(rq);
-	return ret;
+
+	return true;
 }
 
 static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,

block/blk-mq.c

@@ -812,14 +812,6 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	return false;
 }
 
-void blk_mq_put_rq_ref(struct request *rq)
-{
-	if (is_flush_rq(rq))
-		rq->end_io(rq, 0);
-	else if (refcount_dec_and_test(&rq->ref))
-		__blk_mq_free_request(rq);
-}
-
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
@@ -852,9 +844,11 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	 */
 	if (blk_mq_req_expired(rq, next))
 		blk_mq_rq_timed_out(rq, reserved);
 
-	blk_mq_put_rq_ref(rq);
-	return;
+	if (is_flush_rq(rq, hctx))
+		rq->end_io(rq, 0);
+	else if (refcount_dec_and_test(&rq->ref))
+		__blk_mq_free_request(rq);
 }
 
 static void blk_mq_timeout_work(struct work_struct *work)
@@ -2204,51 +2198,18 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 					    &hctx->cpuhp_dead);
 }
 
-/*
- * Before freeing hw queue, clearing the flush request reference in
- * tags->rqs[] for avoiding potential UAF.
- */
-static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
-		unsigned int queue_depth, struct request *flush_rq)
-{
-	int i;
-	unsigned long flags;
-
-	/* The hw queue may not be mapped yet */
-	if (!tags)
-		return;
-
-	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
-
-	for (i = 0; i < queue_depth; i++)
-		cmpxchg(&tags->rqs[i], flush_rq, NULL);
-
-	/*
-	 * Wait until all pending iteration is done.
-	 *
-	 * Request reference is cleared and it is guaranteed to be observed
-	 * after the ->lock is released.
-	 */
-	spin_lock_irqsave(&tags->lock, flags);
-	spin_unlock_irqrestore(&tags->lock, flags);
-}
-
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
-	struct request *flush_rq = hctx->fq->flush_rq;
-
 	blk_mq_debugfs_unregister_hctx(hctx);
 
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
-	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
-			set->queue_depth, flush_rq);
-
 	if (set->ops->exit_request)
-		set->ops->exit_request(set, flush_rq, hctx_idx);
+		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);

block/blk-mq.h

@@ -39,7 +39,6 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
-void blk_mq_put_rq_ref(struct request *rq);
 
 /*
  * Internal helpers for allocating/freeing the request map

block/blk.h

@@ -124,7 +124,11 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
-bool is_flush_rq(struct request *req);
+static inline bool
+is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
+{
+	return hctx->fq->flush_rq == req;
+}
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags);