blk-mq: always allow reserved allocation in hctx_may_queue
author  Ming Lei <ming.lei@redhat.com>
Fri, 11 Sep 2020 10:41:14 +0000 (18:41 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 29 Oct 2020 09:12:10 +0000 (10:12 +0100)
[ Upstream commit 285008501c65a3fcee05d2c2c26cbf629ceff2f0 ]

NVMe shares a tagset between the fabrics queue and the admin queue, or
between connect_q and the NS queues, so hctx_may_queue() can be called
when allocating requests for any of these queues.
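
For context, a minimal sketch of the setup this describes: a driver-side
tag set with a few reserved tags, shared by more than one request queue,
and an allocation that asks for a reserved tag with BLK_MQ_REQ_RESERVED.
The ops structure, names and sizes below are made up for illustration;
only the blk-mq structures, functions and flags are real.

  #include <linux/blk-mq.h>
  #include <linux/blkdev.h>

  /* Real callbacks (.queue_rq etc.) elided for the sketch. */
  static const struct blk_mq_ops my_mq_ops;

  /* One tag set, registered with blk_mq_alloc_tag_set() and then shared
   * by several request queues (e.g. connect_q and the NS queues). */
  static struct blk_mq_tag_set my_tag_set = {
          .ops            = &my_mq_ops,
          .nr_hw_queues   = 1,
          .queue_depth    = 128,
          .reserved_tags  = 2,   /* for commands that must always succeed */
  };

  static struct request *alloc_recovery_request(struct request_queue *q)
  {
          /*
           * BLK_MQ_REQ_RESERVED makes the tag come from the reserved pool
           * (breserved_tags); this is the allocation that must not be
           * throttled by hctx_may_queue() during error handling.
           */
          return blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
  }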

Tags can be reserved in these tagsets. Before error recovery there are
often lots of in-flight requests which can't be completed, and new
reserved requests may be needed in the error recovery path. However,
hctx_may_queue() may keep returning false because there are too many
in-flight requests which can't be completed during error handling. In
the end, nothing can proceed.
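
To see why, here is a small self-contained model of the fair-share check
that hctx_may_queue() applies when a tag set is shared: each active queue
is allowed roughly queue_depth / active_queues tags, and further
allocations are refused once that many requests are in flight. This is a
simplification for illustration (the in-kernel heuristic differs in
detail), but it shows the starvation:

  #include <stdbool.h>
  #include <stdio.h>

  /* Approximate model of the shared-tag fairness check. */
  static bool may_queue(unsigned int depth, unsigned int active_queues,
                        unsigned int in_flight)
  {
          unsigned int allowed;

          if (!active_queues)
                  return true;

          allowed = (depth + active_queues - 1) / active_queues;
          if (allowed < 4)
                  allowed = 4;
          return in_flight < allowed;
  }

  int main(void)
  {
          /*
           * A 128-tag set shared by 2 active queues allows ~64 tags each.
           * During error handling those 64 requests are stuck and never
           * complete, so every new allocation is refused -- including the
           * reserved request that recovery itself needs.
           */
          printf("may_queue = %d\n", may_queue(128, 2, 64)); /* prints 0 */
          return 0;
  }

Unless reserved allocations bypass this check, the reserved tag that the
error-recovery path needs can never be handed out, which is exactly the
hang described above.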

Fix this issue by always allowing reserved tag allocation in
hctx_may_queue(). This is reasonable because reserved tags are supposed
to always be available.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: David Milburn <dmilburn@redhat.com>
Cc: Ewan D. Milne <emilne@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
block/blk-mq-tag.c
block/blk-mq.c

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32d82e23b0953140c9cd407c438a3c28d77cc688..a1c1e7c611f7b12916bbded775e64e067a004697 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -59,7 +59,8 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
 {
-       if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
+       if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
+                       !hctx_may_queue(data->hctx, bt))
                return BLK_MQ_NO_TAG;
 
        if (data->shallow_depth)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c27a61029cdd05c92a86617655d729178e7d03f0..94a53d779c12bfd1ae1dca43eb5873439b73d9fe 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1105,10 +1105,11 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
        if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
                bt = &rq->mq_hctx->tags->breserved_tags;
                tag_offset = 0;
+       } else {
+               if (!hctx_may_queue(rq->mq_hctx, bt))
+                       return false;
        }
 
-       if (!hctx_may_queue(rq->mq_hctx, bt))
-               return false;
        tag = __sbitmap_queue_get(bt);
        if (tag == BLK_MQ_NO_TAG)
                return false;