
Commit 01b91bf

Ming Lei authored and axboe committed
block: don't grab elevator lock during queue initialization
->elevator_lock depends on the queue freeze lock (see block/blk-sysfs.c), and the queue freeze lock in turn depends on fs_reclaim. So don't grab the elevator lock during queue initialization, which needs to call kmalloc(GFP_KERNEL): this cuts the dependency between ->elevator_lock and fs_reclaim, and the lockdep warning goes away.

This is safe because elevator setup isn't ready to run during queue initialization.

There is no such issue in __blk_mq_update_nr_hw_queues() because memalloc_noio_save() is called before acquiring the elevator lock.

Fixes the following lockdep warning:

https://lore.kernel.org/linux-block/[email protected]/

Reported-by: [email protected]
Cc: Nilay Shroff <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent fb58555 commit 01b91bf
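
To make the ordering concrete, here is a minimal, hypothetical sketch of the pre-fix pattern (simplified from what blk_mq_realloc_hw_ctxs() did before this commit; not verbatim kernel code):

	mutex_lock(&q->elevator_lock);
	/*
	 * A GFP_KERNEL allocation may recurse into fs_reclaim while
	 * ->elevator_lock is held, recording an
	 * ->elevator_lock -> fs_reclaim dependency. Queue initialization
	 * must allocate hctxs this way, but no elevator can be configured
	 * that early, so the commit simply skips the lock on that path.
	 */
	hctx = kzalloc_node(sizeof(*hctx), GFP_KERNEL, node);
	mutex_unlock(&q->elevator_lock);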

1 file changed: +17, -7 lines

block/blk-mq.c (+17, -7)

@@ -4464,14 +4464,12 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 		return NULL;
 }
 
-static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
-		struct request_queue *q)
+static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+		struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i, j;
 
-	/* protect against switching io scheduler */
-	mutex_lock(&q->elevator_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4504,7 +4502,19 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
-	mutex_unlock(&q->elevator_lock);
+}
+
+static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+		struct request_queue *q, bool lock)
+{
+	if (lock) {
+		/* protect against switching io scheduler */
+		mutex_lock(&q->elevator_lock);
+		__blk_mq_realloc_hw_ctxs(set, q);
+		mutex_unlock(&q->elevator_lock);
+	} else {
+		__blk_mq_realloc_hw_ctxs(set, q);
+	}
 
 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
@@ -4536,7 +4546,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	xa_init(&q->hctx_table);
 
-	blk_mq_realloc_hw_ctxs(set, q);
+	blk_mq_realloc_hw_ctxs(set, q, false);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
@@ -5032,7 +5042,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 fallback:
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
-		blk_mq_realloc_hw_ctxs(set, q);
+		blk_mq_realloc_hw_ctxs(set, q, true);
 
 		if (q->nr_hw_queues != set->nr_hw_queues) {
 			int i = prev_nr_hw_queues;
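
Per the commit message, __blk_mq_update_nr_hw_queues() can keep taking the lock because it runs inside a NOIO allocation scope. A hedged sketch of that pattern (simplified; the variable name and surrounding code are illustrative, not copied from that function):

	/*
	 * memalloc_noio_save() makes subsequent GFP_KERNEL allocations
	 * implicitly drop __GFP_IO/__GFP_FS, so kmalloc() under
	 * ->elevator_lock cannot recurse into fs_reclaim here and no
	 * ->elevator_lock -> fs_reclaim dependency is recorded.
	 */
	unsigned int memflags = memalloc_noio_save();
	blk_mq_realloc_hw_ctxs(set, q, true);	/* takes ->elevator_lock internally */
	memalloc_noio_restore(memflags);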
