Commit 77e40bb

mliang-pure authored and Christoph Hellwig committed
nvme-tcp: fix premature queue removal and I/O failover
This patch addresses a data corruption issue observed in nvme-tcp during testing.

In an NVMe native multipath setup, when an I/O timeout occurs, all inflight I/Os are canceled almost immediately after the kernel socket is shut down. These canceled I/Os are reported as host path errors, triggering a failover that succeeds on a different path.

However, at this point the original I/O may still be outstanding in the host's network transmission path (e.g., the NIC's TX queue). From the user-space application's perspective, the buffer associated with the I/O is considered completed, since the I/O was acknowledged on the other path, and the buffer may be reused for new I/O requests. Because nvme-tcp enables zero-copy by default in the transmission path, this can lead to corrupted data being sent to the original target, ultimately causing data corruption.

This corruption can be reproduced by injecting delay on one path and triggering an I/O timeout.

To prevent this issue, this change ensures that all inflight transmissions are fully completed from the host's perspective before returning from queue stop. To handle concurrent I/O timeouts from multiple namespaces under the same controller, always wait in queue stop regardless of the queue's state. This aligns with the queue-stop behavior of the other NVMe fabrics transports.

Fixes: 3f2304f ("nvme-tcp: add NVMe over TCP host driver")
Signed-off-by: Michael Liang <[email protected]>
Reviewed-by: Mohamed Khalfella <[email protected]>
Reviewed-by: Randy Jennings <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent ab35ad9 commit 77e40bb
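
Editor's note: to make the zero-copy buffer-reuse hazard described above concrete, here is a minimal user-space sketch (not part of the commit) of the same race using the kernel's MSG_ZEROCOPY flag: after send() returns, the kernel may still be reading the buffer, so overwriting it before the completion notification arrives on the socket's error queue can put the new bytes on the wire. The peer address 192.0.2.1:4420 is a placeholder; nvme-tcp's in-kernel zero-copy send path differs in mechanism but obeys the same buffer-lifetime rule.

/* zc_reuse.c - why a zero-copy send buffer must not be reused early.
 * Build: cc zc_reuse.c -o zc_reuse   (Linux 4.14+, placeholder peer) */
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

int main(void)
{
	static char buf[4096];
	char control[128];
	struct msghdr msg = { 0 };
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port = htons(4420),	/* placeholder port */
	};

	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
	if (fd < 0 || connect(fd, (struct sockaddr *)&peer, sizeof(peer)))
		return 1;
	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));

	memset(buf, 'A', sizeof(buf));
	/* send() may return while the pages are still pinned for TX. */
	if (send(fd, buf, sizeof(buf), MSG_ZEROCOPY) < 0)
		return 1;

	/* UNSAFE here: memset(buf, 'B', ...) could transmit 'B's. */

	/* Reuse is safe only once the completion notification shows up
	 * on the error queue (error-queue reads never block: EAGAIN). */
	for (;;) {
		msg.msg_control = control;
		msg.msg_controllen = sizeof(control);
		if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0)
			break;
		if (errno != EAGAIN)
			return 1;
		usleep(1000);
	}
	memset(buf, 'B', sizeof(buf));	/* now safe to reuse */

	close(fd);
	return 0;
}

nvme-tcp sits on the kernel side of the same contract: once a canceled I/O completes on another path, the block layer may hand its pages to a new request, which is why the fix below holds queue teardown until the socket's transmit accounting drains.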

File tree

1 file changed (+29, −2)

drivers/nvme/host/tcp.c

@@ -1946,7 +1946,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 	cancel_work_sync(&queue->io_work);
 }
 
-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1965,6 +1965,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
+{
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+	int timeout = 100;
+
+	while (timeout > 0) {
+		if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
+		    !sk_wmem_alloc_get(queue->sock->sk))
+			return;
+		msleep(2);
+		timeout -= 2;
+	}
+	dev_warn(nctrl->device,
+		 "qid %d: timeout draining sock wmem allocation expired\n",
+		 qid);
+}
+
+static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+{
+	nvme_tcp_stop_queue_nowait(nctrl, qid);
+	nvme_tcp_wait_queue(nctrl, qid);
+}
+
+
 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
 {
 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
@@ -2032,7 +2057,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
 	int i;
 
 	for (i = 1; i < ctrl->queue_count; i++)
-		nvme_tcp_stop_queue(ctrl, i);
+		nvme_tcp_stop_queue_nowait(ctrl, i);
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvme_tcp_wait_queue(ctrl, i);
 }
 
 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
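
Editor's note, for context (not part of the diff): the drain check polls sk_wmem_alloc_get(), which reports the bytes of socket buffers the socket has handed down the transmit path (qdisc/driver) and that have not yet been freed, i.e. data the NIC may still read. In recent kernels the helper in include/net/sock.h is simply:

static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	/* sk_wmem_alloc carries a bias of 1, so an idle socket reads 0 */
	return refcount_read(&sk->sk_wmem_alloc) - 1;
}

nvme_tcp_wait_queue() therefore returns as soon as every in-flight skb for the queue has been released, and otherwise gives up after roughly 100 ms (50 iterations of msleep(2)). Note also that nvme_tcp_stop_io_queues() now does a stop-all pass before a wait-all pass, so the per-queue drain timeouts overlap instead of accumulating serially across queues.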
