Commit 9cfafe5

fix(websocket): Fix race conditions, memory leak, and data loss
- Add state check in abort_connection to prevent double-close
- Fix memory leak: free errormsg_buffer on disconnect
- Reset connection state on reconnect to prevent stale data
- Implement lock ordering for separate TX lock mode
- Read buffered data immediately after connection to prevent data loss
- Add sdkconfig.ci.tx_lock config
1 parent 9e0bcd4 commit 9cfafe5

2 files changed (+142, −10 lines)


components/esp_websocket_client/esp_websocket_client.c

Lines changed: 127 additions & 10 deletions
@@ -241,9 +241,29 @@ static esp_err_t esp_websocket_client_dispatch_event(esp_websocket_client_handle
     return esp_event_loop_run(client->event_handle, 0);
 }
 
+/**
+ * @brief Abort the WebSocket connection and initiate reconnection or shutdown
+ *
+ * @param client WebSocket client handle
+ * @param error_type Type of error that caused the abort
+ *
+ * @return ESP_OK on success, ESP_FAIL on failure
+ *
+ * @note PRECONDITION: client->lock MUST be held by the calling thread before calling this function.
+ *       This function does NOT acquire the lock itself. Calling without the lock will result in
+ *       race conditions and undefined behavior.
+ */
 static esp_err_t esp_websocket_client_abort_connection(esp_websocket_client_handle_t client, esp_websocket_error_type_t error_type)
 {
     ESP_WS_CLIENT_STATE_CHECK(TAG, client, return ESP_FAIL);
+
+    if (client->state == WEBSOCKET_STATE_CLOSING || client->state == WEBSOCKET_STATE_UNKNOW ||
+        client->state == WEBSOCKET_STATE_WAIT_TIMEOUT) {
+        ESP_LOGW(TAG, "Connection already closing/closed, skipping abort");
+        return ESP_OK;
+    }
+
     esp_transport_close(client->transport);
 
     if (!client->config->auto_reconnect) {
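For reference, a minimal call-site sketch of the precondition documented above. It mirrors how the call sites touched later in this commit use the function; the snippet itself is illustrative and not part of the diff:

xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);   // precondition: hold the state lock
esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
xSemaphoreGiveRecursive(client->lock);                  // release only after the abort completes

With the lock held across the call, the new state check cannot race with another thread that is simultaneously closing the same connection.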
@@ -256,6 +276,17 @@ static esp_err_t esp_websocket_client_abort_connection(esp_websocket_client_hand
     }
     client->error_handle.error_type = error_type;
     esp_websocket_client_dispatch_event(client, WEBSOCKET_EVENT_DISCONNECTED, NULL, 0);
+
+    if (client->errormsg_buffer) {
+        ESP_LOGD(TAG, "Freeing error buffer (%d bytes) - Free heap: %" PRIu32 " bytes",
+                 client->errormsg_size, esp_get_free_heap_size());
+        free(client->errormsg_buffer);
+        client->errormsg_buffer = NULL;
+        client->errormsg_size = 0;
+    } else {
+        ESP_LOGD(TAG, "Disconnect - Free heap: %" PRIu32 " bytes", esp_get_free_heap_size());
+    }
+
     return ESP_OK;
 }
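The leak fix uses the usual free-and-NULL idiom so that a later disconnect or reconnect can neither double-free the buffer nor read a stale pointer. A self-contained sketch of the same idiom; the holder type and function name here are hypothetical, and only errormsg_buffer/errormsg_size from the diff are real:

#include <stdlib.h>

typedef struct {
    char *buf;   /* stands in for client->errormsg_buffer */
    int   size;  /* stands in for client->errormsg_size   */
} msg_holder_t;

static void msg_holder_release(msg_holder_t *h)
{
    if (h->buf) {
        free(h->buf);
        h->buf = NULL;   /* NULL-ing the pointer makes release idempotent */
        h->size = 0;     /* keep the size consistent with the freed buffer */
    }
}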

@@ -453,6 +484,8 @@ static void destroy_and_free_resources(esp_websocket_client_handle_t client)
     esp_websocket_client_destroy_config(client);
     if (client->transport_list) {
         esp_transport_list_destroy(client->transport_list);
+        client->transport_list = NULL;
+        client->transport = NULL;
     }
     vSemaphoreDelete(client->lock);
 #ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
@@ -671,6 +704,11 @@ static int esp_websocket_client_send_with_exact_opcode(esp_websocket_client_hand
         if (wlen < 0 || (wlen == 0 && need_write != 0)) {
             ret = wlen;
             esp_websocket_free_buf(client, true);
+
+#ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
+            xSemaphoreGiveRecursive(client->tx_lock);
+            xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);
+#endif
             esp_tls_error_handle_t error_handle = esp_transport_get_error_handle(client->transport);
             if (error_handle) {
                 esp_websocket_client_error(client, "esp_transport_write() returned %d, transport_error=%s, tls_error_code=%i, tls_flags=%i, errno=%d",
@@ -679,8 +717,16 @@ static int esp_websocket_client_send_with_exact_opcode(esp_websocket_client_hand
             } else {
                 esp_websocket_client_error(client, "esp_transport_write() returned %d, errno=%d", ret, errno);
             }
+            ESP_LOGD(TAG, "Calling abort_connection due to send error");
+#ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
+            esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
+            xSemaphoreGiveRecursive(client->lock);
+            return ret;
+#else
+            // Already holding client->lock, safe to call
             esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
             goto unlock_and_return;
+#endif
         }
         opcode = 0;
         widx += wlen;
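Together with the PONG/PING/CLOSE changes further down, this error path establishes one lock order for the separate-TX-lock build: a thread never blocks on tx_lock while holding client->lock. A condensed sketch of the two legal sequences, using the fields from this file; the wrapper function names are illustrative only:

#ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
// Sender side (error path above): give tx_lock back before blocking on client->lock,
// so the sender never sits on tx_lock while waiting for the state lock.
static void sender_error_lock_order_sketch(esp_websocket_client_handle_t client)
{
    xSemaphoreGiveRecursive(client->tx_lock);
    xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);
    esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
    xSemaphoreGiveRecursive(client->lock);
}

// Task side (PONG/PING/CLOSE paths below): give client->lock back before waiting for
// tx_lock, then re-take client->lock and re-check the connection state.
static void task_lock_order_sketch(esp_websocket_client_handle_t client)
{
    xSemaphoreGiveRecursive(client->lock);
    if (xSemaphoreTakeRecursive(client->tx_lock, WEBSOCKET_TX_LOCK_TIMEOUT_MS) != pdPASS) {
        xSemaphoreTakeRecursive(client->lock, portMAX_DELAY); // callers expect the lock held on return
        return;
    }
    xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);
    // ... re-validate client->state / client->transport, send, then: ...
    xSemaphoreGiveRecursive(client->tx_lock);
}
#endif

Because client->lock is never held while waiting for tx_lock, the circular wait that previously allowed the sender and the client task to deadlock cannot form.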
@@ -1019,7 +1065,6 @@ static esp_err_t esp_websocket_client_recv(esp_websocket_client_handle_t client)
             esp_websocket_free_buf(client, false);
             return ESP_OK;
         }
-
         esp_websocket_client_dispatch_event(client, WEBSOCKET_EVENT_DATA, client->rx_buffer, rlen);
 
         client->payload_offset += rlen;
@@ -1030,15 +1075,35 @@ static esp_err_t esp_websocket_client_recv(esp_websocket_client_handle_t client)
         const char *data = (client->payload_len == 0) ? NULL : client->rx_buffer;
         ESP_LOGD(TAG, "Sending PONG with payload len=%d", client->payload_len);
 #ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
+        xSemaphoreGiveRecursive(client->lock); // Release client->lock
+
+        // Now acquire tx_lock with timeout (consistent with PING/CLOSE handling)
         if (xSemaphoreTakeRecursive(client->tx_lock, WEBSOCKET_TX_LOCK_TIMEOUT_MS) != pdPASS) {
-            ESP_LOGE(TAG, "Could not lock ws-client within %d timeout", WEBSOCKET_TX_LOCK_TIMEOUT_MS);
-            return ESP_FAIL;
+            ESP_LOGE(TAG, "Could not lock ws-client within %d timeout for PONG", WEBSOCKET_TX_LOCK_TIMEOUT_MS);
+            xSemaphoreTakeRecursive(client->lock, portMAX_DELAY); // Re-acquire client->lock before returning
+            esp_websocket_free_buf(client, false); // Free rx_buffer to prevent memory leak
+            return ESP_OK; // Return gracefully, caller expects client->lock to be held
         }
-#endif
+
+        // Re-acquire client->lock to maintain consistency
+        xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);
+
+        // Another thread may have closed it while we didn't hold client->lock
+        if (client->state == WEBSOCKET_STATE_CLOSING || client->state == WEBSOCKET_STATE_UNKNOW ||
+            client->state == WEBSOCKET_STATE_WAIT_TIMEOUT || client->transport == NULL) {
+            ESP_LOGW(TAG, "Transport closed while preparing PONG, skipping send");
+            xSemaphoreGiveRecursive(client->tx_lock);
+            esp_websocket_free_buf(client, false); // Free rx_buffer to prevent memory leak
+            return ESP_OK; // Caller expects client->lock to be held, which it is
+        }
+
         esp_transport_ws_send_raw(client->transport, WS_TRANSPORT_OPCODES_PONG | WS_TRANSPORT_OPCODES_FIN, data, client->payload_len,
                                   client->config->network_timeout_ms);
-#ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
         xSemaphoreGiveRecursive(client->tx_lock);
+#else
+        esp_transport_ws_send_raw(client->transport, WS_TRANSPORT_OPCODES_PONG | WS_TRANSPORT_OPCODES_FIN, data, client->payload_len,
+                                  client->config->network_timeout_ms);
 #endif
     } else if (client->last_opcode == WS_TRANSPORT_OPCODES_PONG) {
         client->wait_for_pong_resp = false;
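The second half of that pattern is re-validation: any decision made before client->lock was released has to be re-checked after it is re-acquired, because abort_connection may have run in the gap. A small sketch of the check the PONG, PING, and CLOSE paths repeat inline; the helper name is hypothetical and bool assumes <stdbool.h>:

// Hypothetical helper (the diff open-codes this): call only with client->lock held,
// right after re-acquiring it following a lock gap.
static bool ws_still_connected_locked(esp_websocket_client_handle_t client)
{
    return client->state == WEBSOCKET_STATE_CONNECTED && client->transport != NULL;
}

When the check fails, the code gives tx_lock back, frees any pending rx buffer, and returns with client->lock held, which is what the caller expects.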
@@ -1136,7 +1201,28 @@ static void esp_websocket_client_task(void *pv)
             client->state = WEBSOCKET_STATE_CONNECTED;
             client->wait_for_pong_resp = false;
             client->error_handle.error_type = WEBSOCKET_ERROR_TYPE_NONE;
+            client->payload_len = 0;
+            client->payload_offset = 0;
+            client->last_fin = false;
+            client->last_opcode = WS_TRANSPORT_OPCODES_NONE;
+
             esp_websocket_client_dispatch_event(client, WEBSOCKET_EVENT_CONNECTED, NULL, 0);
+            esp_err_t recv_result = esp_websocket_client_recv(client);
+            if (recv_result == ESP_OK) {
+                xSemaphoreGiveRecursive(client->lock);
+                esp_event_loop_run(client->event_handle, 0);
+                if (xSemaphoreTakeRecursive(client->lock, lock_timeout) != pdPASS) {
+                    ESP_LOGE(TAG, "Failed to re-acquire lock after event loop within timeout, retrying with portMAX_DELAY");
+                    xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);
+                }
+                if (client->state != WEBSOCKET_STATE_CONNECTED || client->transport == NULL) {
+                    ESP_LOGD(TAG, "Connection state changed during handshake data processing");
+                    break;
+                }
+            } else if (recv_result == ESP_FAIL) {
+                ESP_LOGE(TAG, "Error receive data during initial connection");
+                esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
+            }
             break;
         case WEBSOCKET_STATE_CONNECTED:
             if ((CLOSE_FRAME_SENT_BIT & xEventGroupGetBits(client->status_bits)) == 0) { // only send and check for PING
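The reconnect fix has two parts: clear the per-frame parser state inherited from the previous connection, then read immediately so data the server sent right after the handshake is not left sitting in the transport. A hypothetical helper that summarizes the reset (the diff inlines these assignments):

// Illustrative only: reset frame-parser state so a stale payload_len/payload_offset
// from the previous connection cannot corrupt the first frame of the new one.
static void ws_reset_frame_state(esp_websocket_client_handle_t client)
{
    client->payload_len = 0;
    client->payload_offset = 0;
    client->last_fin = false;
    client->last_opcode = WS_TRANSPORT_OPCODES_NONE;
}

The immediate esp_websocket_client_recv() after WEBSOCKET_EVENT_CONNECTED then drains that early data and dispatches it through the event loop before the task falls back to its normal poll cycle.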
@@ -1145,8 +1231,23 @@ static void esp_websocket_client_task(void *pv)
                 client->ping_tick_ms = _tick_get_ms();
                 ESP_LOGD(TAG, "Sending PING...");
 #ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
+                // Release client->lock first to avoid deadlock with send error path
+                xSemaphoreGiveRecursive(client->lock);
+
+                // Now acquire tx_lock with timeout (consistent with PONG handling)
                 if (xSemaphoreTakeRecursive(client->tx_lock, WEBSOCKET_TX_LOCK_TIMEOUT_MS) != pdPASS) {
-                    ESP_LOGE(TAG, "Could not lock ws-client within %d timeout", WEBSOCKET_TX_LOCK_TIMEOUT_MS);
+                    ESP_LOGE(TAG, "Could not lock ws-client within %d timeout for PING", WEBSOCKET_TX_LOCK_TIMEOUT_MS);
+                    xSemaphoreTakeRecursive(client->lock, portMAX_DELAY); // Re-acquire client->lock before break
+                    break;
+                }
+
+                // Re-acquire client->lock to check state
+                xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);
+
+                // Another thread may have closed it while we didn't hold client->lock
+                if (client->state != WEBSOCKET_STATE_CONNECTED || client->transport == NULL) {
+                    ESP_LOGW(TAG, "Transport closed while preparing PING, skipping send");
+                    xSemaphoreGiveRecursive(client->tx_lock);
                     break;
                 }
 #endif
@@ -1182,8 +1283,23 @@ static void esp_websocket_client_task(void *pv)
             if ((CLOSE_FRAME_SENT_BIT & xEventGroupGetBits(client->status_bits)) == 0) {
                 ESP_LOGD(TAG, "Closing initiated by the server, sending close frame");
 #ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
+                // Release client->lock first to avoid deadlock with send error path
+                xSemaphoreGiveRecursive(client->lock);
+
+                // Now acquire tx_lock with timeout (consistent with PONG/PING handling)
                 if (xSemaphoreTakeRecursive(client->tx_lock, WEBSOCKET_TX_LOCK_TIMEOUT_MS) != pdPASS) {
-                    ESP_LOGE(TAG, "Could not lock ws-client within %d timeout", WEBSOCKET_TX_LOCK_TIMEOUT_MS);
+                    ESP_LOGE(TAG, "Could not lock ws-client within %d timeout for CLOSE", WEBSOCKET_TX_LOCK_TIMEOUT_MS);
+                    xSemaphoreTakeRecursive(client->lock, portMAX_DELAY); // Re-acquire client->lock before break
+                    break;
+                }
+
+                // Re-acquire client->lock to check state
+                xSemaphoreTakeRecursive(client->lock, portMAX_DELAY);
+
+                // Another thread may have closed it while we didn't hold client->lock
+                if (client->state != WEBSOCKET_STATE_CLOSING || client->transport == NULL) {
+                    ESP_LOGW(TAG, "Transport closed while preparing CLOSE frame, skipping send");
+                    xSemaphoreGiveRecursive(client->tx_lock);
                     break;
                 }
 #endif
@@ -1202,6 +1318,7 @@ static void esp_websocket_client_task(void *pv)
         if (WEBSOCKET_STATE_CONNECTED == client->state) {
             read_select = esp_transport_poll_read(client->transport, 1000); //Poll every 1000ms
             if (read_select < 0) {
+                xSemaphoreTakeRecursive(client->lock, lock_timeout);
                 esp_tls_error_handle_t error_handle = esp_transport_get_error_handle(client->transport);
                 if (error_handle) {
                     esp_websocket_client_error(client, "esp_transport_poll_read() returned %d, transport_error=%s, tls_error_code=%i, tls_flags=%i, errno=%d",
@@ -1210,16 +1327,16 @@ static void esp_websocket_client_task(void *pv)
                 } else {
                     esp_websocket_client_error(client, "esp_transport_poll_read() returned %d, errno=%d", read_select, errno);
                 }
-                xSemaphoreTakeRecursive(client->lock, lock_timeout);
                 esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
                 xSemaphoreGiveRecursive(client->lock);
             } else if (read_select > 0) {
+                xSemaphoreTakeRecursive(client->lock, lock_timeout);
                 if (esp_websocket_client_recv(client) == ESP_FAIL) {
                     ESP_LOGE(TAG, "Error receive data");
-                    xSemaphoreTakeRecursive(client->lock, lock_timeout);
+                    // Note: Already holding client->lock from line above
                     esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
-                    xSemaphoreGiveRecursive(client->lock);
                 }
+                xSemaphoreGiveRecursive(client->lock);
             } else {
                 ESP_LOGV(TAG, "Read poll timeout: skipping esp_transport_poll_read().");
             }
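The poll path now takes client->lock before it reads the transport error handle or calls esp_websocket_client_recv(), rather than only around abort_connection(). A condensed sketch of the resulting read branch, with error reporting trimmed; lock_timeout is the task-local timeout already used in this function:

int read_select = esp_transport_poll_read(client->transport, 1000);
if (read_select > 0) {
    xSemaphoreTakeRecursive(client->lock, lock_timeout);   // lock covers both the recv and the abort decision
    if (esp_websocket_client_recv(client) == ESP_FAIL) {
        esp_websocket_client_abort_connection(client, WEBSOCKET_ERROR_TYPE_TCP_TRANSPORT);
    }
    xSemaphoreGiveRecursive(client->lock);
}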
sdkconfig.ci.tx_lock

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
1+
CONFIG_IDF_TARGET="esp32"
2+
CONFIG_IDF_TARGET_LINUX=n
3+
CONFIG_WEBSOCKET_URI_FROM_STDIN=n
4+
CONFIG_WEBSOCKET_URI_FROM_STRING=y
5+
CONFIG_EXAMPLE_CONNECT_ETHERNET=y
6+
CONFIG_EXAMPLE_CONNECT_WIFI=n
7+
CONFIG_EXAMPLE_USE_INTERNAL_ETHERNET=y
8+
CONFIG_EXAMPLE_ETH_PHY_IP101=y
9+
CONFIG_EXAMPLE_ETH_MDC_GPIO=23
10+
CONFIG_EXAMPLE_ETH_MDIO_GPIO=18
11+
CONFIG_EXAMPLE_ETH_PHY_RST_GPIO=5
12+
CONFIG_EXAMPLE_ETH_PHY_ADDR=1
13+
CONFIG_EXAMPLE_CONNECT_IPV6=y
14+
CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK=y
15+
CONFIG_ESP_WS_CLIENT_TX_LOCK_TIMEOUT_MS=2000
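This CI config builds the example with the separate-TX-lock mode enabled so the new lock-ordering paths are exercised. In the client code the option appears as a compile-time guard around a second recursive mutex; a minimal sketch of how such a guard is typically consumed (the exact creation site is not shown in this diff, so the snippet is an assumption, and the 2000 ms value presumably feeds the WEBSOCKET_TX_LOCK_TIMEOUT_MS used above):

#ifdef CONFIG_ESP_WS_CLIENT_SEPARATE_TX_LOCK
    // Separate lock for TX so long sends do not serialize against state changes
    // protected by client->lock.
    client->tx_lock = xSemaphoreCreateRecursiveMutex();
#endif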
