@@ -1013,7 +1013,8 @@ impl<RT: Runtime, W: IsolateWorker<RT>> IsolateScheduler<RT, W> {
                 completed_worker = in_progress_workers.select_next_some() => {
                     log_pool_running_count(
                         self.worker.config().name,
-                        in_progress_workers.len()
+                        in_progress_workers.len(),
+                        "" // This is a single tenant scheduler used in the backend.
                     );
                     let Ok(completed_worker) = completed_worker else {
                         // Worker has shut down, so we should shut down too.
@@ -1049,7 +1050,13 @@ impl<RT: Runtime, W: IsolateWorker<RT>> IsolateScheduler<RT, W> {
                         return;
                     }
                     in_progress_workers.push(done_receiver);
-                    log_pool_running_count(self.worker.config().name, in_progress_workers.len());
+                    // This is a single tenant scheduler used in the backend.
+                    let client_id = "";
+                    log_pool_running_count(
+                        self.worker.config().name,
+                        in_progress_workers.len(),
+                        client_id,
+                    );
                 }
             }
         }
@@ -1115,22 +1122,30 @@ impl<RT: Runtime, W: IsolateWorker<RT>> SharedIsolateScheduler<RT, W> {
     }
 
     fn handle_completed_worker(&mut self, completed_worker: ActiveWorkerState) {
-        match self
+        let new_count = match self
            .in_progress_count
            .remove_entry(&completed_worker.client_id)
         {
             Some((client_id, count)) if count > 1 => {
                 self.in_progress_count.insert(client_id, count - 1);
+                count - 1
             },
             Some((_, 1)) => {
                 // Nothing to do; we've already removed the entry above.
+                0
             },
             _ => panic!(
                 "Inconsistent state in `in_progress_count` map; the count of active workers for \
                  client {} must be >= 1",
                 completed_worker.client_id
             ),
-        }
+        };
+        log_pool_running_count(
+            self.worker.config().name,
+            new_count,
+            &completed_worker.client_id,
+        );
+
         self.available_workers
             .entry(completed_worker.client_id)
             .or_default()
@@ -1146,10 +1161,6 @@ impl<RT: Runtime, W: IsolateWorker<RT>> SharedIsolateScheduler<RT, W> {
         loop {
             select_biased! {
                 completed_worker = self.in_progress_workers.select_next_some() => {
-                    log_pool_running_count(
-                        self.worker.config().name,
-                        self.in_progress_workers.len()
-                    );
                     let Ok(completed_worker): Result<ActiveWorkerState, _> = completed_worker else {
                         tracing::warn!("Worker has shut down uncleanly. Shutting down {} scheduler.", self.worker.config().name);
                         return;
@@ -1171,10 +1182,16 @@ impl<RT: Runtime, W: IsolateWorker<RT>> SharedIsolateScheduler<RT, W> {
                     };
                     let (done_sender, done_receiver) = oneshot::channel();
                     self.in_progress_workers.push(done_receiver);
-                    *self
+                    let entry = self
                         .in_progress_count
                         .entry(request.client_id.clone())
-                        .or_default() += 1;
+                        .or_default();
+                    *entry += 1;
+                    log_pool_running_count(
+                        self.worker.config().name,
+                        *entry,
+                        &request.client_id,
+                    );
                     let client_id = request.client_id.clone();
                     if self.worker_senders[worker_id]
                         .try_send((
@@ -1195,10 +1212,6 @@ impl<RT: Runtime, W: IsolateWorker<RT>> SharedIsolateScheduler<RT, W> {
                         );
                         return;
                     }
-                    log_pool_running_count(
-                        self.worker.config().name,
-                        self.in_progress_workers.len()
-                    );
                 },
                 _ = report_stats => {
                     let heap_stats = self.aggregate_heap_stats();
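Every call site in this diff now passes a client id as a third argument: the single-tenant backend scheduler passes `""`, while the shared scheduler passes the per-client in-progress count together with `&completed_worker.client_id` or `&request.client_id`. The helper itself is not part of this diff, so the following is only a minimal sketch of the signature these call sites imply, with `tracing::info!` used as a stand-in for the repo's actual metrics sink:

```rust
// Hypothetical sketch only: the real helper lives elsewhere in the crate and
// most likely updates a labeled gauge rather than emitting a log line.
pub fn log_pool_running_count(name: &'static str, count: usize, client_id: &str) {
    // `client_id` is "" for the single-tenant backend scheduler and a real
    // tenant id in the shared scheduler, so the metric can be split per client.
    tracing::info!(
        pool = name,
        client_id,
        running = count,
        "isolate pool running count"
    );
}
```

Note the design shift in the shared scheduler: it now reports the per-client count (`new_count` when a worker completes, `*entry` when one is dispatched) instead of the pool-wide `self.in_progress_workers.len()`, which is what makes a per-tenant breakdown of the gauge possible.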