@@ -72,16 +72,12 @@ ClusterShardInfos GetConfigForStats(ConnectionContext* cntx) {
     return config;
   }

-  // We can't mutate `config` so we copy it over
-  std::vector<ClusterShardInfo> infos;
-  infos.reserve(config.size());
-
-  for (auto& node : config) {
-    infos.push_back(node);
-    infos.rbegin()->replicas.clear();
+  auto shards_info = config.Unwrap();
+  for (auto& node : shards_info) {
+    node.replicas.clear();
   }

-  return ClusterShardInfos{std::move(infos)};
+  return shards_info;
 }

 }  // namespace
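For context: the rewritten GetConfigForStats relies on ClusterShardInfos::Unwrap() handing back a copy of the shard list that the caller may freely mutate, instead of copying elements one by one. A minimal sketch of that copy-then-strip pattern, using simplified, hypothetical stand-ins for the real Dragonfly types:

    #include <string>
    #include <vector>

    // Hypothetical, simplified stand-ins for the real cluster types.
    struct ReplicaInfo {
      std::string id;
    };
    struct ShardInfo {
      std::vector<ReplicaInfo> replicas;
    };
    struct ShardInfos {
      // Assumption: Unwrap() returns a copy the caller may mutate freely.
      std::vector<ShardInfo> Unwrap() const { return shards; }
      std::vector<ShardInfo> shards;
    };

    // Copy the config once, strip replica details, return the stripped copy.
    std::vector<ShardInfo> StripReplicas(const ShardInfos& config) {
      auto shards = config.Unwrap();
      for (auto& shard : shards) {
        shard.replicas.clear();
      }
      return shards;
    }

Compared with the removed element-by-element copy, this makes a single copy and mutates it in place.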
@@ -149,14 +145,15 @@ ClusterShardInfo ClusterFamily::GetEmulatedShardInfo(ConnectionContext* cntx) co
                                   ? static_cast<uint16_t>(absl::GetFlag(FLAGS_port))
                                   : cluster_announce_port;

-    info.master = {{.id = id_, .ip = preferred_endpoint, .port = preferred_port}, NodeHealth::NONE};
+    info.master = {{.id = id_, .ip = preferred_endpoint, .port = preferred_port},
+                   NodeHealth::ONLINE};

     if (cntx->conn()->IsPrivileged() || !absl::GetFlag(FLAGS_managed_service_info)) {
       for (const auto& replica : server_family_->GetDflyCmd()->GetReplicasRoleInfo()) {
         info.replicas.push_back({{.id = replica.id,
                                   .ip = replica.address,
                                   .port = static_cast<uint16_t>(replica.listening_port)},
-                                 NodeHealth::NONE});
+                                 NodeHealth::ONLINE});
       }
     }
   } else {
@@ -166,7 +163,7 @@ ClusterShardInfo ClusterFamily::GetEmulatedShardInfo(ConnectionContext* cntx) co
     info.replicas.push_back({{.id = id_,
                               .ip = cntx->conn()->LocalBindAddress(),
                               .port = static_cast<uint16_t>(absl::GetFlag(FLAGS_port))},
-                             NodeHealth::NONE});
+                             NodeHealth::ONLINE});
   }

   return info;
@@ -196,7 +193,7 @@ void ClusterShardsImpl(const ClusterShardInfos& config, SinkReplyBuilder* builde
   constexpr unsigned int kEntrySize = 4;
   auto* rb = static_cast<RedisReplyBuilder*>(builder);

-  auto WriteNode = [&](const ClusterNodeInfo& node, string_view role) {
+  auto WriteNode = [&](const ClusterExtendedNodeInfo& node, string_view role) {
     constexpr unsigned int kNodeSize = 14;
     rb->StartArray(kNodeSize);
     rb->SendBulkString("id");
@@ -212,7 +209,7 @@ void ClusterShardsImpl(const ClusterShardInfos& config, SinkReplyBuilder* builde
     rb->SendBulkString("replication-offset");
     rb->SendLong(0);
     rb->SendBulkString("health");
-    rb->SendBulkString("online");
+    rb->SendBulkString(ToString(node.health));
   };

   rb->StartArray(config.size());
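CLUSTER SHARDS now reports each node's stored health instead of a hard-coded "online". A rough sketch of what a ToString(NodeHealth) mapping could look like; the enum values appear elsewhere in this diff, but the exact strings returned by the real helper are an assumption here:

    #include <string_view>

    enum class NodeHealth { NONE, FAIL, LOADING, ONLINE, HIDDEN };

    // Hypothetical mapping; the real ToString() may spell these differently.
    std::string_view ToString(NodeHealth health) {
      switch (health) {
        case NodeHealth::ONLINE:
          return "online";
        case NodeHealth::FAIL:
          return "fail";
        case NodeHealth::LOADING:
          return "loading";
        default:
          return "online";  // NONE/HIDDEN are not expected to reach this reply path.
      }
    }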
@@ -237,15 +234,22 @@ void ClusterShardsImpl(const ClusterShardInfos& config, SinkReplyBuilder* builde
 }  // namespace

 void ClusterFamily::ClusterShards(SinkReplyBuilder* builder, ConnectionContext* cntx) {
-  auto shard_infos = GetShardInfos(cntx);
-  if (shard_infos) {
-    return ClusterShardsImpl(*shard_infos, builder);
+  auto config = GetShardInfos(cntx);
+  if (config) {
+    // We need to remove hidden replicas.
+    auto shards_info = config->Unwrap();
+    for (auto& shard : shards_info) {
+      auto new_end = std::remove_if(shard.replicas.begin(), shard.replicas.end(),
+                                    [](const auto& r) { return r.health == NodeHealth::HIDDEN; });
+      shard.replicas.erase(new_end, shard.replicas.end());
+    }
+    return ClusterShardsImpl({shards_info}, builder);
   }
   return builder->SendError(kClusterNotConfigured);
 }

 namespace {
-void ClusterSlotsImpl(const ClusterShardInfos& config, SinkReplyBuilder* builder) {
+void ClusterSlotsImpl(ClusterShardInfos config, SinkReplyBuilder* builder) {
   // For more details https://redis.io/commands/cluster-slots/
   auto* rb = static_cast<RedisReplyBuilder*>(builder);

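ClusterShards (and ClusterSlotsImpl below) filters replicas with the classic erase-remove idiom. A standalone sketch of that idiom with simplified types, equivalent in spirit to the filtering added above:

    #include <algorithm>
    #include <vector>

    enum class NodeHealth { NONE, FAIL, LOADING, ONLINE, HIDDEN };
    struct Replica {
      NodeHealth health;
    };

    // remove_if compacts the elements to keep at the front and returns the new
    // logical end; erase then trims the leftover tail in a single pass.
    void DropHiddenReplicas(std::vector<Replica>& replicas) {
      auto new_end = std::remove_if(
          replicas.begin(), replicas.end(),
          [](const Replica& r) { return r.health == NodeHealth::HIDDEN; });
      replicas.erase(new_end, replicas.end());
    }

With C++20 the same filter collapses to std::erase_if(replicas, pred), though the diff keeps the explicit remove_if/erase pair.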
@@ -258,10 +262,20 @@ void ClusterSlotsImpl(const ClusterShardInfos& config, SinkReplyBuilder* builder
   };

   unsigned int slot_ranges = 0;
-  for (const auto& shard : config) {
+
+  // We need to remove hidden, failed, and loading replicas.
+  auto shards_info = config.Unwrap();
+  for (auto& shard : shards_info) {
     slot_ranges += shard.slot_ranges.Size();
+    auto new_end = std::remove_if(shard.replicas.begin(), shard.replicas.end(), [](const auto& r) {
+      return r.health == NodeHealth::HIDDEN || r.health == NodeHealth::FAIL ||
+             r.health == NodeHealth::LOADING;
+    });
+    shard.replicas.erase(new_end, shard.replicas.end());
   }

+  config = {shards_info};
+
   rb->StartArray(slot_ranges);
   for (const auto& shard : config) {
     for (const auto& slot_range : shard.slot_ranges) {
@@ -294,7 +308,7 @@ void ClusterNodesImpl(const ClusterShardInfos& config, string_view my_id,

   string result;

-  auto WriteNode = [&](const ClusterNodeInfo& node, string_view role, string_view master_id,
+  auto WriteNode = [&](const ClusterExtendedNodeInfo& node, string_view role, string_view master_id,
                        const SlotRanges& ranges) {
     absl::StrAppend(&result, node.id, " ");

@@ -307,7 +321,8 @@ void ClusterNodesImpl(const ClusterShardInfos& config, string_view my_id,

     absl::StrAppend(&result, master_id, " ");

-    absl::StrAppend(&result, "0 0 0 connected");
+    absl::StrAppend(&result,
+                    node.health != NodeHealth::FAIL ? "0 0 0 connected" : "0 0 0 disconnected");

     for (const auto& range : ranges) {
       absl::StrAppend(&result, " ", range.start);
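The link-state is the eighth field of every CLUSTER NODES line, so with this change a failed node is reported as disconnected. Purely for illustration (the id, address, and master id below are made up, and the exact address formatting may differ), a replica line might look roughly like:

    e7d1eecce10f 127.0.0.1:7001 slave 67ed2db8d677 0 0 0 disconnected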
@@ -324,7 +339,9 @@ void ClusterNodesImpl(const ClusterShardInfos& config, string_view my_id,
     WriteNode(shard.master, "master", "-", shard.slot_ranges);
     for (const auto& replica : shard.replicas) {
       // Only the master prints ranges, so we send an empty set for replicas.
-      WriteNode(replica, "slave", shard.master.id, {});
+      if (replica.health != NodeHealth::HIDDEN) {
+        WriteNode(replica, "slave", shard.master.id, {});
+      }
     }
   }
