Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[admin-tool][controller] Add new config into ZK and allow admin-tool to update the config #1418

Draft
wants to merge 6 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -584,6 +584,9 @@ public static void main(String[] args) throws Exception {
case DUMP_HOST_HEARTBEAT:
dumpHostHeartbeat(cmd);
break;
case UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION:
updateAdminOperationProtocolVersion(cmd);
break;
default:
StringJoiner availableCommands = new StringJoiner(", ");
for (Command c: Command.values()) {
Expand Down Expand Up @@ -3146,6 +3149,16 @@ private static void dumpHostHeartbeat(CommandLine cmd) throws Exception {
}
}

/**
 * Handles the {@code update-admin-operation-protocol-version} command: reads the cluster name and the
 * target protocol version from the command line, sends the update through the controller client, and
 * prints the controller's response.
 */
private static void updateAdminOperationProtocolVersion(CommandLine cmd) throws Exception {
  Command command = Command.UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION;
  String clusterName = getRequiredArgument(cmd, Arg.CLUSTER, command);
  long protocolVersion = Utils.parseLongFromString(
      getRequiredArgument(cmd, Arg.ADMIN_OPERATION_PROTOCOL_VERSION, command),
      Arg.ADMIN_OPERATION_PROTOCOL_VERSION.name());
  printObject(controllerClient.updateAdminOperationProtocolVersion(clusterName, protocolVersion));
}

private static void migrateVeniceZKPaths(CommandLine cmd) throws Exception {
Set<String> clusterNames = Utils.parseCommaSeparatedStringToSet(getRequiredArgument(cmd, Arg.CLUSTER_LIST));
String srcZKUrl = getRequiredArgument(cmd, Arg.SRC_ZOOKEEPER_URL);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,10 @@ public enum Arg {
),
DAVINCI_HEARTBEAT_REPORTED(
"dvc-heartbeat-reported", "dvchb", true, "Flag to indicate whether DVC is bootstrapping and sending heartbeats"
), ENABLE_STORE_MIGRATION("enable-store-migration", "esm", true, "Toggle store migration store config");
), ENABLE_STORE_MIGRATION("enable-store-migration", "esm", true, "Toggle store migration store config"),
ADMIN_OPERATION_PROTOCOL_VERSION(
"admin-operation-protocol-version", "aopv", true, "Admin operation protocol version"
);

private final String argName;
private final String first;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import static com.linkedin.venice.Arg.ACCESS_CONTROL;
import static com.linkedin.venice.Arg.ACL_PERMS;
import static com.linkedin.venice.Arg.ACTIVE_ACTIVE_REPLICATION_ENABLED;
import static com.linkedin.venice.Arg.ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.Arg.ALLOW_STORE_MIGRATION;
import static com.linkedin.venice.Arg.AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED;
import static com.linkedin.venice.Arg.BACKUP_FOLDER;
Expand Down Expand Up @@ -562,6 +563,10 @@ public enum Command {
"dump-host-heartbeat",
"Dump all heartbeat belong to a certain storage node. You can use topic/partition to filter specific resource, and you can choose to filter resources that are lagging.",
new Arg[] { SERVER_URL, KAFKA_TOPIC_NAME }, new Arg[] { PARTITION, LAG_FILTER_ENABLED }
),
UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION(
"update-admin-operation-protocol-version", "Update the admin operation protocol version",
new Arg[] { URL, CLUSTER, ADMIN_OPERATION_PROTOCOL_VERSION }
);

private final String commandName;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -331,4 +331,16 @@ public void testAdminConfigureView() throws ParseException, IOException {
CommandLine finalCommandLine = commandLine;
Assert.assertThrows(() -> AdminTool.getConfigureStoreViewQueryParams(finalCommandLine));
}

@Test
public void testUpdateAdminOperationProtocolVersion() throws ParseException, IOException {
  // End-to-end wiring check: the command must parse its required args and reach the controller
  // client without throwing.
  String[] args = { "--update-admin-operation-protocol-version", "--url", "http://localhost:7036", "--cluster",
      "test-cluster", "--admin-operation-protocol-version", "1" };

  try {
    AdminTool.main(args);
  } catch (Exception e) {
    // Fixed the previously garbled failure message.
    Assert.fail("AdminTool should be able to update the admin operation protocol version", e);
  }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,11 @@ public class AdminTopicMetadataResponse extends ControllerResponse {
*/
private long upstreamOffset = -1;

/**
* The current admin operation protocol version. This cluster-level value is the source of truth for serializing and deserializing admin operation messages.
*/
private long adminOperationProtocolVersion = -1;

/**
 * @return the execution id stored on this response.
 */
public long getExecutionId() {
return executionId;
}
Expand All @@ -41,4 +46,12 @@ public void setOffset(long offset) {
/**
 * Sets the upstream offset on this response; the field defaults to -1 when never set.
 */
public void setUpstreamOffset(long upstreamOffset) {
this.upstreamOffset = upstreamOffset;
}

/**
 * Sets the cluster-level admin operation protocol version on this response.
 */
public void setAdminOperationProtocolVersion(long adminOperationProtocolVersion) {
this.adminOperationProtocolVersion = adminOperationProtocolVersion;
}

/**
 * @return the admin operation protocol version, or -1 when it was never set.
 */
public long getAdminOperationProtocolVersion() {
return adminOperationProtocolVersion;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,7 @@ public class ControllerApiConstants {
public static final String KAFKA_TOPIC_RETENTION_IN_MS = "kafka.topic.retention.in.ms";
public static final String KAFKA_TOPIC_MIN_IN_SYNC_REPLICA = "kafka.topic.min.in.sync.replica";
public static final String UPSTREAM_OFFSET = "upstream_offset";
public static final String ADMIN_OPERATION_PROTOCOL_VERSION = "admin_operation_protocol_version";

public static final String PERSONA_NAME = "persona_name";
public static final String PERSONA_OWNERS = "persona_owners";
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package com.linkedin.venice.controllerapi;

import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_PERMISSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.BATCH_JOB_HEARTBEAT_ENABLED;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.CLUSTER;
Expand Down Expand Up @@ -1356,6 +1357,14 @@ public ControllerResponse updateAdminTopicMetadata(
return request(ControllerRoute.UPDATE_ADMIN_TOPIC_METADATA, params, ControllerResponse.class);
}

/**
 * Asks the controller to update the cluster-level admin operation protocol version via the
 * {@code /update_admin_operation_protocol_version} route.
 *
 * @param clusterName name of the target cluster.
 * @param adminOperationProtocolVersion protocol version to persist.
 * @return the controller's response.
 */
public ControllerResponse updateAdminOperationProtocolVersion(
    String clusterName,
    Long adminOperationProtocolVersion) {
  return request(
      ControllerRoute.UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION,
      newParams().add(CLUSTER, clusterName).add(ADMIN_OPERATION_PROTOCOL_VERSION, adminOperationProtocolVersion),
      ControllerResponse.class);
}

public ControllerResponse deleteKafkaTopic(String topicName) {
QueryParams params = newParams().add(TOPIC, topicName);
return request(ControllerRoute.DELETE_KAFKA_TOPIC, params, ControllerResponse.class);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_CONTROLLED;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_PERMISSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.BACKUP_STRATEGY;
Expand Down Expand Up @@ -284,6 +285,10 @@ public enum ControllerRoute {
UPDATE_ADMIN_TOPIC_METADATA(
"/update_admin_topic_metadata", HttpMethod.POST, Arrays.asList(CLUSTER, EXECUTION_ID), NAME, OFFSET,
UPSTREAM_OFFSET
),
UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION(
"/update_admin_operation_protocol_version", HttpMethod.POST,
Arrays.asList(CLUSTER, ADMIN_OPERATION_PROTOCOL_VERSION)
), DELETE_KAFKA_TOPIC("/delete_kafka_topic", HttpMethod.POST, Arrays.asList(CLUSTER, TOPIC)),

CREATE_STORAGE_PERSONA(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -966,6 +966,8 @@ void updateAdminTopicMetadata(
Optional<Long> offset,
Optional<Long> upstreamOffset);

void updateAdminOperationProtocolVersion(String clusterName, Long adminOperationProtocolVersion);

void createStoragePersona(
String clusterName,
String name,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,17 +14,23 @@ public abstract class AdminTopicMetadataAccessor {
*/
private static final String UPSTREAM_OFFSET_KEY = "upstreamOffset";
private static final String EXECUTION_ID_KEY = "executionId";
private static final String ADMIN_OPERATION_PROTOCOL_VERSION_KEY = "adminOperationProtocolVersion";
private static final long UNDEFINED_VALUE = -1;

/**
* @return a map with {@linkplain AdminTopicMetadataAccessor#OFFSET_KEY}, {@linkplain AdminTopicMetadataAccessor#UPSTREAM_OFFSET_KEY},
* {@linkplain AdminTopicMetadataAccessor#EXECUTION_ID_KEY} specified to input values.
* {@linkplain AdminTopicMetadataAccessor#EXECUTION_ID_KEY}, {@linkplain AdminTopicMetadataAccessor#ADMIN_OPERATION_PROTOCOL_VERSION_KEY} specified to input values.
*/
public static Map<String, Long> generateMetadataMap(long localOffset, long upstreamOffset, long executionId) {
/**
 * Builds the metadata map persisted for the admin topic, keyed by the offset, upstream offset,
 * execution id, and admin operation protocol version keys declared on this class.
 *
 * @param localOffset last locally consumed offset.
 * @param upstreamOffset last consumed upstream offset.
 * @param executionId last execution id.
 * @param adminOperationProtocolVersion current admin operation protocol version.
 * @return a mutable map containing exactly the four entries above.
 */
public static Map<String, Long> generateMetadataMap(
    long localOffset,
    long upstreamOffset,
    long executionId,
    long adminOperationProtocolVersion) {
  Map<String, Long> metadataMap = new HashMap<>();
  metadataMap.put(EXECUTION_ID_KEY, executionId);
  metadataMap.put(ADMIN_OPERATION_PROTOCOL_VERSION_KEY, adminOperationProtocolVersion);
  metadataMap.put(OFFSET_KEY, localOffset);
  metadataMap.put(UPSTREAM_OFFSET_KEY, upstreamOffset);
  return metadataMap;
}

Expand All @@ -45,6 +51,10 @@ public static long getExecutionId(Map<String, Long> metadata) {
return metadata.getOrDefault(EXECUTION_ID_KEY, UNDEFINED_VALUE);
}

/**
 * @return the admin operation protocol version recorded in the given metadata map, or -1
 *         ({@code UNDEFINED_VALUE}) when the key is absent.
 */
public static long getAdminOperationProtocolVersion(Map<String, Long> metadata) {
return metadata.getOrDefault(ADMIN_OPERATION_PROTOCOL_VERSION_KEY, UNDEFINED_VALUE);
}

/**
* Update all relevant metadata for a given cluster in a single transaction.
* @param clusterName of the cluster at interest.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7119,13 +7119,15 @@ public Optional<AdminCommandExecutionTracker> getAdminCommandExecutionTracker(St
* @return cluster-level execution id, offset, upstream offset, and admin operation protocol version. If store name is specified, it returns store-level execution id (combined with the cluster-level protocol version).
*/
public Map<String, Long> getAdminTopicMetadata(String clusterName, Optional<String> storeName) {
Map<String, Long> metadata = adminConsumerServices.get(clusterName).getAdminTopicMetadata(clusterName);
if (storeName.isPresent()) {
Long executionId = executionIdAccessor.getLastSucceededExecutionIdMap(clusterName).get(storeName.get());
Long adminOperationProtocolVersion = AdminTopicMetadataAccessor.getAdminOperationProtocolVersion(metadata);
return executionId == null
? Collections.emptyMap()
: AdminTopicMetadataAccessor.generateMetadataMap(-1, -1, executionId);
: AdminTopicMetadataAccessor.generateMetadataMap(-1, -1, executionId, adminOperationProtocolVersion);
}
return adminConsumerServices.get(clusterName).getAdminTopicMetadata(clusterName);
return metadata;
}

/**
Expand All @@ -7138,17 +7140,43 @@ public void updateAdminTopicMetadata(
Optional<String> storeName,
Optional<Long> offset,
Optional<Long> upstreamOffset) {
Map<String, Long> metadata = adminConsumerServices.get(clusterName).getAdminTopicMetadata(clusterName);
if (storeName.isPresent()) {
executionIdAccessor.updateLastSucceededExecutionIdMap(clusterName, storeName.get(), executionId);
} else {
if (!offset.isPresent() || !upstreamOffset.isPresent()) {
throw new VeniceException("Offsets must be provided to update cluster-level admin topic metadata");
}

long currentAdminOperationProtocolVersion = AdminTopicMetadataAccessor.getAdminOperationProtocolVersion(metadata);
adminConsumerServices.get(clusterName)
.updateAdminTopicMetadata(clusterName, executionId, offset.get(), upstreamOffset.get());
.updateAdminTopicMetadata(
clusterName,
executionId,
offset.get(),
upstreamOffset.get(),
currentAdminOperationProtocolVersion);
}
}

/**
 * Update the admin operation protocol version in the admin topic metadata, preserving the
 * currently persisted offsets and execution id.
 *
 * @param clusterName cluster whose admin topic metadata is updated.
 * @param adminOperationProtocolVersion new protocol version to persist.
 */
public void updateAdminOperationProtocolVersion(String clusterName, Long adminOperationProtocolVersion) {
  // Resolve the consumer service once; the previous code performed the same map lookup twice.
  AdminConsumerService adminConsumerService = adminConsumerServices.get(clusterName);
  Map<String, Long> metadata = adminConsumerService.getAdminTopicMetadata(clusterName);

  Pair<Long, Long> currentOffsets = AdminTopicMetadataAccessor.getOffsets(metadata);
  // getExecutionId returns a primitive long; keep it primitive to avoid needless boxing.
  long executionId = AdminTopicMetadataAccessor.getExecutionId(metadata);

  adminConsumerService.updateAdminTopicMetadata(
      clusterName,
      executionId,
      currentOffsets.getFirst(),
      currentOffsets.getSecond(),
      adminOperationProtocolVersion);
}

/**
* @see Admin#getRoutersClusterConfig(String)
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4278,6 +4278,14 @@ public void updateAdminTopicMetadata(
throw new VeniceUnsupportedOperationException("updateAdminTopicMetadata");
}

/**
 * Unsupported operation in the parent controller.
 */
@Override
public void updateAdminOperationProtocolVersion(String clusterName, Long adminOperationProtocolVersion) {
  // Fixed copy-paste defect: the message previously named "updateAdminTopicMetadata".
  throw new VeniceUnsupportedOperationException("updateAdminOperationProtocolVersion");
}

/**
* Unsupported operation in the parent controller.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -186,9 +186,15 @@ public Map<String, Long> getAdminTopicMetadata(String clusterName) {
/**
* Update cluster-level execution id, offset, and upstream offset in a child colo.
*/
public void updateAdminTopicMetadata(String clusterName, long executionId, long offset, long upstreamOffset) {
public void updateAdminTopicMetadata(
String clusterName,
long executionId,
long offset,
long upstreamOffset,
long adminOperationProtocolVersion) {
if (clusterName.equals(config.getClusterName())) {
Map<String, Long> metadata = AdminTopicMetadataAccessor.generateMetadataMap(offset, upstreamOffset, executionId);
Map<String, Long> metadata = AdminTopicMetadataAccessor
.generateMetadataMap(offset, upstreamOffset, executionId, adminOperationProtocolVersion);
adminTopicMetadataAccessor.updateMetadata(clusterName, metadata);
} else {
throw new VeniceException(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -875,11 +875,20 @@ private void persistAdminTopicMetadata() {
// Skip since there are no new admin messages processed.
return;
}

Map<String, Long> metaData = adminTopicMetadataAccessor.getMetadata(clusterName);
Long currentAdminOperationProtocolVersion = AdminTopicMetadataAccessor.getAdminOperationProtocolVersion(metaData);
Map<String, Long> metadata = remoteConsumptionEnabled
? AdminTopicMetadataAccessor
.generateMetadataMap(localOffsetCheckpointAtStartTime, lastOffset, lastDelegatedExecutionId)
: AdminTopicMetadataAccessor
.generateMetadataMap(lastOffset, upstreamOffsetCheckpointAtStartTime, lastDelegatedExecutionId);
? AdminTopicMetadataAccessor.generateMetadataMap(
localOffsetCheckpointAtStartTime,
lastOffset,
lastDelegatedExecutionId,
currentAdminOperationProtocolVersion)
: AdminTopicMetadataAccessor.generateMetadataMap(
lastOffset,
upstreamOffsetCheckpointAtStartTime,
lastDelegatedExecutionId,
currentAdminOperationProtocolVersion);
adminTopicMetadataAccessor.updateMetadata(clusterName, metadata);
lastPersistedOffset = lastOffset;
lastPersistedExecutionId = lastDelegatedExecutionId;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@
import static com.linkedin.venice.controllerapi.ControllerRoute.STORE;
import static com.linkedin.venice.controllerapi.ControllerRoute.STORE_MIGRATION_ALLOWED;
import static com.linkedin.venice.controllerapi.ControllerRoute.UPDATE_ACL;
import static com.linkedin.venice.controllerapi.ControllerRoute.UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.controllerapi.ControllerRoute.UPDATE_ADMIN_TOPIC_METADATA;
import static com.linkedin.venice.controllerapi.ControllerRoute.UPDATE_CLUSTER_CONFIG;
import static com.linkedin.venice.controllerapi.ControllerRoute.UPDATE_KAFKA_TOPIC_LOG_COMPACTION;
Expand Down Expand Up @@ -616,6 +617,9 @@ public boolean startInner() throws Exception {
httpService.post(
UPDATE_ADMIN_TOPIC_METADATA.getPath(),
new VeniceParentControllerRegionStateHandler(admin, adminTopicMetadataRoutes.updateAdminTopicMetadata(admin)));
// Fixed: this route must be served by the updateAdminOperationProtocolVersion handler; the
// original copy-pasted the updateAdminTopicMetadata handler, leaving the new endpoint dead.
httpService.post(
    UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION.getPath(),
    new VeniceParentControllerRegionStateHandler(
        admin,
        adminTopicMetadataRoutes.updateAdminOperationProtocolVersion(admin)));

httpService.post(
DELETE_KAFKA_TOPIC.getPath(),
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
package com.linkedin.venice.controller.server;

import static com.linkedin.venice.controllerapi.ControllerApiConstants.ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.CLUSTER;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.EXECUTION_ID;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.NAME;
import static com.linkedin.venice.controllerapi.ControllerRoute.UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION;
Expand Down Expand Up @@ -47,6 +48,8 @@ public Route getAdminTopicMetadata(Admin admin) {
Map<String, Long> metadata = admin.getAdminTopicMetadata(clusterName, storeName);

responseObject.setExecutionId(AdminTopicMetadataAccessor.getExecutionId(metadata));
responseObject
.setAdminOperationProtocolVersion(AdminTopicMetadataAccessor.getAdminOperationProtocolVersion(metadata));
if (!storeName.isPresent()) {
Pair<Long, Long> offsets = AdminTopicMetadataAccessor.getOffsets(metadata);
responseObject.setOffset(offsets.getFirst());
Expand Down Expand Up @@ -103,4 +106,32 @@ public Route updateAdminTopicMetadata(Admin admin) {
return AdminSparkServer.OBJECT_MAPPER.writeValueAsString(responseObject);
};
}

/**
 * Spark route handling {@code /update_admin_operation_protocol_version}: checks the allow-list,
 * validates the required params, and delegates to
 * {@code Admin#updateAdminOperationProtocolVersion}.
 *
 * @param admin the admin implementation that performs the update.
 * @return the Spark {@link Route} serving this endpoint.
 */
public Route updateAdminOperationProtocolVersion(Admin admin) {
  return (request, response) -> {
    ControllerResponse responseObject = new ControllerResponse();
    response.type(HttpConstants.JSON);
    try {
      if (!isAllowListUser(request)) {
        response.status(HttpStatus.SC_FORBIDDEN);
        responseObject.setError("Only admin users are allowed to run " + request.url());
        responseObject.setErrorType(ErrorType.BAD_REQUEST);
        return AdminSparkServer.OBJECT_MAPPER.writeValueAsString(responseObject);
      }

      // Fixed copy-paste defect: validate against this route's own params. The original checked
      // UPDATE_ADMIN_TOPIC_METADATA's params, whose required list includes EXECUTION_ID, so every
      // legitimate request to this endpoint would have been rejected.
      AdminSparkServer.validateParams(request, UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION.getParams(), admin);
      String clusterName = request.queryParams(CLUSTER);
      long adminOperationProtocolVersion = Long.parseLong(request.queryParams(ADMIN_OPERATION_PROTOCOL_VERSION));

      responseObject.setCluster(clusterName);

      admin.updateAdminOperationProtocolVersion(clusterName, adminOperationProtocolVersion);
    } catch (Throwable e) {
      responseObject.setError(e);
      AdminSparkServer.handleError(new VeniceException(e), request, response);
    }
    return AdminSparkServer.OBJECT_MAPPER.writeValueAsString(responseObject);
  };
}
}
Loading
Loading