Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[admin-tool][controller] Add new config into ZK and allow admin-tool to update the config #1418

Draft
wants to merge 3 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -597,6 +597,9 @@ public static void main(String[] args) throws Exception {
case CLUSTER_BATCH_TASK:
clusterBatchTask(cmd);
break;
case UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION:
updateAdminOperationProtocolVersion(cmd);
break;
default:
StringJoiner availableCommands = new StringJoiner(", ");
for (Command c: Command.values()) {
Expand Down Expand Up @@ -3270,6 +3273,16 @@ private static void dumpHostHeartbeat(CommandLine cmd) throws Exception {
}
}

/**
 * Handles the {@code update-admin-operation-protocol-version} admin-tool command: reads the
 * target cluster and the desired protocol version from the parsed command line, asks the
 * controller to persist the new version, and prints the controller's response.
 */
private static void updateAdminOperationProtocolVersion(CommandLine cmd) throws Exception {
  String cluster = getRequiredArgument(cmd, Arg.CLUSTER, Command.UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION);
  String rawVersion =
      getRequiredArgument(cmd, Arg.ADMIN_OPERATION_PROTOCOL_VERSION, Command.UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION);
  // Fail fast with a descriptive error when the supplied version is not a valid long.
  long version = Utils.parseLongFromString(rawVersion, Arg.ADMIN_OPERATION_PROTOCOL_VERSION.name());
  printObject(controllerClient.updateAdminOperationProtocolVersion(cluster, version));
}

private static void migrateVeniceZKPaths(CommandLine cmd) throws Exception {
Set<String> clusterNames = Utils.parseCommaSeparatedStringToSet(getRequiredArgument(cmd, Arg.CLUSTER_LIST));
String srcZKUrl = getRequiredArgument(cmd, Arg.SRC_ZOOKEEPER_URL);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,10 @@ public enum Arg {
),
DAVINCI_HEARTBEAT_REPORTED(
"dvc-heartbeat-reported", "dvchb", true, "Flag to indicate whether DVC is bootstrapping and sending heartbeats"
), ENABLE_STORE_MIGRATION("enable-store-migration", "esm", true, "Toggle store migration store config");
), ENABLE_STORE_MIGRATION("enable-store-migration", "esm", true, "Toggle store migration store config"),
ADMIN_OPERATION_PROTOCOL_VERSION(
"admin-operation-protocol-version", "aopv", true, "Admin operation protocol version"
);

private final String argName;
private final String first;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import static com.linkedin.venice.Arg.ACCESS_CONTROL;
import static com.linkedin.venice.Arg.ACL_PERMS;
import static com.linkedin.venice.Arg.ACTIVE_ACTIVE_REPLICATION_ENABLED;
import static com.linkedin.venice.Arg.ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.Arg.ALLOW_STORE_MIGRATION;
import static com.linkedin.venice.Arg.AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED;
import static com.linkedin.venice.Arg.BACKUP_FOLDER;
Expand Down Expand Up @@ -582,6 +583,10 @@ public enum Command {
"dump-host-heartbeat",
"Dump all heartbeat belong to a certain storage node. You can use topic/partition to filter specific resource, and you can choose to filter resources that are lagging.",
new Arg[] { SERVER_URL, KAFKA_TOPIC_NAME }, new Arg[] { PARTITION, LAG_FILTER_ENABLED }
),
UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION(
"update-admin-operation-protocol-version", "Update the admin operation protocol version",
new Arg[] { URL, CLUSTER, ADMIN_OPERATION_PROTOCOL_VERSION }
);

private final String commandName;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -412,4 +412,16 @@ public void testAdminConfigureView() throws ParseException, IOException {
CommandLine finalCommandLine = commandLine;
Assert.assertThrows(() -> AdminTool.getConfigureStoreViewQueryParams(finalCommandLine));
}

/**
 * Verifies that the admin tool accepts the {@code --update-admin-operation-protocol-version}
 * command with its required arguments (url, cluster, version) without throwing.
 */
@Test
public void testUpdateAdminOperationProtocolVersion() throws ParseException, IOException {
  String[] args = { "--update-admin-operation-protocol-version", "--url", "http://localhost:7036", "--cluster",
      "test-cluster", "--admin-operation-protocol-version", "1" };

  try {
    AdminTool.main(args);
  } catch (Exception e) {
    // Fixed garbled failure message: it previously read "should allow admin topic metadata
    // to be updated admin operation version".
    Assert.fail("AdminTool should be able to update the admin operation protocol version", e);
  }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,11 @@ public class AdminTopicMetadataResponse extends ControllerResponse {
*/
private long upstreamOffset = -1;

/**
 * The current admin operation protocol version. This value is cluster-level and serves as the
 * source of truth for serializing/deserializing admin operation messages. Remains -1 until a
 * version has been set on this response.
 */
private long adminOperationProtocolVersion = -1;

/** @return the execution id carried by this admin topic metadata response. */
public long getExecutionId() {
  return executionId;
}
Expand All @@ -41,4 +46,12 @@ public void setOffset(long offset) {
/** Sets the upstream offset carried by this admin topic metadata response. */
public void setUpstreamOffset(long upstreamOffset) {
  this.upstreamOffset = upstreamOffset;
}

/** Sets the cluster-level admin operation protocol version carried by this response. */
public void setAdminOperationProtocolVersion(long adminOperationProtocolVersion) {
  this.adminOperationProtocolVersion = adminOperationProtocolVersion;
}

/** @return the admin operation protocol version, or -1 if it has not been set. */
public long getAdminOperationProtocolVersion() {
  return adminOperationProtocolVersion;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,7 @@ public class ControllerApiConstants {
public static final String KAFKA_TOPIC_RETENTION_IN_MS = "kafka.topic.retention.in.ms";
public static final String KAFKA_TOPIC_MIN_IN_SYNC_REPLICA = "kafka.topic.min.in.sync.replica";
public static final String UPSTREAM_OFFSET = "upstream_offset";
public static final String ADMIN_OPERATION_PROTOCOL_VERSION = "admin_operation_protocol_version";

public static final String PERSONA_NAME = "persona_name";
public static final String PERSONA_OWNERS = "persona_owners";
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package com.linkedin.venice.controllerapi;

import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_PERMISSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.BATCH_JOB_HEARTBEAT_ENABLED;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.CLUSTER;
Expand Down Expand Up @@ -1362,6 +1363,14 @@ public ControllerResponse updateAdminTopicMetadata(
return request(ControllerRoute.UPDATE_ADMIN_TOPIC_METADATA, params, ControllerResponse.class);
}

/**
 * Asks the controller to update the cluster-level admin operation protocol version via the
 * {@code /update_admin_operation_protocol_version} route.
 *
 * @param clusterName name of the cluster whose protocol version should change
 * @param adminOperationProtocolVersion new protocol version to persist
 * @return the controller's response to the update request
 */
public ControllerResponse updateAdminOperationProtocolVersion(
    String clusterName,
    Long adminOperationProtocolVersion) {
  QueryParams params = newParams().add(CLUSTER, clusterName);
  params = params.add(ADMIN_OPERATION_PROTOCOL_VERSION, adminOperationProtocolVersion);
  return request(ControllerRoute.UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION, params, ControllerResponse.class);
}

public ControllerResponse deleteKafkaTopic(String topicName) {
QueryParams params = newParams().add(TOPIC, topicName);
return request(ControllerRoute.DELETE_KAFKA_TOPIC, params, ControllerResponse.class);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_CONTROLLED;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_PERMISSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.ADMIN_OPERATION_PROTOCOL_VERSION;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED;
import static com.linkedin.venice.controllerapi.ControllerApiConstants.BACKUP_STRATEGY;
Expand Down Expand Up @@ -284,6 +285,10 @@ public enum ControllerRoute {
UPDATE_ADMIN_TOPIC_METADATA(
"/update_admin_topic_metadata", HttpMethod.POST, Arrays.asList(CLUSTER, EXECUTION_ID), NAME, OFFSET,
UPSTREAM_OFFSET
),
UPDATE_ADMIN_OPERATION_PROTOCOL_VERSION(
"/update_admin_operation_protocol_version", HttpMethod.POST,
Arrays.asList(CLUSTER, ADMIN_OPERATION_PROTOCOL_VERSION)
), DELETE_KAFKA_TOPIC("/delete_kafka_topic", HttpMethod.POST, Arrays.asList(CLUSTER, TOPIC)),

CREATE_STORAGE_PERSONA(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,4 +63,5 @@ message AdminTopicGrpcMetadata {
optional string storeName = 3;
optional int64 offset = 4;
optional int64 upstreamOffset = 5;
optional int64 adminOperationProtocolVersion = 6;
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@

import com.linkedin.venice.AdminTool;
import com.linkedin.venice.Arg;
import com.linkedin.venice.controllerapi.AdminTopicMetadataResponse;
import com.linkedin.venice.controllerapi.ControllerClient;
import com.linkedin.venice.controllerapi.MultiStoreResponse;
import com.linkedin.venice.controllerapi.NewStoreResponse;
Expand All @@ -19,14 +20,18 @@
import com.linkedin.venice.integration.utils.ServiceFactory;
import com.linkedin.venice.integration.utils.VeniceClusterCreateOptions;
import com.linkedin.venice.integration.utils.VeniceClusterWrapper;
import com.linkedin.venice.integration.utils.VeniceControllerWrapper;
import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions;
import com.linkedin.venice.integration.utils.VeniceServerWrapper;
import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper;
import com.linkedin.venice.meta.Version;
import com.linkedin.venice.pubsub.PubSubTopicRepository;
import com.linkedin.venice.pubsub.api.PubSubTopic;
import com.linkedin.venice.pubsub.manager.TopicManager;
import com.linkedin.venice.utils.TestUtils;
import com.linkedin.venice.utils.Time;
import com.linkedin.venice.utils.Utils;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.helix.zookeeper.impl.client.ZkClient;
Expand Down Expand Up @@ -186,4 +191,43 @@ public void testNodeReplicasReadinessCommand() throws Exception {
clusterName, "--storage-node", Utils.getHelixNodeIdentifier(Utils.getHostName(), server.getPort()) };
AdminTool.main(nodeReplicasReadinessArgs);
}

/**
 * End-to-end check of the admin tool's {@code --update-admin-operation-protocol-version}
 * command: the version starts unset (-1), is updated to 80 through the tool against the parent
 * controller, and the change is observed via the admin topic metadata endpoint.
 */
@Test(timeOut = 4 * TEST_TIMEOUT)
public void testUpdateAdminOperationVersion() throws Exception {
  Long currentVersion = -1L;
  Long newVersion = 80L;
  // Fixed: stray ";)" in the resource list and a mojibake typo ("con†roller") in a comment.
  try (VeniceTwoLayerMultiRegionMultiClusterWrapper venice =
      ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(
          new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1)
              .numberOfClusters(1)
              .numberOfParentControllers(1)
              .numberOfChildControllers(1)
              .numberOfServers(1)
              .numberOfRouters(1)
              .replicationFactor(1)
              .build())) {
    String clusterName = venice.getClusterNames()[0];

    // Get the parent controller
    VeniceControllerWrapper controller = venice.getParentControllers().get(0);
    // Close the client when done so its underlying transport is not leaked between tests.
    try (ControllerClient controllerClient = new ControllerClient(clusterName, controller.getControllerUrl())) {
      // Setup the original metadata: the version should report as unset (-1).
      AdminTopicMetadataResponse originalMetadata = controllerClient.getAdminTopicMetadata(Optional.empty());
      Assert.assertEquals(originalMetadata.getAdminOperationProtocolVersion(), (long) currentVersion);

      // Update the admin operation version to newVersion - 80
      String[] updateAdminOperationVersionArgs =
          { "--update-admin-operation-protocol-version", "--url", controller.getControllerUrl(), "--cluster",
              clusterName, "--admin-operation-protocol-version", newVersion.toString() };

      AdminTool.main(updateAdminOperationVersionArgs);

      // Verify the admin operation metadata version is updated
      TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, () -> {
        AdminTopicMetadataResponse updatedMetadata = controllerClient.getAdminTopicMetadata(Optional.empty());
        Assert.assertEquals(updatedMetadata.getAdminOperationProtocolVersion(), (long) newVersion);
      });
    }
  }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@
import com.linkedin.venice.writer.VeniceWriter;
import com.linkedin.venice.writer.VeniceWriterOptions;
import java.io.IOException;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
Expand Down Expand Up @@ -196,6 +198,51 @@ public void testParallelAdminExecutionTasks() throws IOException, InterruptedExc
}
}

/**
 * Checks the Admin API path for the admin operation protocol version on a child controller:
 * seeds the metadata with -1 via the consumer service, then updates it to 18 via
 * {@code Admin#updateAdminOperationProtocolVersion} and observes the change in the metadata map.
 */
@Test(timeOut = 2 * TIMEOUT)
public void testUpdateAdminOperationVersion() {
  // Metadata map key for the protocol version; declared once instead of repeating the
  // magic string in every assertion.
  final String versionKey = "adminOperationProtocolVersion";
  Long currentVersion = -1L;
  Long newVersion = 18L;
  try (VeniceTwoLayerMultiRegionMultiClusterWrapper venice =
      ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(
          new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1)
              .numberOfClusters(1)
              .numberOfParentControllers(1)
              .numberOfChildControllers(1)
              .numberOfServers(1)
              .numberOfRouters(1)
              .replicationFactor(1)
              .build())) {

    String clusterName = venice.getClusterNames()[0];

    // Get the child controller
    VeniceControllerWrapper controller = venice.getChildRegions().get(0).getLeaderController(clusterName);
    Admin admin = controller.getVeniceAdmin();

    AdminConsumerService adminConsumerService = controller.getAdminConsumerServiceByCluster(clusterName);

    // Setup the original metadata
    adminConsumerService.updateAdminOperationProtocolVersion(clusterName, currentVersion);

    // Verify that the original metadata is correct
    TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, () -> {
      Map<String, Long> adminTopicMetadata = admin.getAdminTopicMetadata(clusterName, Optional.empty());
      Assert.assertTrue(adminTopicMetadata.containsKey(versionKey));
      Assert.assertEquals(adminTopicMetadata.get(versionKey), currentVersion);
    });

    // Update the admin operation version
    admin.updateAdminOperationProtocolVersion(clusterName, newVersion);

    // Verify the admin operation metadata version is updated
    TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, () -> {
      Map<String, Long> adminTopicMetadata = admin.getAdminTopicMetadata(clusterName, Optional.empty());
      Assert.assertTrue(adminTopicMetadata.containsKey(versionKey));
      Assert.assertEquals(adminTopicMetadata.get(versionKey), newVersion);
    });
  }
}

private Runnable getRunnable(
VeniceTwoLayerMultiRegionMultiClusterWrapper venice,
String storeName,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -953,6 +953,8 @@ void updateAdminTopicMetadata(
Optional<Long> offset,
Optional<Long> upstreamOffset);

/**
 * Updates the cluster-level admin operation protocol version.
 *
 * @param clusterName name of the cluster whose protocol version should be updated
 * @param adminOperationProtocolVersion the new protocol version to record
 */
void updateAdminOperationProtocolVersion(String clusterName, Long adminOperationProtocolVersion);

void createStoragePersona(
String clusterName,
String name,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import com.linkedin.venice.utils.Pair;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;


public abstract class AdminTopicMetadataAccessor {
Expand All @@ -14,20 +15,47 @@ public abstract class AdminTopicMetadataAccessor {
*/
private static final String UPSTREAM_OFFSET_KEY = "upstreamOffset";
private static final String EXECUTION_ID_KEY = "executionId";
private static final String ADMIN_OPERATION_PROTOCOL_VERSION_KEY = "adminOperationProtocolVersion";
private static final long UNDEFINED_VALUE = -1;

/**
* @return a map with {@linkplain AdminTopicMetadataAccessor#OFFSET_KEY}, {@linkplain AdminTopicMetadataAccessor#UPSTREAM_OFFSET_KEY},
* {@linkplain AdminTopicMetadataAccessor#EXECUTION_ID_KEY} specified to input values.
* {@linkplain AdminTopicMetadataAccessor#EXECUTION_ID_KEY}, {@linkplain AdminTopicMetadataAccessor#ADMIN_OPERATION_PROTOCOL_VERSION_KEY}
* specified to input values.
*/
public static Map<String, Long> generateMetadataMap(long localOffset, long upstreamOffset, long executionId) {
public static Map<String, Long> generateMetadataMap(
Optional<Long> localOffset,
Optional<Long> upstreamOffset,
Optional<Long> executionId,
Optional<Long> adminOperationProtocolVersion) {
Map<String, Long> metadata = new HashMap<>();
metadata.put(OFFSET_KEY, localOffset);
metadata.put(UPSTREAM_OFFSET_KEY, upstreamOffset);
metadata.put(EXECUTION_ID_KEY, executionId);
localOffset.ifPresent(offset -> metadata.put(OFFSET_KEY, offset));
upstreamOffset.ifPresent(offset -> metadata.put(UPSTREAM_OFFSET_KEY, offset));
executionId.ifPresent(id -> metadata.put(EXECUTION_ID_KEY, id));
adminOperationProtocolVersion.ifPresent(version -> metadata.put(ADMIN_OPERATION_PROTOCOL_VERSION_KEY, version));
return metadata;
}

/**
 * Backward-compatible overload that records the local offset, upstream offset and execution id
 * but leaves the admin operation protocol version unset (its key is absent from the map).
 */
public static Map<String, Long> generateMetadataMap(long localOffset, long upstreamOffset, long executionId) {
  return generateMetadataMap(
      Optional.of(localOffset),
      Optional.of(upstreamOffset),
      Optional.of(executionId),
      Optional.empty());
}

/**
 * Overload that records all four metadata values, including the admin operation protocol version.
 */
public static Map<String, Long> generateMetadataMap(
    long localOffset,
    long upstreamOffset,
    long executionId,
    long adminOperationProtocolVersion) {
  return generateMetadataMap(
      Optional.of(localOffset),
      Optional.of(upstreamOffset),
      Optional.of(executionId),
      Optional.of(adminOperationProtocolVersion));
}

/**
* @return a pair of values to which the specified keys are mapped to {@linkplain AdminTopicMetadataAccessor#OFFSET_KEY}
* and {@linkplain AdminTopicMetadataAccessor#UPSTREAM_OFFSET_KEY}.
Expand All @@ -46,7 +74,14 @@ public static long getExecutionId(Map<String, Long> metadata) {
}

/**
 * @return the value mapped to {@linkplain AdminTopicMetadataAccessor#ADMIN_OPERATION_PROTOCOL_VERSION_KEY},
 *         or the undefined sentinel (-1) when the map contains no such key.
 */
public static long getAdminOperationProtocolVersion(Map<String, Long> metadata) {
  return metadata.containsKey(ADMIN_OPERATION_PROTOCOL_VERSION_KEY)
      ? metadata.get(ADMIN_OPERATION_PROTOCOL_VERSION_KEY)
      : UNDEFINED_VALUE;
}

/**
* Update all relevant metadata for a given cluster in a single transaction with information provided in metadata.
* @param clusterName of the cluster at interest.
* @param metadata map containing relevant information.
*/
Expand Down
Loading
Loading