Commit 3a99bd9

[test] Fix race condition in DaVinciClientTest (#1361)
There is a race condition in DaVinciClientTest: tests that use different value schemas and run concurrently can fail with VeniceInconsistentSchemaException, because all of the tests in this file share the same input directory and the validation logic reads the first schema file it finds there. The fix is to create a new temp directory for each test so the schema files no longer collide.
1 parent 1b40264 commit 3a99bd9
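
To make the failure mode concrete, here is a minimal, self-contained sketch of the collision and of the per-test-directory fix, using only JDK APIs. The helpers writeSchemaFile and readFirstSchemaFile are hypothetical stand-ins for the test's Avro writers and the push job's schema validation; they are not the actual Venice code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class PerTestTempDirSketch {
  // Hypothetical stand-in for a test writing its input data: each test drops a
  // schema file describing its own value type into the input directory.
  static void writeSchemaFile(Path dir, String fileName, String schema) throws IOException {
    Files.writeString(dir.resolve(fileName), schema);
  }

  // Hypothetical stand-in for the validation described in the commit message:
  // it reads whichever schema file comes first in the directory.
  static String readFirstSchemaFile(Path dir) throws IOException {
    try (Stream<Path> files = Files.list(dir)) {
      Path first = files.sorted(Comparator.comparing(Path::toString)).findFirst().orElseThrow();
      return Files.readString(first);
    }
  }

  public static void main(String[] args) throws IOException {
    // Before the fix: two "tests" share one directory, so validation for the
    // int-schema test can pick up the string-schema file written by the other
    // test, i.e. an inconsistent schema.
    Path shared = Files.createTempDirectory("davinci-shared");
    writeSchemaFile(shared, "a-string-test.avsc", "\"string\"");
    writeSchemaFile(shared, "b-int-test.avsc", "\"int\"");
    System.out.println("shared dir validation sees: " + readFirstSchemaFile(shared)); // "string"

    // After the fix: each test creates its own temp directory, so validation
    // only ever sees that test's own schema file.
    Path intTestDir = Files.createTempDirectory("davinci-int-test");
    writeSchemaFile(intTestDir, "b-int-test.avsc", "\"int\"");
    System.out.println("per-test dir validation sees: " + readFirstSchemaFile(intTestDir)); // "int"
  }
}

In the actual change below, this per-test isolation comes from moving the getTempDataDirectory() call out of the @BeforeClass setup and into each setUpStore overload, then passing the resulting File down to the shared overload that builds the "file://" input path.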

File tree

2 files changed: +16 -9 lines

clients/da-vinci-client/src/main/java/com/linkedin/davinci/kafka/consumer/StoreIngestionTask.java

Lines changed: 0 additions & 1 deletion
@@ -3562,7 +3562,6 @@ private int processKafkaDataMessage(
       keyLen = keyBytes.length;
       // update checksum for this PUT message if needed.
       partitionConsumptionState.maybeUpdateExpectedChecksum(keyBytes, put);
-
       if (metricsEnabled && recordLevelMetricEnabled.get() && put.getSchemaId() == CHUNK_MANIFEST_SCHEMA_ID) {
         // This must be done before the recordTransformer modifies the putValue, otherwise the size will be incorrect.
         recordAssembledRecordSize(keyLen, put.getPutValue(), put.getReplicationMetadataPayload(), currentTimeMs);

internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/DaVinciClientTest.java

Lines changed: 16 additions & 8 deletions
@@ -144,14 +144,10 @@ public class DaVinciClientTest {
   private VeniceClusterWrapper cluster;
   private D2Client d2Client;
   private PubSubProducerAdapterFactory pubSubProducerAdapterFactory;
-  private File inputDir;
-  private String inputDirPath;
 
   @BeforeClass
   public void setUp() {
     Utils.thisIsLocalhost();
-    inputDir = getTempDataDirectory();
-    inputDirPath = "file://" + inputDir.getAbsolutePath();
     Properties clusterConfig = new Properties();
     clusterConfig.put(SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS, 1L);
     clusterConfig.put(PUSH_STATUS_STORE_ENABLED, true);
@@ -1616,6 +1612,8 @@ private void setUpStore(
     boolean chunkingEnabled = false;
     CompressionStrategy compressionStrategy = CompressionStrategy.NO_OP;
 
+    File inputDir = getTempDataDirectory();
+
     Runnable writeAvroFileRunnable = () -> {
       try {
         writeSimpleAvroFileWithIntToStringSchema(inputDir);
@@ -1632,7 +1630,8 @@ private void setUpStore(
         chunkingEnabled,
         compressionStrategy,
         writeAvroFileRunnable,
-        valueSchema);
+        valueSchema,
+        inputDir);
   }
 
   /*
@@ -1649,6 +1648,9 @@ private void setUpStore(
       int numKeys) {
     Consumer<UpdateStoreQueryParams> paramsConsumer = params -> {};
     Consumer<Properties> propertiesConsumer = properties -> {};
+
+    File inputDir = getTempDataDirectory();
+
     Runnable writeAvroFileRunnable = () -> {
       try {
         writeSimpleAvroFileWithIntToStringSchema(inputDir, customValue, numKeys);
@@ -1665,7 +1667,8 @@ private void setUpStore(
         chunkingEnabled,
         compressionStrategy,
         writeAvroFileRunnable,
-        valueSchema);
+        valueSchema,
+        inputDir);
   }
 
   /*
@@ -1681,6 +1684,8 @@ private void setUpStore(
       int numKeys) {
     Consumer<UpdateStoreQueryParams> paramsConsumer = params -> {};
     Consumer<Properties> propertiesConsumer = properties -> {};
+
+    File inputDir = getTempDataDirectory();
     Runnable writeAvroFileRunnable = () -> {
       try {
         writeSimpleAvroFileWithIntToIntSchema(inputDir, numKeys);
@@ -1697,7 +1702,8 @@ private void setUpStore(
         chunkingEnabled,
         compressionStrategy,
         writeAvroFileRunnable,
-        valueSchema);
+        valueSchema,
+        inputDir);
   }
 
   private void setUpStore(
@@ -1708,11 +1714,13 @@ private void setUpStore(
       boolean chunkingEnabled,
       CompressionStrategy compressionStrategy,
       Runnable writeAvroFileRunnable,
-      String valueSchema) {
+      String valueSchema,
+      File inputDir) {
     // Produce input data.
     writeAvroFileRunnable.run();
 
     // Setup VPJ job properties.
+    String inputDirPath = "file://" + inputDir.getAbsolutePath();
     Properties vpjProperties = defaultVPJProps(cluster, inputDirPath, storeName);
     propertiesConsumer.accept(vpjProperties);
     // Create & update store for test.
