Commit d8d9399

gaojieliu and xunyin8 authored
Hotfix 0.4.348 (#1314)
* [server] SN read quota versioned stats not initialized after restart (#1312)

  The currentVersion and backupVersion of ServerReadQuotaUsageStats are not set after a server restart, because handleStoreChanged is invoked for all stores while the store repo is undergoing refresh, before we initialize and register the store change listener in ReadQuotaEnforcementHandler (part of the ListenerService). As a result, metrics that depend on the current and backup versions will not show up properly until the store is updated. The fix: during initialization of ReadQuotaEnforcementHandler, invoke handleStoreChanged for all stores after the store change listener is registered. The bug is actually reproducible in an existing integration test, but it was not caught because the test was broken/misconfigured...

* [client-common] Added safeguard for compressor (#1307)

  Today, `compress`/`decompress` can still be invoked even after the compressor has already been closed, and for the zstd-based compressor that would crash. This PR adds a safeguard to fail fast if the compressor is already closed.

  * Fixed integration test failures
  * Minor tweak
  * Added a unit test
  * Fixed minor comment
  * Skipped locking for NoopCompressor

---------

Co-authored-by: Xun Yin <[email protected]>
1 parent 878304f commit d8d9399
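
As a quick illustration of the compressor safeguard, here is a minimal sketch (mirroring the unit test added in this commit, with the same throwaway "abc" dictionary) of the expected fail-fast behavior once a compressor is closed:

import com.github.luben.zstd.Zstd;
import com.linkedin.venice.compression.VeniceCompressor;
import com.linkedin.venice.compression.ZstdWithDictCompressor;
import com.linkedin.venice.exceptions.VeniceException;

public class CompressorCloseDemo {
  public static void main(String[] args) throws Exception {
    // Any zstd compressor built with a dictionary; "abc" is just a placeholder dictionary.
    VeniceCompressor compressor = new ZstdWithDictCompressor("abc".getBytes(), Zstd.maxCompressionLevel());
    byte[] payload = "hello venice".getBytes();

    // Normal operation while the compressor is open.
    byte[] compressed = compressor.compress(payload);
    System.out.println("Compressed " + payload.length + " bytes into " + compressed.length + " bytes");

    compressor.close();
    try {
      // After close(), the base class rejects the call instead of crashing in native zstd code.
      compressor.compress(payload);
    } catch (VeniceException e) {
      System.out.println("Rejected as expected: " + e.getMessage());
    }
  }
}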

File tree

9 files changed (+177, -25 lines)


internal/venice-client-common/src/main/java/com/linkedin/venice/compression/GzipCompressor.java

Lines changed: 7 additions & 7 deletions
@@ -24,7 +24,7 @@ public GzipCompressor() {
   }
 
   @Override
-  public byte[] compress(byte[] data) throws IOException {
+  protected byte[] compressInternal(byte[] data) throws IOException {
     ReusableGzipOutputStream out = gzipPool.getReusableGzipOutputStream();
     try {
       out.writeHeader();
@@ -37,7 +37,7 @@ public byte[] compress(byte[] data) throws IOException {
   }
 
   @Override
-  public void close() throws IOException {
+  protected void closeInternal() throws IOException {
     try {
       gzipPool.close();
     } catch (Exception e) {
@@ -47,7 +47,7 @@ public void close() throws IOException {
   }
 
   @Override
-  public ByteBuffer compress(ByteBuffer data, int startPositionOfOutput) throws IOException {
+  protected ByteBuffer compressInternal(ByteBuffer data, int startPositionOfOutput) throws IOException {
     /**
      * N.B.: We initialize the size of buffer in this output stream at the size of the deflated payload, which is not
      * ideal, but not necessarily bad either. The assumption is that GZIP usually doesn't compress our payloads that
@@ -74,7 +74,7 @@ public ByteBuffer compress(ByteBuffer data, int startPositionOfOutput) throws IO
   }
 
   @Override
-  public ByteBuffer decompress(ByteBuffer data) throws IOException {
+  protected ByteBuffer decompressInternal(ByteBuffer data) throws IOException {
     if (data.hasRemaining()) {
       if (data.hasArray()) {
         return decompress(data.array(), data.position(), data.remaining());
@@ -89,14 +89,14 @@ public ByteBuffer decompress(ByteBuffer data) throws IOException {
   }
 
   @Override
-  public ByteBuffer decompress(byte[] data, int offset, int length) throws IOException {
+  protected ByteBuffer decompressInternal(byte[] data, int offset, int length) throws IOException {
     try (InputStream gis = decompress(new ByteArrayInputStream(data, offset, length))) {
       return ByteBuffer.wrap(IOUtils.toByteArray(gis));
     }
   }
 
   @Override
-  public ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int length, int schemaHeader)
+  protected ByteBuffer decompressAndPrependSchemaHeaderInternal(byte[] data, int offset, int length, int schemaHeader)
       throws IOException {
     byte[] decompressedByteArray;
     try (InputStream gis = decompress(new ByteArrayInputStream(data, offset, length))) {
@@ -111,7 +111,7 @@ public ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int
   }
 
   @Override
-  public InputStream decompress(InputStream inputStream) throws IOException {
+  protected InputStream decompressInternal(InputStream inputStream) throws IOException {
     return new GZIPInputStream(inputStream);
   }

internal/venice-client-common/src/main/java/com/linkedin/venice/compression/NoopCompressor.java

Lines changed: 44 additions & 0 deletions
@@ -6,6 +6,9 @@
 import java.nio.ByteBuffer;
 
 
+/**
+ * Locking is not necessary for {@link NoopCompressor}, so this class overrides all the public APIs to avoid locking.
+ */
 public class NoopCompressor extends VeniceCompressor {
   public NoopCompressor() {
     super(CompressionStrategy.NO_OP);
@@ -16,6 +19,11 @@ public byte[] compress(byte[] data) throws IOException {
     return data;
   }
 
+  @Override
+  protected byte[] compressInternal(byte[] data) throws IOException {
+    throw new UnsupportedOperationException("compressInternal");
+  }
+
   @Override
   public ByteBuffer compress(ByteBuffer data, int startPositionOfOutput) throws IOException {
     if (startPositionOfOutput != 0) {
@@ -24,6 +32,11 @@ public ByteBuffer compress(ByteBuffer data, int startPositionOfOutput) throws IO
     return data;
   }
 
+  @Override
+  protected ByteBuffer compressInternal(ByteBuffer src, int startPositionOfOutput) throws IOException {
+    throw new UnsupportedOperationException("compressInternal");
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode();
@@ -34,11 +47,21 @@ public ByteBuffer decompress(ByteBuffer data) throws IOException {
     return data;
   }
 
+  @Override
+  protected ByteBuffer decompressInternal(ByteBuffer data) throws IOException {
+    throw new UnsupportedOperationException("decompressInternal");
+  }
+
   @Override
   public ByteBuffer decompress(byte[] data, int offset, int length) throws IOException {
     return ByteBuffer.wrap(data, offset, length);
   }
 
+  @Override
+  protected ByteBuffer decompressInternal(byte[] data, int offset, int length) throws IOException {
+    throw new UnsupportedOperationException("decompressInternal");
+  }
+
   @Override
   public ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int length, int schemaHeader)
       throws IOException {
@@ -50,11 +73,32 @@ public ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int
     return bb;
   }
 
+  @Override
+  protected ByteBuffer decompressAndPrependSchemaHeaderInternal(byte[] data, int offset, int length, int schemaHeader)
+      throws IOException {
+    throw new UnsupportedOperationException("decompressAndPrependSchemaHeaderInternal");
+  }
+
   @Override
   public InputStream decompress(InputStream inputStream) throws IOException {
     return inputStream;
   }
 
+  @Override
+  protected InputStream decompressInternal(InputStream inputStream) throws IOException {
+    throw new UnsupportedOperationException("decompressInternal");
+  }
+
+  @Override
+  public void close() throws IOException {
+    // do nothing
+  }
+
+  @Override
+  protected void closeInternal() throws IOException {
+    throw new UnsupportedOperationException("closeInternal");
+  }
+
   @Override
   public boolean equals(Object o) {
     if (o == this) {

internal/venice-client-common/src/main/java/com/linkedin/venice/compression/VeniceCompressor.java

Lines changed: 67 additions & 7 deletions
@@ -1,43 +1,103 @@
 package com.linkedin.venice.compression;
 
+import com.linkedin.venice.exceptions.VeniceException;
 import com.linkedin.venice.utils.ByteUtils;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 
 public abstract class VeniceCompressor implements Closeable {
   protected static final int SCHEMA_HEADER_LENGTH = ByteUtils.SIZE_OF_INT;
   private final CompressionStrategy compressionStrategy;
+  private boolean isClosed = false;
+  /**
+   * To avoid the race condition between 'compress'/'decompress' operation and 'close'.
+   */
+  private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
 
   protected VeniceCompressor(CompressionStrategy compressionStrategy) {
     this.compressionStrategy = compressionStrategy;
   }
 
-  public abstract byte[] compress(byte[] data) throws IOException;
+  interface CompressionRunnable<R> {
+    R run() throws IOException;
+  }
+
+  private <R> R executeWithSafeGuard(CompressionRunnable<R> runnable) throws IOException {
+    readWriteLock.readLock().lock();
+    try {
+      if (isClosed) {
+        throw new VeniceException("Compressor for " + getCompressionStrategy() + " has been closed");
+      }
+      return runnable.run();
+    } finally {
+      readWriteLock.readLock().unlock();
+    }
+  }
+
+  public byte[] compress(byte[] data) throws IOException {
+    return executeWithSafeGuard(() -> compressInternal(data));
+  }
 
-  public abstract ByteBuffer compress(ByteBuffer src, int startPositionOfOutput) throws IOException;
+  protected abstract byte[] compressInternal(byte[] data) throws IOException;
 
-  public abstract ByteBuffer decompress(ByteBuffer data) throws IOException;
+  public ByteBuffer compress(ByteBuffer src, int startPositionOfOutput) throws IOException {
+    return executeWithSafeGuard(() -> compressInternal(src, startPositionOfOutput));
+  }
 
-  public abstract ByteBuffer decompress(byte[] data, int offset, int length) throws IOException;
+  protected abstract ByteBuffer compressInternal(ByteBuffer src, int startPositionOfOutput) throws IOException;
+
+  public ByteBuffer decompress(ByteBuffer data) throws IOException {
+    return executeWithSafeGuard(() -> decompressInternal(data));
+  }
+
+  protected abstract ByteBuffer decompressInternal(ByteBuffer data) throws IOException;
+
+  public ByteBuffer decompress(byte[] data, int offset, int length) throws IOException {
+    return executeWithSafeGuard(() -> decompressInternal(data, offset, length));
+  }
+
+  protected abstract ByteBuffer decompressInternal(byte[] data, int offset, int length) throws IOException;
 
   /**
    * This method tries to decompress data and maybe prepend the schema header.
   * The returned ByteBuffer will be backed by byte array that starts with schema header, followed by the
   * decompressed data. The ByteBuffer will be positioned at the beginning of the decompressed data and the remaining of
   * the ByteBuffer will be the length of the decompressed data.
   */
-  public abstract ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int length, int schemaHeader)
-      throws IOException;
+  public ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int length, int schemaHeader)
+      throws IOException {
+    return executeWithSafeGuard(() -> decompressAndPrependSchemaHeaderInternal(data, offset, length, schemaHeader));
+  }
+
+  protected abstract ByteBuffer decompressAndPrependSchemaHeaderInternal(
+      byte[] data,
+      int offset,
+      int length,
+      int schemaHeader) throws IOException;
 
   public CompressionStrategy getCompressionStrategy() {
     return compressionStrategy;
   }
 
-  public abstract InputStream decompress(InputStream inputStream) throws IOException;
+  public InputStream decompress(InputStream inputStream) throws IOException {
+    return executeWithSafeGuard(() -> decompressInternal(inputStream));
+  }
+
+  protected abstract InputStream decompressInternal(InputStream inputStream) throws IOException;
 
   public void close() throws IOException {
+    readWriteLock.writeLock().lock();
+    try {
+      isClosed = true;
+      closeInternal();
+    } finally {
+      readWriteLock.writeLock().unlock();
+    }
   }
+
+  protected abstract void closeInternal() throws IOException;
 }
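
The design choice here is a plain read/write lock: every compress/decompress entry point takes the read lock (so concurrent operations do not block each other) and fails fast once isClosed is set, while close() takes the write lock so it cannot interleave with an in-flight operation. A distilled, self-contained sketch of the same pattern, with illustrative names rather than Venice APIs:

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustration of the safeguard pattern above: operations share the read lock and
// fail fast once closed; close() takes the write lock, so it waits for in-flight
// work and later calls can no longer touch released (e.g. native zstd) resources.
final class GuardedResource implements AutoCloseable {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private boolean closed = false;

  byte[] process(byte[] data) {
    lock.readLock().lock();
    try {
      if (closed) {
        throw new IllegalStateException("Resource has been closed");
      }
      return data; // real compression/decompression work would happen here
    } finally {
      lock.readLock().unlock();
    }
  }

  @Override
  public void close() {
    lock.writeLock().lock();
    try {
      closed = true; // release underlying resources here
    } finally {
      lock.writeLock().unlock();
    }
  }
}

NoopCompressor opts out of this locking entirely by overriding the public methods, since it holds no underlying resource whose release could race with a caller.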

internal/venice-client-common/src/main/java/com/linkedin/venice/compression/ZstdWithDictCompressor.java

Lines changed: 7 additions & 7 deletions
@@ -47,12 +47,12 @@ public ZstdWithDictCompressor(final byte[] dictionary, int level) {
   }
 
   @Override
-  public byte[] compress(byte[] data) {
+  protected byte[] compressInternal(byte[] data) {
     return compressor.get().compress(data);
   }
 
   @Override
-  public ByteBuffer compress(ByteBuffer data, int startPositionOfOutput) throws IOException {
+  protected ByteBuffer compressInternal(ByteBuffer data, int startPositionOfOutput) throws IOException {
     long maxDstSize = Zstd.compressBound(data.remaining());
     if (maxDstSize + startPositionOfOutput > Integer.MAX_VALUE) {
       throw new ZstdException(Zstd.errGeneric(), "Max output size is greater than Integer.MAX_VALUE");
@@ -87,7 +87,7 @@ public ByteBuffer compress(ByteBuffer data, int startPositionOfOutput) throws IO
   }
 
   @Override
-  public ByteBuffer decompress(ByteBuffer data) throws IOException {
+  protected ByteBuffer decompressInternal(ByteBuffer data) throws IOException {
     if (data.hasRemaining()) {
       if (data.hasArray()) {
         return decompress(data.array(), data.position(), data.remaining());
@@ -107,7 +107,7 @@ public ByteBuffer decompress(ByteBuffer data) throws IOException {
   }
 
   @Override
-  public ByteBuffer decompress(byte[] data, int offset, int length) throws IOException {
+  protected ByteBuffer decompressInternal(byte[] data, int offset, int length) throws IOException {
     int expectedSize = validateExpectedDecompressedSize(Zstd.decompressedSize(data, offset, length));
     ByteBuffer returnedData = ByteBuffer.allocate(expectedSize);
     int actualSize = decompressor.get()
@@ -124,7 +124,7 @@ public ByteBuffer decompress(byte[] data, int offset, int length) throws IOExcep
   }
 
   @Override
-  public ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int length, int schemaHeader)
+  protected ByteBuffer decompressAndPrependSchemaHeaderInternal(byte[] data, int offset, int length, int schemaHeader)
       throws IOException {
     int expectedDecompressedDataSize = validateExpectedDecompressedSize(Zstd.decompressedSize(data, offset, length));
 
@@ -138,12 +138,12 @@ public ByteBuffer decompressAndPrependSchemaHeader(byte[] data, int offset, int
   }
 
   @Override
-  public InputStream decompress(InputStream inputStream) throws IOException {
+  protected InputStream decompressInternal(InputStream inputStream) throws IOException {
     return new ZstdInputStream(inputStream).setDict(this.dictDecompress);
   }
 
   @Override
-  public void close() throws IOException {
+  protected void closeInternal() throws IOException {
     this.compressor.close();
     this.decompressor.close();
     IOUtils.closeQuietly(this.dictCompress);

internal/venice-client-common/src/test/java/com/linkedin/venice/compression/TestVeniceCompressor.java

Lines changed: 18 additions & 1 deletion
@@ -1,6 +1,11 @@
 package com.linkedin.venice.compression;
 
+import static org.testng.Assert.assertThrows;
+import static org.testng.Assert.assertTrue;
+import static org.testng.Assert.expectThrows;
+
 import com.github.luben.zstd.Zstd;
+import com.linkedin.venice.exceptions.VeniceException;
 import com.linkedin.venice.utils.ByteUtils;
 import com.linkedin.venice.utils.TestUtils;
 import com.linkedin.venice.utils.Time;
@@ -14,6 +19,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.testng.Assert;
@@ -173,7 +179,7 @@ private enum SourceDataType {
 
   @Test
   public void testZSTDThrowsExceptionOnNullDictionary() {
-    Assert.assertThrows(
+    assertThrows(
        () -> new CompressorFactory()
            .createVersionSpecificCompressorIfNotExist(CompressionStrategy.ZSTD_WITH_DICT, "foo_v1", null));
   }
@@ -205,4 +211,15 @@ public void testCompressorEqual() {
       }
     }
   }
+
+  @Test
+  public void testCompressorClose() throws IOException {
+    VeniceCompressor compressor = new ZstdWithDictCompressor("abc".getBytes(), Zstd.maxCompressionLevel());
+    String largePayload = RandomStringUtils.randomAlphabetic(500000);
+    compressor.compress(largePayload.getBytes());
+    compressor.close();
+    VeniceException exception =
+        expectThrows(VeniceException.class, () -> compressor.compress(ByteBuffer.wrap(largePayload.getBytes()), 4));
+    assertTrue(exception.getMessage().contains("has been closed"));
+  }
 }

internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/fastclient/FastClientIndividualFeatureConfigurationTest.java

Lines changed: 5 additions & 1 deletion
@@ -186,6 +186,10 @@ public void testServerReadQuota() throws Exception {
       LOGGER.info("RESTARTING servers");
       veniceCluster.stopAndRestartVeniceServer(veniceServerWrapper.getPort());
     }
+    serverMetrics.clear();
+    for (int i = 0; i < veniceCluster.getVeniceServers().size(); i++) {
+      serverMetrics.add(veniceCluster.getVeniceServers().get(i).getMetricsRepository());
+    }
     for (int j = 0; j < 5; j++) {
       for (int i = 0; i < recordCnt; i++) {
         String key = keyPrefix + i;
@@ -198,7 +202,7 @@ public void testServerReadQuota() throws Exception {
       quotaRequestedQPSSum += serverMetric.getMetric(readQuotaRequestedQPSString).value();
       assertEquals(serverMetric.getMetric(readQuotaAllowedUnintentionally).value(), 0d);
     }
-    assertTrue(quotaRequestedQPSSum >= 0, "Quota request sum: " + quotaRequestedQPSSum);
+    assertTrue(quotaRequestedQPSSum > 0, "Quota request sum: " + quotaRequestedQPSSum);
   }
 
   @Test(timeOut = TIME_OUT)

services/venice-server/src/main/java/com/linkedin/venice/listener/ReadQuotaEnforcementHandler.java

Lines changed: 2 additions & 0 deletions
@@ -173,6 +173,8 @@ public final void init() {
       for (Version version: versions) {
         customizedViewRepository.subscribeRoutingDataChange(version.kafkaTopicName(), this);
       }
+      // also invoke handle store change to ensure corresponding token bucket and stats are initialized.
+      handleStoreChanged(store);
     }
     this.initializedVolatile = true;
   }
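
The ordering of the fix matters: the store change listener is registered first, and handleStoreChanged is then replayed for every store that already exists, so stores whose versions changed before the listener was attached still get their token buckets and stats initialized. A hypothetical sketch of that init ordering (StoreRepo and the listener type are illustrative stand-ins, not the actual Venice interfaces):

import java.util.List;
import java.util.function.Consumer;

// Hypothetical sketch of the "register the listener, then replay existing state" ordering.
final class QuotaInitSketch {
  interface StoreRepo {
    List<String> getAllStores();
    void registerStoreChangeListener(Consumer<String> listener);
  }

  static void init(StoreRepo repo, Consumer<String> handleStoreChanged) {
    // 1. Register the listener first so no future change is missed.
    repo.registerStoreChangeListener(handleStoreChanged);
    // 2. Replay the handler for stores that already exist, so per-version stats and
    //    token buckets are initialized even if no further store update arrives.
    for (String store: repo.getAllStores()) {
      handleStoreChanged.accept(store);
    }
  }
}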
