
Commit 1448b2b

Replacing all instances of synchronized blocks with ReentrantLock (#3116)
1 parent 38a30dc commit 1448b2b

File tree: 11 files changed (+168, -57 lines)

11 files changed

+168
-57
lines changed

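Every change in this commit applies the same mechanical transformation: a synchronized (monitor) { ... } block becomes an explicit java.util.concurrent.locks.ReentrantLock that is acquired before a try block and released in its finally clause, so the lock is freed even when the guarded code throws. A minimal before/after sketch of the pattern (the StateHolder class and its members are illustrative only, not taken from the Lettuce sources):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class StateHolder {

    // Before: private final Object stateLock = new Object();
    private final Lock stateLock = new ReentrantLock();

    private String state;

    void update(String newState) {
        // Before: synchronized (stateLock) { this.state = newState; }
        stateLock.lock();
        try {
            this.state = newState;
        } finally {
            // Releasing in finally keeps the lock balanced even if the body throws.
            stateLock.unlock();
        }
    }
}

Both forms give the same mutual exclusion and memory-visibility guarantees; a common motivation for this kind of migration (not stated in the commit itself) is that virtual threads can unmount while blocked on a ReentrantLock but pin their carrier thread inside a synchronized block on pre-JDK-24 runtimes.
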
src/main/java/io/lettuce/core/cluster/PooledClusterConnectionProvider.java

Lines changed: 42 additions & 10 deletions

@@ -28,6 +28,8 @@
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Function;
 import java.util.stream.Collectors;

@@ -63,6 +65,7 @@
  * @param <K> Key type.
  * @param <V> Value type.
  * @author Mark Paluch
+ * @author Tihomir Mateev
  * @since 3.0
  */
 @SuppressWarnings({ "unchecked", "rawtypes" })

@@ -72,7 +75,7 @@ class PooledClusterConnectionProvider<K, V>
     private static final InternalLogger logger = InternalLoggerFactory.getInstance(PooledClusterConnectionProvider.class);

     // Contains NodeId-identified and HostAndPort-identified connections.
-    private final Object stateLock = new Object();
+    private final Lock stateLock = new ReentrantLock();

     private final boolean debugEnabled = logger.isDebugEnabled();

@@ -156,8 +159,12 @@ public CompletableFuture<StatefulRedisConnection<K, V>> getConnectionAsync(Conne
     private CompletableFuture<StatefulRedisConnection<K, V>> getWriteConnection(int slot) {

         CompletableFuture<StatefulRedisConnection<K, V>> writer;// avoid races when reconfiguring partitions.
-        synchronized (stateLock) {
+
+        stateLock.lock();
+        try {
             writer = writers[slot];
+        } finally {
+            stateLock.unlock();
         }

         if (writer == null) {

@@ -177,10 +184,13 @@ private CompletableFuture<StatefulRedisConnection<K, V>> getWriteConnection(int

         return future.thenApply(connection -> {

-            synchronized (stateLock) {
+            stateLock.lock();
+            try {
                 if (writers[slot] == null) {
                     writers[slot] = CompletableFuture.completedFuture(connection);
                 }
+            } finally {
+                stateLock.unlock();
             }

             return connection;

@@ -196,8 +206,11 @@ private CompletableFuture<StatefulRedisConnection<K, V>> getReadConnection(int s

         boolean cached = true;

-        synchronized (stateLock) {
+        stateLock.lock();
+        try {
             readerCandidates = readers[slot];
+        } finally {
+            stateLock.unlock();
         }

         if (readerCandidates == null) {

@@ -293,8 +306,12 @@ public Iterator<RedisNodeDescription> iterator() {
             for (int i = 0; i < toCache.length; i++) {
                 toCache[i] = CompletableFuture.completedFuture(statefulRedisConnections[i]);
             }
-            synchronized (stateLock) {
+
+            stateLock.lock();
+            try {
                 readers[slot] = toCache;
+            } finally {
+                stateLock.unlock();
             }

             if (!orderSensitive) {

@@ -532,12 +549,15 @@ public void setPartitions(Partitions partitions) {

         boolean reconfigurePartitions = false;

-        synchronized (stateLock) {
+        stateLock.lock();
+        try {
             if (this.partitions != null) {
                 reconfigurePartitions = true;
             }
             this.partitions = partitions;
             this.connectionFactory.setPartitions(partitions);
+        } finally {
+            stateLock.unlock();
         }

         if (reconfigurePartitions) {

@@ -601,8 +621,11 @@ private boolean isStale(ConnectionKey connectionKey) {
     @Override
     public void setAutoFlushCommands(boolean autoFlush) {

-        synchronized (stateLock) {
+        stateLock.lock();
+        try {
             this.autoFlushCommands = autoFlush;
+        } finally {
+            stateLock.unlock();
         }

         connectionProvider.forEach(connection -> connection.setAutoFlushCommands(autoFlush));

@@ -616,9 +639,12 @@ public void flushCommands() {
     @Override
     public void setReadFrom(ReadFrom readFrom) {

-        synchronized (stateLock) {
+        stateLock.lock();
+        try {
             this.readFrom = readFrom;
             Arrays.fill(readers, null);
+        } finally {
+            stateLock.unlock();
         }
     }

@@ -643,9 +669,12 @@ long getConnectionCount() {
      */
     private void resetFastConnectionCache() {

-        synchronized (stateLock) {
+        stateLock.lock();
+        try {
             Arrays.fill(writers, null);
             Arrays.fill(readers, null);
+        } finally {
+            stateLock.unlock();
         }
     }

@@ -719,9 +748,12 @@ public ConnectionFuture<StatefulRedisConnection<K, V>> apply(ConnectionKey key)

         RedisClusterNode actualNode = targetNode;
         connection = connection.thenApply(c -> {
-            synchronized (stateLock) {
+            stateLock.lock();
+            try {
                 c.setAutoFlushCommands(autoFlushCommands);
                 c.addListener(message -> onPushMessage(actualNode, message));
+            } finally {
+                stateLock.unlock();
             }
             return c;
         });

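Several of the acquisitions above sit inside CompletableFuture callbacks (thenApply lambdas). Unlike a synchronized block, a ReentrantLock must be released by the thread that acquired it, so the lock()/unlock() pair stays entirely inside a single callback body and the lock is never held across an asynchronous boundary. A rough sketch of that shape with hypothetical names (cache, connect, and the slot count are made up for illustration and are not the actual Lettuce members):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class AsyncCacheSketch {

    private final Lock stateLock = new ReentrantLock();

    @SuppressWarnings("unchecked")
    private final CompletableFuture<String>[] cache = new CompletableFuture[16];

    CompletableFuture<String> connect(int slot, CompletableFuture<String> connectFuture) {
        return connectFuture.thenApply(connection -> {
            // Lock and unlock within the callback: the same thread that acquires
            // the lock releases it, and it is not held across the async boundary.
            stateLock.lock();
            try {
                if (cache[slot] == null) {
                    cache[slot] = CompletableFuture.completedFuture(connection);
                }
            } finally {
                stateLock.unlock();
            }
            return connection;
        });
    }
}
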
src/main/java/io/lettuce/core/cluster/models/partitions/Partitions.java

Lines changed: 40 additions & 10 deletions

@@ -24,6 +24,8 @@
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;

 import io.lettuce.core.RedisURI;
 import io.lettuce.core.cluster.SlotHash;

@@ -62,6 +64,8 @@ public class Partitions implements Collection<RedisClusterNode> {

     private static final RedisClusterNode[] EMPTY = new RedisClusterNode[SlotHash.SLOT_COUNT];

+    private final Lock lock = new ReentrantLock();
+
     private final List<RedisClusterNode> partitions = new ArrayList<>();

     private volatile RedisClusterNode[] slotCache = EMPTY;

@@ -166,8 +170,8 @@ private static boolean matches(RedisURI uri, String host, int port) {
      */
     public void updateCache() {

-        synchronized (partitions) {
-
+        lock.lock();
+        try {
             if (partitions.isEmpty()) {
                 invalidateCache();
                 return;

@@ -190,6 +194,8 @@ public void updateCache() {
             this.slotCache = slotCache;
             this.masterCache = masterCache;
             this.nodeReadView = Collections.unmodifiableCollection(readView);
+        } finally {
+            lock.unlock();
         }
     }

@@ -232,9 +238,12 @@ public void addPartition(RedisClusterNode partition) {

         LettuceAssert.notNull(partition, "Partition must not be null");

-        synchronized (partitions) {
+        lock.lock();
+        try {
             invalidateCache();
             partitions.add(partition);
+        } finally {
+            lock.unlock();
         }
     }

@@ -265,10 +274,13 @@ public void reload(List<RedisClusterNode> partitions) {

         LettuceAssert.noNullElements(partitions, "Partitions must not contain null elements");

-        synchronized (this.partitions) {
+        lock.lock();
+        try {
             this.partitions.clear();
             this.partitions.addAll(partitions);
             updateCache();
+        } finally {
+            lock.unlock();
         }
     }

@@ -304,10 +316,13 @@ public boolean addAll(Collection<? extends RedisClusterNode> c) {

         LettuceAssert.noNullElements(c, "Partitions must not contain null elements");

-        synchronized (partitions) {
+        lock.lock();
+        try {
             boolean b = partitions.addAll(c);
             updateCache();
             return b;
+        } finally {
+            lock.unlock();
         }
     }

@@ -321,10 +336,13 @@ public boolean addAll(Collection<? extends RedisClusterNode> c) {
     @Override
     public boolean removeAll(Collection<?> c) {

-        synchronized (partitions) {
+        lock.lock();
+        try {
             boolean b = getPartitions().removeAll(c);
             updateCache();
             return b;
+        } finally {
+            lock.unlock();
         }
     }

@@ -339,10 +357,13 @@ public boolean removeAll(Collection<?> c) {
     @Override
     public boolean retainAll(Collection<?> c) {

-        synchronized (partitions) {
+        lock.lock();
+        try {
             boolean b = getPartitions().retainAll(c);
             updateCache();
             return b;
+        } finally {
+            lock.unlock();
         }
     }

@@ -352,9 +373,12 @@ public boolean retainAll(Collection<?> c) {
     @Override
     public void clear() {

-        synchronized (partitions) {
+        lock.lock();
+        try {
             getPartitions().clear();
             updateCache();
+        } finally {
+            lock.unlock();
         }
     }

@@ -390,12 +414,15 @@ public <T> T[] toArray(T[] a) {
     @Override
     public boolean add(RedisClusterNode redisClusterNode) {

-        synchronized (partitions) {
+        lock.lock();
+        try {
             LettuceAssert.notNull(redisClusterNode, "RedisClusterNode must not be null");

             boolean add = getPartitions().add(redisClusterNode);
             updateCache();
             return add;
+        } finally {
+            lock.unlock();
         }
     }

@@ -408,10 +435,13 @@ public boolean add(RedisClusterNode redisClusterNode) {
     @Override
     public boolean remove(Object o) {

-        synchronized (partitions) {
+        lock.lock();
+        try {
             boolean remove = getPartitions().remove(o);
             updateCache();
             return remove;
+        } finally {
+            lock.unlock();
         }
     }

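Several of the rewritten methods (reload, addAll, removeAll, retainAll, clear, add, remove) call updateCache() while still holding lock, and updateCache() acquires the same lock again. This is safe because a ReentrantLock, like a synchronized monitor, can be re-acquired by the thread that already owns it. A small sketch of that property under simplified, illustrative types (not the actual Partitions implementation):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class ReentrancySketch {

    private final Lock lock = new ReentrantLock();

    private final List<String> partitions = new ArrayList<>();

    private volatile int cachedSize;

    void reload(List<String> newPartitions) {
        lock.lock();
        try {
            partitions.clear();
            partitions.addAll(newPartitions);
            updateCache(); // re-acquires the lock this thread already owns; succeeds because it is reentrant
        } finally {
            lock.unlock();
        }
    }

    void updateCache() {
        lock.lock();
        try {
            cachedSize = partitions.size();
        } finally {
            lock.unlock();
        }
    }
}
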
src/main/java/io/lettuce/core/cluster/topology/DefaultClusterTopologyRefresh.java

Lines changed: 1 addition & 1 deletion

@@ -506,7 +506,7 @@ public String getMessage() {
         }

         @Override
-        public synchronized Throwable fillInStackTrace() {
+        public Throwable fillInStackTrace() {
             return this;
         }

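The only change here removes the synchronized modifier from the fillInStackTrace() override; Throwable declares the method as synchronized, but an override may omit the modifier, and the body still returns this so no stack trace is captured and the exception stays cheap to construct. A generic sketch of that idiom (the class name is illustrative):

class LightweightException extends RuntimeException {

    LightweightException(String message) {
        super(message);
    }

    @Override
    public Throwable fillInStackTrace() {
        // Skip the expensive stack walk; this exception carries no stack trace.
        return this;
    }
}
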
src/main/java/io/lettuce/core/event/command/CommandBaseEvent.java

Lines changed: 1 addition & 3 deletions

@@ -32,9 +32,7 @@ public RedisCommand<Object, Object, Object> getCommand() {
      * @return shared context.
      */
     public Map<String, Object> getContext() {
-        synchronized (this) {
-            return context;
-        }
+        return context;
     }

     @Override

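Wrapping a single reference read in synchronized (this) only buys visibility if writers synchronize on the same monitor, so the block is dropped and the field is returned directly. A hedged sketch of the shape this typically relies on, assuming the map is assigned once to a final field and is therefore safely published after construction (an assumption for illustration; the actual field declaration is not shown in this diff):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class EventWithContext {

    // A final field is safely published once the constructor completes, so readers
    // need no lock; the concurrent map handles concurrent mutation of its entries.
    private final Map<String, Object> context = new ConcurrentHashMap<>();

    public Map<String, Object> getContext() {
        return context;
    }
}
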
src/main/java/io/lettuce/core/event/jfr/JfrEventRecorder.java

Lines changed: 4 additions & 9 deletions

@@ -3,6 +3,7 @@
 import java.lang.reflect.Constructor;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;

 import io.lettuce.core.event.Event;
 import io.lettuce.core.internal.LettuceAssert;

@@ -23,7 +24,7 @@
  */
 class JfrEventRecorder implements EventRecorder {

-    private final Map<Class<?>, Constructor<?>> constructorMap = new HashMap<>();
+    private final Map<Class<?>, Constructor<?>> constructorMap = new ConcurrentHashMap<>();

     @Override
     public void record(Event event) {

@@ -54,11 +55,7 @@ public RecordableEvent start(Event event) {

     private Constructor<?> getEventConstructor(Event event) throws NoSuchMethodException {

-        Constructor<?> constructor;
-
-        synchronized (constructorMap) {
-            constructor = constructorMap.get(event.getClass());
-        }
+        Constructor<?> constructor = constructorMap.get(event.getClass());

         if (constructor == null) {

@@ -73,9 +70,7 @@ private Constructor<?> getEventConstructor(Event event) throws NoSuchMethodExcep
                 constructor.setAccessible(true);
             }

-            synchronized (constructorMap) {
-                constructorMap.put(event.getClass(), constructor);
-            }
+            constructorMap.put(event.getClass(), constructor);
         }

         return constructor;

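With constructorMap switched to a ConcurrentHashMap, the now-unsynchronized get and put form a benign race: two threads may both miss the cache and resolve the same constructor, but they store an identical value, so the duplicate work is harmless and no lock is required. A condensed sketch of that caching shape (resolve() stands in for the reflective lookup the recorder performs and is not the real method):

import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ConstructorCacheSketch {

    private final Map<Class<?>, Constructor<?>> constructorMap = new ConcurrentHashMap<>();

    Constructor<?> getEventConstructor(Object event) throws NoSuchMethodException {

        Constructor<?> constructor = constructorMap.get(event.getClass());

        if (constructor == null) {
            // Concurrent callers may duplicate this lookup, but they all compute
            // the same constructor, so last-write-wins is acceptable here.
            constructor = resolve(event.getClass());
            constructorMap.put(event.getClass(), constructor);
        }

        return constructor;
    }

    private Constructor<?> resolve(Class<?> type) throws NoSuchMethodException {
        // Stand-in for resolving the matching JFR event constructor reflectively.
        return type.getDeclaredConstructor();
    }
}
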