Commit 62b738e

Backport to branch(3.15) : Rename parameters from ScalarDB to ScalarDb (#2643)
Co-authored-by: inv-jishnu <[email protected]>
1 parent f1850d3 · commit 62b738e

21 files changed, +119 -128 lines changed
@@ -1,7 +1,7 @@
 package com.scalar.db.dataloader.core;
 
 /** The available modes a ScalarDB instance can run in */
-public enum ScalarDBMode {
+public enum ScalarDbMode {
   STORAGE,
   TRANSACTION
 }

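For orientation, a minimal caller-side sketch of branching on the renamed enum. The dispatch mirrors the storage/transaction split visible elsewhere in this commit, but the helper method and the returned strings are illustrative only, not code from the repository:

    import com.scalar.db.dataloader.core.ScalarDbMode;

    // Hypothetical helper: describes which execution path a mode selects.
    static String describe(ScalarDbMode mode) {
      switch (mode) {
        case STORAGE:
          return "CRUD through DistributedStorage";
        case TRANSACTION:
          return "CRUD through DistributedTransactionManager";
        default:
          throw new AssertionError("Unknown mode: " + mode);
      }
    }
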
data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/CsvExportManager.java

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@
 import com.scalar.db.api.DistributedStorage;
 import com.scalar.db.api.TableMetadata;
 import com.scalar.db.dataloader.core.dataexport.producer.ProducerTaskFactory;
-import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBDao;
+import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
 import com.scalar.db.dataloader.core.util.CsvUtil;
 import com.scalar.db.transaction.consensuscommit.ConsensusCommitUtils;
 import java.io.IOException;
@@ -13,7 +13,7 @@
 
 public class CsvExportManager extends ExportManager {
   public CsvExportManager(
-      DistributedStorage storage, ScalarDBDao dao, ProducerTaskFactory producerTaskFactory) {
+      DistributedStorage storage, ScalarDbDao dao, ProducerTaskFactory producerTaskFactory) {
     super(storage, dao, producerTaskFactory);
   }

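The constructor change above only swaps the DAO parameter type, so callers update in kind. A hedged wiring sketch follows; how the DistributedStorage handle and the ProducerTaskFactory are created is outside this diff and left as parameters here:

    import com.scalar.db.api.DistributedStorage;
    import com.scalar.db.dataloader.core.dataexport.CsvExportManager;
    import com.scalar.db.dataloader.core.dataexport.ExportManager;
    import com.scalar.db.dataloader.core.dataexport.producer.ProducerTaskFactory;
    import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;

    // Sketch only: constructs the CSV export manager with the renamed DAO type.
    static ExportManager newCsvExportManager(
        DistributedStorage storage, ProducerTaskFactory producerTaskFactory) {
      return new CsvExportManager(storage, new ScalarDbDao(), producerTaskFactory);
    }
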
data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/ExportManager.java

Lines changed: 7 additions & 7 deletions
@@ -9,8 +9,8 @@
 import com.scalar.db.dataloader.core.dataexport.producer.ProducerTaskFactory;
 import com.scalar.db.dataloader.core.dataexport.validation.ExportOptionsValidationException;
 import com.scalar.db.dataloader.core.dataexport.validation.ExportOptionsValidator;
-import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBDao;
-import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBDaoException;
+import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
+import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDaoException;
 import com.scalar.db.dataloader.core.util.TableMetadataUtil;
 import com.scalar.db.io.DataType;
 import java.io.BufferedWriter;
@@ -33,7 +33,7 @@ public abstract class ExportManager {
   private static final Logger logger = LoggerFactory.getLogger(ExportManager.class);
 
   private final DistributedStorage storage;
-  private final ScalarDBDao dao;
+  private final ScalarDbDao dao;
   private final ProducerTaskFactory producerTaskFactory;
   private final Object lock = new Object();
 
@@ -115,7 +115,7 @@ public ExportReport startExport(
       } finally {
         bufferedWriter.flush();
       }
-    } catch (ExportOptionsValidationException | IOException | ScalarDBDaoException e) {
+    } catch (ExportOptionsValidationException | IOException | ScalarDbDaoException e) {
       logger.error("Error during export: {}", e.getMessage());
     }
     return exportReport;
@@ -215,11 +215,11 @@ private void handleTransactionMetadata(ExportOptions exportOptions, TableMetadat
    * @param dao ScalarDB dao object
    * @param storage distributed storage object
    * @return created scanner
-   * @throws ScalarDBDaoException throws if any issue occurs in creating scanner object
+   * @throws ScalarDbDaoException throws if any issue occurs in creating scanner object
    */
   private Scanner createScanner(
-      ExportOptions exportOptions, ScalarDBDao dao, DistributedStorage storage)
-      throws ScalarDBDaoException {
+      ExportOptions exportOptions, ScalarDbDao dao, DistributedStorage storage)
+      throws ScalarDbDaoException {
     boolean isScanAll = exportOptions.getScanPartitionKey() == null;
     if (isScanAll) {
       return dao.createScanner(

data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/JsonExportManager.java

Lines changed: 2 additions & 2 deletions
@@ -3,13 +3,13 @@
 import com.scalar.db.api.DistributedStorage;
 import com.scalar.db.api.TableMetadata;
 import com.scalar.db.dataloader.core.dataexport.producer.ProducerTaskFactory;
-import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBDao;
+import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
 import java.io.IOException;
 import java.io.Writer;
 
 public class JsonExportManager extends ExportManager {
   public JsonExportManager(
-      DistributedStorage storage, ScalarDBDao dao, ProducerTaskFactory producerTaskFactory) {
+      DistributedStorage storage, ScalarDbDao dao, ProducerTaskFactory producerTaskFactory) {
     super(storage, dao, producerTaskFactory);
   }

data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/JsonLineExportManager.java

Lines changed: 2 additions & 2 deletions
@@ -3,13 +3,13 @@
 import com.scalar.db.api.DistributedStorage;
 import com.scalar.db.api.TableMetadata;
 import com.scalar.db.dataloader.core.dataexport.producer.ProducerTaskFactory;
-import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBDao;
+import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
 import java.io.IOException;
 import java.io.Writer;
 
 public class JsonLineExportManager extends ExportManager {
   public JsonLineExportManager(
-      DistributedStorage storage, ScalarDBDao dao, ProducerTaskFactory producerTaskFactory) {
+      DistributedStorage storage, ScalarDbDao dao, ProducerTaskFactory producerTaskFactory) {
     super(storage, dao, producerTaskFactory);
   }

data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/ImportManager.java

Lines changed: 5 additions & 5 deletions
@@ -3,8 +3,8 @@
 import com.scalar.db.api.DistributedStorage;
 import com.scalar.db.api.DistributedTransactionManager;
 import com.scalar.db.api.TableMetadata;
-import com.scalar.db.dataloader.core.ScalarDBMode;
-import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBDao;
+import com.scalar.db.dataloader.core.ScalarDbMode;
+import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
 import com.scalar.db.dataloader.core.dataimport.datachunk.ImportDataChunkStatus;
 import com.scalar.db.dataloader.core.dataimport.processor.ImportProcessor;
 import com.scalar.db.dataloader.core.dataimport.processor.ImportProcessorFactory;
@@ -43,7 +43,7 @@ public class ImportManager implements ImportEventListener {
   @NonNull private final ImportOptions importOptions;
   private final ImportProcessorFactory importProcessorFactory;
   private final List<ImportEventListener> listeners = new ArrayList<>();
-  private final ScalarDBMode scalarDBMode;
+  private final ScalarDbMode scalarDbMode;
   private final DistributedStorage distributedStorage;
   private final DistributedTransactionManager distributedTransactionManager;
   private final ConcurrentHashMap<Integer, ImportDataChunkStatus> importDataChunkStatusMap =
@@ -62,10 +62,10 @@ public class ImportManager implements ImportEventListener {
   public ConcurrentHashMap<Integer, ImportDataChunkStatus> startImport() {
     ImportProcessorParams params =
         ImportProcessorParams.builder()
-            .scalarDBMode(scalarDBMode)
+            .scalarDbMode(scalarDbMode)
             .importOptions(importOptions)
             .tableMetadataByTableName(tableMetadata)
-            .dao(new ScalarDBDao())
+            .dao(new ScalarDbDao())
             .distributedTransactionManager(distributedTransactionManager)
             .distributedStorage(distributedStorage)
             .tableColumnDataTypes(getTableColumnDataTypes())

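Because scalarDbMode is a field consumed by a generated builder (the .builder() call above suggests Lombok's @Builder, though that is an assumption here), the rename also changes the builder method names that callers see. A sketch of the renamed calls; the ImportOptions argument is a placeholder and other builder fields are omitted for brevity:

    // Sketch only: the renamed builder methods as a caller would now write them.
    static ImportProcessorParams renamedBuilderCalls(ImportOptions importOptions) {
      return ImportProcessorParams.builder()
          .scalarDbMode(ScalarDbMode.TRANSACTION) // was .scalarDBMode(...)
          .dao(new ScalarDbDao())                 // was .dao(new ScalarDBDao())
          .importOptions(importOptions)
          .build();
    }
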
data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDao.java renamed to data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDbDao.java

Lines changed: 26 additions & 26 deletions
@@ -26,10 +26,10 @@
 import org.slf4j.LoggerFactory;
 
 /** The generic DAO that is used to scan ScalarDB data */
-public class ScalarDBDao {
+public class ScalarDbDao {
 
   /* Class logger */
-  private static final Logger logger = LoggerFactory.getLogger(ScalarDBDao.class);
+  private static final Logger logger = LoggerFactory.getLogger(ScalarDbDao.class);
   private static final String GET_COMPLETED_MSG = "GET completed for %s";
   private static final String PUT_COMPLETED_MSG = "PUT completed for %s";
   private static final String SCAN_START_MSG = "SCAN started...";
@@ -44,15 +44,15 @@ public class ScalarDBDao {
    * @param clusteringKey Optional clustering key for get
    * @param storage Distributed storage for ScalarDB connection that is running in storage mode.
    * @return Optional get result
-   * @throws ScalarDBDaoException if something goes wrong while reading the data
+   * @throws ScalarDbDaoException if something goes wrong while reading the data
    */
   public Optional<Result> get(
       String namespace,
       String table,
       Key partitionKey,
       Key clusteringKey,
       DistributedStorage storage)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
 
     // Retrieving the key data for logging
     String loggingKey = keysToString(partitionKey, clusteringKey);
@@ -63,7 +63,7 @@ public Optional<Result> get(
       logger.info(String.format(GET_COMPLETED_MSG, loggingKey));
       return result;
     } catch (ExecutionException e) {
-      throw new ScalarDBDaoException("error GET " + loggingKey, e);
+      throw new ScalarDbDaoException("error GET " + loggingKey, e);
     }
   }
 
@@ -76,15 +76,15 @@ public Optional<Result> get(
    * @param clusteringKey Optional clustering key for get
    * @param transaction ScalarDB transaction instance
    * @return Optional get result
-   * @throws ScalarDBDaoException if something goes wrong while reading the data
+   * @throws ScalarDbDaoException if something goes wrong while reading the data
    */
   public Optional<Result> get(
       String namespace,
       String table,
       Key partitionKey,
       Key clusteringKey,
       DistributedTransaction transaction)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
 
     Get get = createGetWith(namespace, table, partitionKey, clusteringKey);
     // Retrieving the key data for logging
@@ -94,7 +94,7 @@ public Optional<Result> get(
       logger.info(String.format(GET_COMPLETED_MSG, loggingKey));
       return result;
     } catch (CrudException e) {
-      throw new ScalarDBDaoException("error GET " + loggingKey, e.getCause());
+      throw new ScalarDbDaoException("error GET " + loggingKey, e.getCause());
     }
   }
 
@@ -107,7 +107,7 @@ public Optional<Result> get(
    * @param clusteringKey Optional clustering key
    * @param columns List of column values to be inserted or updated
    * @param transaction ScalarDB transaction instance
-   * @throws ScalarDBDaoException if something goes wrong while executing the transaction
+   * @throws ScalarDbDaoException if something goes wrong while executing the transaction
    */
   public void put(
       String namespace,
@@ -116,13 +116,13 @@ public void put(
       Key clusteringKey,
       List<Column<?>> columns,
       DistributedTransaction transaction)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
 
     Put put = createPutWith(namespace, table, partitionKey, clusteringKey, columns);
     try {
       transaction.put(put);
     } catch (CrudException e) {
-      throw new ScalarDBDaoException(
+      throw new ScalarDbDaoException(
           CoreError.DATA_LOADER_ERROR_CRUD_EXCEPTION.buildMessage(e.getMessage()), e);
     }
     logger.info(String.format(PUT_COMPLETED_MSG, keysToString(partitionKey, clusteringKey)));
@@ -137,7 +137,7 @@ public void put(
    * @param clusteringKey Optional clustering key
    * @param columns List of column values to be inserted or updated
    * @param storage Distributed storage for ScalarDB connection that is running in storage mode
-   * @throws ScalarDBDaoException if something goes wrong while executing the transaction
+   * @throws ScalarDbDaoException if something goes wrong while executing the transaction
    */
   public void put(
       String namespace,
@@ -146,12 +146,12 @@ public void put(
       Key clusteringKey,
       List<Column<?>> columns,
       DistributedStorage storage)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
     Put put = createPutWith(namespace, table, partitionKey, clusteringKey, columns);
     try {
       storage.put(put);
     } catch (ExecutionException e) {
-      throw new ScalarDBDaoException(
+      throw new ScalarDbDaoException(
          CoreError.DATA_LOADER_ERROR_CRUD_EXCEPTION.buildMessage(e.getMessage()), e);
     }
     logger.info(String.format(PUT_COMPLETED_MSG, keysToString(partitionKey, clusteringKey)));
@@ -169,7 +169,7 @@ public void put(
    * @param limit Scan limit value
    * @param storage Distributed storage for ScalarDB connection that is running in storage mode
    * @return List of ScalarDB scan results
-   * @throws ScalarDBDaoException if scan fails
+   * @throws ScalarDbDaoException if scan fails
    */
   public List<Result> scan(
       String namespace,
@@ -180,7 +180,7 @@ public List<Result> scan(
       List<String> projections,
       int limit,
       DistributedStorage storage)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
     // Create scan
     Scan scan = createScan(namespace, table, partitionKey, range, sorts, projections, limit);
 
@@ -193,7 +193,7 @@ public List<Result> scan(
         return allResults;
       }
     } catch (ExecutionException | IOException e) {
-      throw new ScalarDBDaoException(
+      throw new ScalarDbDaoException(
           CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
     }
   }
@@ -211,7 +211,7 @@ public List<Result> scan(
    * @param transaction Distributed Transaction manager for ScalarDB connection that is * running in
    *     transaction mode
    * @return List of ScalarDB scan results
-   * @throws ScalarDBDaoException if scan fails
+   * @throws ScalarDbDaoException if scan fails
    */
   public List<Result> scan(
       String namespace,
@@ -222,7 +222,7 @@ public List<Result> scan(
       List<String> projections,
       int limit,
       DistributedTransaction transaction)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
 
     // Create scan
     Scan scan = createScan(namespace, table, partitionKey, range, sorts, projections, limit);
@@ -236,7 +236,7 @@ public List<Result> scan(
     } catch (CrudException | NoSuchElementException e) {
       // No such element Exception is thrown when the scan is done in transaction mode but
       // ScalarDB is running in storage mode
-      throw new ScalarDBDaoException(
+      throw new ScalarDbDaoException(
           CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
     }
   }
@@ -250,21 +250,21 @@ public List<Result> scan(
    * @param limit Scan limit value
    * @param storage Distributed storage for ScalarDB connection that is running in storage mode
    * @return ScalarDB Scanner object
-   * @throws ScalarDBDaoException if scan fails
+   * @throws ScalarDbDaoException if scan fails
    */
   public Scanner createScanner(
       String namespace,
       String table,
       List<String> projectionColumns,
       int limit,
       DistributedStorage storage)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
     Scan scan =
         createScan(namespace, table, null, null, new ArrayList<>(), projectionColumns, limit);
     try {
       return storage.scan(scan);
     } catch (ExecutionException e) {
-      throw new ScalarDBDaoException(
+      throw new ScalarDbDaoException(
           CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
     }
   }
@@ -281,7 +281,7 @@ public Scanner createScanner(
    * @param limit Scan limit value
    * @param storage Distributed storage for ScalarDB connection that is running in storage mode
    * @return ScalarDB Scanner object
-   * @throws ScalarDBDaoException if scan fails
+   * @throws ScalarDbDaoException if scan fails
    */
   public Scanner createScanner(
       String namespace,
@@ -292,13 +292,13 @@ public Scanner createScanner(
       @Nullable List<String> projectionColumns,
       int limit,
       DistributedStorage storage)
-      throws ScalarDBDaoException {
+      throws ScalarDbDaoException {
     Scan scan =
         createScan(namespace, table, partitionKey, scanRange, sortOrders, projectionColumns, limit);
     try {
       return storage.scan(scan);
     } catch (ExecutionException e) {
-      throw new ScalarDBDaoException(
+      throw new ScalarDbDaoException(
          CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
     }
   }
@@ -1,15 +1,15 @@
 package com.scalar.db.dataloader.core.dataimport.dao;
 
 /** A custom DAO exception that encapsulates errors thrown by ScalarDB operations */
-public class ScalarDBDaoException extends Exception {
+public class ScalarDbDaoException extends Exception {
 
   /**
    * Class constructor
    *
    * @param message error message
    * @param cause reason for exception
    */
-  public ScalarDBDaoException(String message, Throwable cause) {
+  public ScalarDbDaoException(String message, Throwable cause) {
     super(message, cause);
   }
 }
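Taken together, the last two files rename both the DAO and its checked exception, so call sites change in both the call and the catch clause. A minimal storage-mode usage sketch; the namespace, table, and key values are illustrative, and passing null for the clustering key follows the Javadoc's "optional clustering key" wording:

    import com.scalar.db.api.DistributedStorage;
    import com.scalar.db.api.Result;
    import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
    import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDaoException;
    import com.scalar.db.io.Key;
    import java.util.Optional;

    // Sketch only: read one record through the renamed DAO and handle the renamed exception.
    static Optional<Result> readOneRecord(DistributedStorage storage) {
      ScalarDbDao dao = new ScalarDbDao();
      try {
        return dao.get(
            "sample_namespace",          // illustrative namespace
            "sample_table",              // illustrative table
            Key.ofInt("id", 1),          // illustrative partition key
            null,                        // no clustering key (the Javadoc marks it optional)
            storage);
      } catch (ScalarDbDaoException e) { // was ScalarDBDaoException
        throw new IllegalStateException("GET failed", e);
      }
    }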
