Remove remaining vestiges of backup v1.
mattl-netflix committed Feb 29, 2024
1 parent 0c40cce commit 281cd17
Showing 34 changed files with 342 additions and 2,086 deletions.
102 changes: 26 additions & 76 deletions priam/src/main/java/com/netflix/priam/aws/RemoteBackupPath.java
@@ -19,19 +19,15 @@
import com.google.api.client.util.Lists;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.compress.CompressionType;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.InstanceIdentity;
import com.netflix.priam.utils.DateUtil;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import javax.inject.Inject;

/**
@@ -40,18 +36,13 @@
* this instance.
*/
public class RemoteBackupPath extends AbstractBackupPath {
private static final ImmutableSet<BackupFileType> V2_ONLY_FILE_TYPES =
ImmutableSet.of(
BackupFileType.META_V2,
BackupFileType.SST_V2,
BackupFileType.SECONDARY_INDEX_V2);

@Inject
public RemoteBackupPath(IConfiguration config, InstanceIdentity factory) {
super(config, factory);
}

private ImmutableList.Builder<String> getV2Prefix() {
private ImmutableList.Builder<String> getPrefix() {
ImmutableList.Builder<String> prefix = ImmutableList.builder();
prefix.add(baseDir, prependHash(clusterName), token);
return prefix;
@@ -82,8 +73,8 @@ private String removeHash(String appNameWithHash) {
* Another major difference w.r.t. V1 is having no distinction between SNAP and SST files as we upload SSTables only
* once to remote file system.
*/
private String getV2Location() {
ImmutableList.Builder<String> parts = getV2Prefix();
private String getLocation() {
ImmutableList.Builder<String> parts = getPrefix();
// JDK-8177809 truncate to seconds to ensure consistent behavior with our old method of
// getting lastModified time (File::lastModified) in Java 8.
long lastModified = getLastModified().toEpochMilli() / 1_000L * 1_000L;
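// Illustration (not part of this commit): 1_709_208_123_456L / 1_000L * 1_000L evaluates to
// 1_709_208_123_000L; the millisecond remainder is dropped, matching the whole-second
// precision that File::lastModified gave on Java 8.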
@@ -98,7 +89,25 @@ private String getV2Location() {
return toPath(parts.build()).toString();
}

private void parseV2Location(Path remotePath) {
private Path toPath(ImmutableList<String> parts) {
return Paths.get(parts.get(0), parts.subList(1, parts.size()).toArray(new String[0]));
}

/**
* Format of backup path: 1. For old style backups:
* BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNAP|META]/KEYSPACE/COLUMNFAMILY/FILE
*
* <p>2. For new style backups (SnapshotMetaService)
* BASE/[cluster_name_hash]_cluster/TOKEN//[META_V2|SST_V2]/KEYSPACE/COLUMNFAMILY/[last_modified_time_ms]/FILE.compression
*/
@Override
public String getRemotePath() {
return getLocation();
}
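// Illustrative example (not part of this commit), following the new-style format documented
// above, with hypothetical values: baseDir "casbackup", cluster "mycluster" hashed to prefix
// "1a2b", token "123456789", an SST_V2 file in keyspace "ks", column family "cf":
//   casbackup/1a2b_mycluster/123456789/SST_V2/ks/cf/1709208123000/mc-5-big-Data.db.SNAPPY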

@Override
public void parseRemote(String remoteFilepath) {
Path remotePath = Paths.get(remoteFilepath);
Preconditions.checkArgument(
remotePath.getNameCount() >= 8,
String.format("%s has fewer than %d parts", remotePath, 8));
Expand Down Expand Up @@ -128,36 +137,9 @@ private void parseV2Location(Path remotePath) {
Paths.get(config.getDataFileLocation(), parts.toArray(new String[] {})).toFile();
}

private String getV1Location() {
ImmutableList.Builder<String> parts = ImmutableList.builder();
String timeString = DateUtil.formatyyyyMMddHHmm(time);
parts.add(baseDir, region, clusterName, token, timeString, type.toString());
if (BackupFileType.isDataFile(type)) {
parts.add(keyspace, columnFamily);
}
parts.add(fileName);
return toPath(parts.build()).toString();
}

private Path toPath(ImmutableList<String> parts) {
return Paths.get(parts.get(0), parts.subList(1, parts.size()).toArray(new String[0]));
}

private void parseV1Location(Path remotePath) {
Preconditions.checkArgument(
remotePath.getNameCount() >= 7,
String.format("%s has fewer than %d parts", remotePath, 7));
parseV1Prefix(remotePath);
time = DateUtil.getDate(remotePath.getName(4).toString());
type = BackupFileType.valueOf(remotePath.getName(5).toString());
if (BackupFileType.isDataFile(type)) {
keyspace = remotePath.getName(6).toString();
columnFamily = remotePath.getName(7).toString();
}
fileName = remotePath.getName(remotePath.getNameCount() - 1).toString();
}

private void parseV1Prefix(Path remotePath) {
@Override
public void parsePartialPrefix(String remoteFilePath) {
Path remotePath = Paths.get(remoteFilePath);
Preconditions.checkArgument(
remotePath.getNameCount() >= 4,
String.format("%s needs %d parts to parse prefix", remotePath, 4));
@@ -167,38 +149,6 @@ private void parseV1Prefix(Path remotePath) {
token = remotePath.getName(3).toString();
}

/**
* Format of backup path: 1. For old style backups:
* BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNAP|META]/KEYSPACE/COLUMNFAMILY/FILE
*
* <p>2. For new style backups (SnapshotMetaService)
* BASE/[cluster_name_hash]_cluster/TOKEN//[META_V2|SST_V2]/KEYSPACE/COLUMNFAMILY/[last_modified_time_ms]/FILE.compression
*/
@Override
public String getRemotePath() {
return V2_ONLY_FILE_TYPES.contains(type) ? getV2Location() : getV1Location();
}

@Override
public void parseRemote(String remotePath) {
// Hack to determine type in advance of parsing. Will disappear once v1 is retired
Optional<BackupFileType> inferredType =
Arrays.stream(BackupFileType.values())
.filter(bft -> remotePath.contains(PATH_SEP + bft.toString() + PATH_SEP))
.findAny()
.filter(V2_ONLY_FILE_TYPES::contains);
if (inferredType.isPresent()) {
parseV2Location(Paths.get(remotePath));
} else {
parseV1Location(Paths.get(remotePath));
}
}

@Override
public void parsePartialPrefix(String remoteFilePath) {
parseV1Prefix(Paths.get(remoteFilePath));
}

@Override
public String remotePrefix(Date start, Date end, String location) {
return PATH_JOINER.join(
@@ -217,7 +167,7 @@ public Path remoteV2Prefix(Path location, BackupFileType fileType) {
clusterName = removeHash(location.getName(2).toString());
}
token = instanceIdentity.getInstance().getToken();
ImmutableList.Builder<String> parts = getV2Prefix();
ImmutableList.Builder<String> parts = getPrefix();
parts.add(fileType.toString());
return toPath(parts.build());
}
@@ -48,28 +48,18 @@ public abstract class AbstractBackupPath implements Comparable<AbstractBackupPath>

public enum BackupFileType {
CL,
META,
META_V2,
SECONDARY_INDEX_V2,
SNAP,
SNAPSHOT_VERIFIED,
SST,
SST_V2;

private static ImmutableSet<BackupFileType> DATA_FILE_TYPES =
ImmutableSet.of(SECONDARY_INDEX_V2, SNAP, SST, SST_V2);

private static ImmutableSet<BackupFileType> V2_FILE_TYPES =
ImmutableSet.of(SECONDARY_INDEX_V2, SST_V2, META_V2);
ImmutableSet.of(SECONDARY_INDEX_V2, SST_V2);

public static boolean isDataFile(BackupFileType type) {
return DATA_FILE_TYPES.contains(type);
}

public static boolean isV2(BackupFileType type) {
return V2_FILE_TYPES.contains(type);
}

public static BackupFileType fromString(String s) throws BackupRestoreException {
try {
return BackupFileType.valueOf(s);
@@ -88,7 +78,6 @@ public static BackupFileType fromString(String s) throws BackupRestoreException
protected String token;
protected String region;
protected String indexDir;
protected Date time;
private long size; // uncompressed file size
private long compressedFileSize = 0;
protected final InstanceIdentity instanceIdentity;
@@ -136,8 +125,6 @@ public void parseLocal(File file, BackupFileType type) {
if (BackupFileType.isDataFile(type)) {
this.keyspace = parts[0];
this.columnFamily = parts[1];
}
if (BackupFileType.isDataFile(type)) {
Optional<BackupFolder> folder = BackupFolder.fromName(parts[2]);
this.isIncremental = folder.filter(BackupFolder.BACKUPS::equals).isPresent();
if (type == BackupFileType.SECONDARY_INDEX_V2) {
@@ -146,16 +133,6 @@
this.indexDir = parts[index];
}
}

/*
1. For old style snapshots, make this value to time at which backup was executed.
2. This is to ensure that all the files from the snapshot are uploaded under single directory in remote file system.
3. For META files we always override the time field via @link{Metadata#decorateMetaJson}
*/
this.time =
type == BackupFileType.SNAP
? DateUtil.getDate(parts[3])
: new Date(lastModified.toEpochMilli());
}

/** Given a date range, find a common string prefix Eg: 20120212, 20120213 = 2012021 */
@@ -180,7 +157,6 @@ public File newRestoreFile() {
PATH_JOINER.join(dataDir, keyspace, columnFamily, indexDir, fileName);
return_ = new File(restoreFileName);
break;
case META:
case META_V2:
return_ = new File(PATH_JOINER.join(config.getDataFileLocation(), fileName));
break;
@@ -254,14 +230,6 @@ public String getRegion() {
return region;
}

public Date getTime() {
return time;
}

public void setTime(Date time) {
this.time = time;
}

/*
@return original, uncompressed file size
*/
@@ -44,7 +44,6 @@
import java.util.concurrent.*;
import javax.inject.Inject;
import javax.inject.Provider;
import org.apache.commons.collections4.iterators.FilterIterator;
import org.apache.commons.collections4.iterators.TransformIterator;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
@@ -308,29 +307,6 @@ public Iterator<AbstractBackupPath> listPrefixes(Date date) {
});
}

@Override
public Iterator<AbstractBackupPath> list(String path, Date start, Date till) {
String prefix = pathProvider.get().remotePrefix(start, till, path);
Iterator<String> fileIterator = listFileSystem(prefix, null, null);

@SuppressWarnings("unchecked")
TransformIterator<String, AbstractBackupPath> transformIterator =
new TransformIterator(
fileIterator,
remotePath -> {
AbstractBackupPath abstractBackupPath = pathProvider.get();
abstractBackupPath.parseRemote(remotePath.toString());
return abstractBackupPath;
});

return new FilterIterator<>(
transformIterator,
abstractBackupPath ->
(abstractBackupPath.getTime().after(start)
&& abstractBackupPath.getTime().before(till))
|| abstractBackupPath.getTime().equals(start));
}

@Override
public int getUploadTasksQueued() {
return tasksQueued.size();
@@ -86,9 +86,7 @@ public ImmutableSet<AbstractBackupPath> getBackupPaths(

private CompressionType getCorrectCompressionAlgorithm(
AbstractBackupPath path, Set<String> compressedFiles) {
if (!AbstractBackupPath.BackupFileType.isV2(path.getType())
|| path.getLastModified().toEpochMilli()
< config.getCompressionTransitionEpochMillis()) {
if (path.getLastModified().toEpochMilli() < config.getCompressionTransitionEpochMillis()) {
return CompressionType.SNAPPY;
}
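// Illustration (assumption, not part of this commit): any file last modified before
// config.getCompressionTransitionEpochMillis() is still treated as SNAPPY; the isV2() guard
// could be dropped because only V2 file types remain after this change.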
String file = path.getFileName();
@@ -19,7 +19,6 @@

import com.google.common.collect.ImmutableMap;
import com.netflix.priam.backupv2.IMetaProxy;
import com.netflix.priam.backupv2.MetaV2Proxy;
import com.netflix.priam.utils.DateUtil;
import java.nio.file.Path;
import java.time.Instant;
@@ -84,8 +83,7 @@ public static List<AbstractBackupPath> getIncrementalPaths(
DateUtil.DateRange dateRange,
IMetaProxy metaProxy) {
Instant snapshotTime;
if (metaProxy instanceof MetaV2Proxy) snapshotTime = latestValidMetaFile.getLastModified();
else snapshotTime = latestValidMetaFile.getTime().toInstant();
snapshotTime = latestValidMetaFile.getLastModified();
DateUtil.DateRange incrementalDateRange =
new DateUtil.DateRange(snapshotTime, dateRange.getEndTime());
List<AbstractBackupPath> incrementalPaths = new ArrayList<>();
@@ -106,17 +106,6 @@ default String getShard() {
*/
Path getPrefix();

/**
* List all files in the backup location for the specified time range.
*
* @param path This is used as the `prefix` for listing files in the filesystem. All the files
* that start with this prefix will be returned.
* @param start Start date of the file upload.
* @param till End date of the file upload.
* @return Iterator of the AbstractBackupPath matching the criteria.
*/
Iterator<AbstractBackupPath> list(String path, Date start, Date till);

/** Get a list of prefixes for the cluster available in backup for the specified date */
Iterator<AbstractBackupPath> listPrefixes(Date date);

@@ -90,10 +90,8 @@ public static boolean isEnabled(
// Once backup 1.0 is gone, we should not check for enableV2Backups.
enabled =
(configuration.isIncrementalBackupEnabled()
&& (SnapshotBackup.isBackupEnabled(configuration)
|| (backupRestoreConfig.enableV2Backups()
&& SnapshotMetaTask.isBackupEnabled(
backupRestoreConfig))));
&& (backupRestoreConfig.enableV2Backups()
&& SnapshotMetaTask.isBackupEnabled(backupRestoreConfig)));
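// Illustration (not part of this commit): with isIncrementalBackupEnabled() true but
// enableV2Backups() false, `enabled` is now false; previously a legacy SnapshotBackup
// schedule alone could still turn incremental backups on.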
logger.info("Incremental backups are enabled: {}", enabled);

if (!enabled) {
@@ -115,21 +113,18 @@

@Override
protected void processColumnFamily(File backupDir) throws Exception {
BackupFileType fileType =
backupRestoreConfig.enableV2Backups() ? BackupFileType.SST_V2 : BackupFileType.SST;

// upload SSTables and components
ImmutableList<ListenableFuture<AbstractBackupPath>> futures =
backupHelper.uploadAndDeleteAllFiles(
backupDir, fileType, config.enableAsyncIncremental());
backupDir, BackupFileType.SST_V2, config.enableAsyncIncremental());
Futures.whenAllComplete(futures).call(() -> null, MoreExecutors.directExecutor());

// Next, upload secondary indexes
fileType = BackupFileType.SECONDARY_INDEX_V2;
boolean async = config.enableAsyncIncremental();
for (File directory : getSecondaryIndexDirectories(backupDir)) {
futures =
backupHelper.uploadAndDeleteAllFiles(
directory, fileType, config.enableAsyncIncremental());
directory, BackupFileType.SECONDARY_INDEX_V2, async);
if (futures.stream().allMatch(ListenableFuture::isDone)) {
deleteIfEmpty(directory);
} else {