diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/.gitignore b/JeMPI_Apps/JeMPI_BackupRestoreAPI/.gitignore new file mode 100644 index 000000000..344687dd2 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/.gitignore @@ -0,0 +1,116 @@ +### Java template +# Compiled class file +*.class + +# Log file +*.log + +# BlueJ files +*.ctxt + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.nar +*.ear +*.zip +*.tar.gz +*.rar + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* + +### Maven template +target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +pom.xml.next +release.properties +dependency-reduced-pom.xml +buildNumber.properties +.mvn/timing.properties +# https://github.com/takari/maven-wrapper#usage-without-binary-jar +.mvn/wrapper/maven-wrapper.jar + +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# SonarLint +.idea/sonarlint + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +# Ignore config-api.json, should be copied from JeMPI_Apps/JeMPI_Configuration on build +config-api.json diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/build.sh b/JeMPI_Apps/JeMPI_BackupRestoreAPI/build.sh new file mode 100755 index 000000000..77b8e8d5b --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e +set -u + +source $PROJECT_DEVOPS_DIR/conf/images/conf-app-images.sh +source ../build-check-jdk.sh + +JAR_FILE=${BACKUP_RESTORE_API_JAR} +APP_IMAGE=${BACKUP_RESTORE_API_IMAGE} +APP=backup-restore-api +source ../build-app-image.sh diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/checkstyle/suppression.xml b/JeMPI_Apps/JeMPI_BackupRestoreAPI/checkstyle/suppression.xml new file mode 100644 index 000000000..ae92a113b --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/checkstyle/suppression.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/docker/.gitignore b/JeMPI_Apps/JeMPI_BackupRestoreAPI/docker/.gitignore new file mode 100644 index 000000000..c9d5c946a --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/docker/.gitignore @@ -0,0 +1,3 @@ +* +!.gitignore +!Dockerfile diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/docker/Dockerfile 
b/JeMPI_Apps/JeMPI_BackupRestoreAPI/docker/Dockerfile new file mode 100644 index 000000000..800a1da52 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/docker/Dockerfile @@ -0,0 +1,13 @@ +ARG JAVA_VERSION + +FROM eclipse-temurin:${JAVA_VERSION}-jre + +ADD JeMPI_BackupRestoreAPI-1.0-SNAPSHOT-spring-boot.jar /app/app.jar + +RUN printf "#!/bin/bash\n\ +cd /app\n\ +java -server -XX:MaxRAMPercentage=80 -jar /app/app.jar\n" > /entrypoint.sh + +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/pom.xml b/JeMPI_Apps/JeMPI_BackupRestoreAPI/pom.xml new file mode 100644 index 000000000..8fff416ae --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/pom.xml @@ -0,0 +1,244 @@ + + + 4.0.0 + + org.jembi.jempi + JeMPI + 1.0-SNAPSHOT + + + JeMPI_BackupRestoreAPI + jar + + + ${project.groupId}.backuprestoreapi.api.BackupRestoreAPI + + + + + + org.jembi.jempi + JeMPI_LibShared + 1.0-SNAPSHOT + + + + org.jembi.jempi + JeMPI_LibMPI + 1.0-SNAPSHOT + + + + org.jembi.jempi + JeMPI_LibAPI + 1.0-SNAPSHOT + + + + org.postgresql + postgresql + + + + com.typesafe + config + + + + io.vavr + vavr + + + + org.apache.kafka + kafka-clients + + + + org.apache.kafka + kafka-streams + + + + com.typesafe.akka + akka-actor-typed_${scala.tools.version} + + + + com.typesafe.akka + akka-stream_${scala.tools.version} + + + + com.typesafe.akka + akka-http_${scala.tools.version} + + + + com.typesafe.akka + akka-http-jackson_${scala.tools.version} + + + + ch.megard + akka-http-cors_${scala.tools.version} + + + + com.fasterxml.jackson.core + jackson-annotations + + + + com.fasterxml.jackson.core + jackson-databind + + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + + commons-codec + commons-codec + + + + org.apache.commons + commons-lang3 + + + + org.apache.commons + commons-text + + + + io.dgraph + dgraph4j + + + + org.apache.logging.log4j + log4j-api + + + + org.apache.logging.log4j + log4j-core + + + + org.apache.logging.log4j + 
log4j-slf4j-impl + test + + + + org.apache.logging.log4j + log4j-jcl + + + + org.slf4j + slf4j-api + + + + org.slf4j + slf4j-simple + test + + + + org.junit.jupiter + junit-jupiter + test + + + + com.typesafe.akka + akka-actor-testkit-typed_${scala.tools.version} + test + + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + + validate + compile + + checkstyle/suppression.xml + + + check + + + + + + + org.springframework.boot + spring-boot-maven-plugin + ${version.org.springframework.boot.spring-boot-maven-plugin} + + + + repackage + + + spring-boot + ${start-class} + + + + + + + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + [1.0.0,) + + enforce + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/push.sh b/JeMPI_Apps/JeMPI_BackupRestoreAPI/push.sh new file mode 100755 index 000000000..cde15df24 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/push.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e +set -u + +source $PROJECT_DEVOPS_DIR/conf.env +source $PROJECT_DEVOPS_DIR/conf/images/conf-app-images.sh + +docker tag ${BACKUP_RESTORE_API_IMAGE} ${REGISTRY_NODE_IP}/${BACKUP_RESTORE_API_IMAGE} +docker push ${REGISTRY_NODE_IP}/${BACKUP_RESTORE_API_IMAGE} +docker rmi ${REGISTRY_NODE_IP}/${BACKUP_RESTORE_API_IMAGE} + diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/AppConfig.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/AppConfig.java new file mode 100644 index 000000000..96e79d99d --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/AppConfig.java @@ -0,0 +1,124 @@ +package org.jembi.jempi.backuprestoreapi; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; + +import java.io.File; +import java.util.Arrays; + +public final class AppConfig { + + private static final Logger LOGGER = LogManager.getLogger(AppConfig.class); + private static final Config SYSTEM_PROPERTIES = ConfigFactory.systemProperties(); + private static final Config SYSTEM_ENVIRONMENT = ConfigFactory.systemEnvironment(); + public static final Config CONFIG = new Builder().withSystemEnvironment() + .withSystemProperties() + .withOptionalRelativeFile("/conf/server.production.conf") + .withOptionalRelativeFile("/conf/server.staging.conf") + .withOptionalRelativeFile("/conf/server.test.conf") + .withResource("application.local.conf") + .withResource("application.conf") + .build(); + public static final String POSTGRESQL_IP = CONFIG.getString("POSTGRESQL_IP"); + public static final Integer POSTGRESQL_PORT = CONFIG.getInt("POSTGRESQL_PORT"); + + public static final String POSTGRESQL_USER = CONFIG.getString("POSTGRESQL_USER"); + public static final String POSTGRESQL_PASSWORD = CONFIG.getString("POSTGRESQL_PASSWORD"); + public static final String POSTGRESQL_NOTIFICATIONS_DB = CONFIG.getString("POSTGRESQL_NOTIFICATIONS_DB"); + public static final String POSTGRESQL_AUDIT_DB = CONFIG.getString("POSTGRESQL_AUDIT_DB"); + public static final String KAFKA_BOOTSTRAP_SERVERS = CONFIG.getString("KAFKA_BOOTSTRAP_SERVERS"); + public static final String KAFKA_APPLICATION_ID = CONFIG.getString("KAFKA_APPLICATION_ID"); + private static final String[] DGRAPH_ALPHA_HOSTS = CONFIG.getString("DGRAPH_HOSTS").split(","); + private static final int[] DGRAPH_ALPHA_PORTS = Arrays.stream(CONFIG.getString("DGRAPH_PORTS").split(",")).mapToInt(s -> { + try { + return Integer.parseInt(s); + } catch (NumberFormatException ex) { + return Integer.MIN_VALUE; + } + }).toArray(); + + public static final String LINKER_IP = CONFIG.getString("LINKER_IP"); + public static final Integer LINKER_HTTP_PORT = CONFIG.getInt("LINKER_HTTP_PORT"); + + public static final 
String CONTROLLER_IP = CONFIG.getString("CONTROLLER_IP"); + public static final Integer CONTROLLER_HTTP_PORT = CONFIG.getInt("CONTROLLER_HTTP_PORT"); + public static final Integer API_HTTP_PORT = CONFIG.getInt("API_HTTP_PORT"); + public static final Integer BACKUP_RESTORE_API_HTTP_PORT = CONFIG.getInt("BACKUP_RESTORE_API_HTTP_PORT"); + public static final Level GET_LOG_LEVEL = Level.toLevel(CONFIG.getString("LOG4J2_LEVEL")); + + private AppConfig() { + } + + public static String[] getDGraphHosts() { + return DGRAPH_ALPHA_HOSTS; + } + + public static int[] getDGraphPorts() { + return DGRAPH_ALPHA_PORTS; + } + + private static class Builder { + private Config conf = ConfigFactory.empty(); + + Builder() { + LOGGER.info("Loading configs first row is highest priority, second row is fallback and so on"); + } + + // This should return the current executing user path + private static String getExecutionDirectory() { + return SYSTEM_PROPERTIES.getString("user.dir"); + } + + Builder withSystemProperties() { + conf = conf.withFallback(SYSTEM_PROPERTIES); + LOGGER.info("Loaded system properties into config"); + return this; + } + + Builder withSystemEnvironment() { + conf = conf.withFallback(SYSTEM_ENVIRONMENT); + LOGGER.info("Loaded system environment into config"); + return this; + } + + Builder withResource(final String resource) { + Config resourceConfig = ConfigFactory.parseResources(resource); + String empty = resourceConfig.entrySet().isEmpty() + ? 
" contains no values" + : ""; + conf = conf.withFallback(resourceConfig); + LOGGER.info("Loaded config file from resource ({}){}", resource, empty); + return this; + } + + Builder withOptionalFile(final String path) { + File secureConfFile = new File(path); + if (secureConfFile.exists()) { + LOGGER.info("Loaded config file from path ({})", path); + conf = conf.withFallback(ConfigFactory.parseFile(secureConfFile)); + } else { + LOGGER.info("Attempted to load file from path ({}) but it was not found", path); + } + return this; + } + + Builder withOptionalRelativeFile(final String path) { + return withOptionalFile(getExecutionDirectory() + path); + } + + Config build() { + // Resolve substitutions. + conf = conf.resolve(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Logging properties. Make sure sensitive data such as passwords or secrets are not logged!"); + LOGGER.debug(conf.root().render()); + } + return conf; + } + + } + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/Ask.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/Ask.java new file mode 100644 index 000000000..2dc4a099d --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/Ask.java @@ -0,0 +1,56 @@ +package org.jembi.jempi.backuprestoreapi; + +import akka.actor.typed.ActorRef; +import akka.actor.typed.ActorSystem; +import akka.actor.typed.javadsl.AskPattern; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jembi.jempi.shared.models.GlobalConstants; +import org.jembi.jempi.shared.models.ApiModels; +import org.jembi.jempi.shared.models.RestoreGoldenRecords; + +import java.util.concurrent.CompletionStage; + +public final class Ask { + + private static final Logger LOGGER = LogManager.getLogger(Ask.class); + + private Ask() { + } + + static CompletionStage getGidsAll( + final ActorSystem actorSystem, + final ActorRef backEnd) 
{ + CompletionStage stage = AskPattern + .ask(backEnd, + BackEnd.GetGidsAllRequest::new, + java.time.Duration.ofSeconds(GlobalConstants.TIMEOUT_DGRAPH_QUERY_SECS), + actorSystem.scheduler()); + return stage.thenApply(response -> response); + + } + + static CompletionStage getExpandedGoldenRecord( + final ActorSystem actorSystem, + final ActorRef backEnd, + final ApiModels.ApiGoldenRecords payload) { + final CompletionStage stage = AskPattern + .ask(backEnd, + replyTo -> new BackEnd.GetExpandedGoldenRecordRequest(replyTo, payload.gid()), + java.time.Duration.ofSeconds(GlobalConstants.TIMEOUT_DGRAPH_QUERY_SECS), + actorSystem.scheduler()); + return stage.thenApply(response -> response); + } + + static CompletionStage postGoldenRecord( + final ActorSystem actorSystem, + final ActorRef backEnd, + final RestoreGoldenRecords payload) { + CompletionStage stage = AskPattern + .ask(backEnd, + replyTo -> new BackEnd.PostGoldenRecordRequest(replyTo, payload), + java.time.Duration.ofSeconds(GlobalConstants.TIMEOUT_DGRAPH_QUERY_SECS), + actorSystem.scheduler()); + return stage.thenApply(response -> response); + } +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/BackEnd.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/BackEnd.java new file mode 100644 index 000000000..ad3f7ca7a --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/BackEnd.java @@ -0,0 +1,362 @@ +package org.jembi.jempi.backuprestoreapi; + +import akka.actor.typed.ActorRef; +import akka.actor.typed.Behavior; +import akka.actor.typed.javadsl.*; +import io.vavr.control.Either; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jembi.jempi.libmpi.LibMPI; +import org.jembi.jempi.libmpi.MpiGeneralError; +import org.jembi.jempi.libmpi.MpiServiceError; +import org.jembi.jempi.shared.models.*; + +import 
java.util.Collections; +import java.util.List; + +public final class BackEnd extends AbstractBehavior { + + private static final Logger LOGGER = LogManager.getLogger(BackEnd.class); + private final String pgIP; + private final Integer pgPort; + private final String pgUser; + private final String pgPassword; + private final String pgNotificationsDb; + private final String pgAuditDb; + private final PsqlNotifications psqlNotifications; + private LibMPI libMPI = null; + private String[] dgraphHosts = null; + private int[] dgraphPorts = null; + + private BackEnd( + final Level debugLevel, + final ActorContext context, + final String[] dgraphHosts, + final int[] dgraphPorts, + final String sqlIP, + final int sqlPort, + final String sqlUser, + final String sqlPassword, + final String sqlNotificationsDb, + final String sqlAuditDb, + final String kafkaBootstrapServers, + final String kafkaClientId) { + super(context); + try { + this.libMPI = null; + this.dgraphHosts = dgraphHosts; + this.dgraphPorts = dgraphPorts; + this.pgIP = sqlIP; + this.pgPort = sqlPort; + this.pgUser = sqlUser; + this.pgPassword = sqlPassword; + this.pgNotificationsDb = sqlNotificationsDb; + this.pgAuditDb = sqlAuditDb; + psqlNotifications = new PsqlNotifications(sqlIP, sqlPort, sqlNotificationsDb, sqlUser, sqlPassword); + openMPI(kafkaBootstrapServers, kafkaClientId, debugLevel); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + throw e; + } + + } + + public static Behavior create( + final Level level, + final String[] dgraphHosts, + final int[] dgraphPorts, + final String sqlIP, + final int sqlPort, + final String sqlUser, + final String sqlPassword, + final String sqlNotificationsDb, + final String sqlAuditDb, + final String kafkaBootstrapServers, + final String kafkaClientId) { + return Behaviors.setup(context -> new BackEnd(level, + context, + dgraphHosts, + dgraphPorts, + sqlIP, + sqlPort, + sqlUser, + sqlPassword, + sqlNotificationsDb, + sqlAuditDb, + kafkaBootstrapServers, + 
kafkaClientId)); + } + + private void openMPI( + final String kafkaBootstrapServers, + final String kafkaClientId, + final Level debugLevel) { + libMPI = new LibMPI(debugLevel, dgraphHosts, dgraphPorts, kafkaBootstrapServers, kafkaClientId); + } + + @Override + public Receive createReceive() { + return actor(); + } + + public Receive actor() { + ReceiveBuilder builder = newReceiveBuilder(); + return builder.onMessage(CountGoldenRecordsRequest.class, this::countGoldenRecordsHandler) + .onMessage(CountInteractionsRequest.class, this::countInteractionsHandler) + .onMessage(CountRecordsRequest.class, this::countRecordsHandler) + .onMessage(FindExpandedSourceIdRequest.class, this::findExpandedSourceIdHandler) + .onMessage(GetGidsAllRequest.class, this::getGidsAllHandler) + .onMessage(GetInteractionRequest.class, this::getInteractionHandler) + .onMessage(GetExpandedInteractionsRequest.class, this::getExpandedInteractionsHandler) + .onMessage(GetExpandedGoldenRecordRequest.class, this::getExpandedGoldenRecordHandler) + .onMessage(PostGoldenRecordRequest.class, this::postGoldenRecordRequestHandler) + .onMessage(GetExpandedGoldenRecordsRequest.class, this::getExpandedGoldenRecordsHandler) + .build(); + } + private Behavior countGoldenRecordsHandler(final CountGoldenRecordsRequest request) { + try { + final long count = libMPI.countGoldenRecords(); + request.replyTo.tell(new CountGoldenRecordsResponse(Either.right(count))); + } catch (Exception exception) { + LOGGER.error("libMPI.countGoldenRecords failed with error message: {}", exception.getMessage()); + request.replyTo.tell( + new CountGoldenRecordsResponse(Either.left(new MpiServiceError.GeneralError(exception.getMessage())))); + } + return Behaviors.same(); + } + + private Behavior countInteractionsHandler(final CountInteractionsRequest request) { + try { + final long count = libMPI.countInteractions(); + request.replyTo.tell(new CountInteractionsResponse(Either.right(count))); + } catch (Exception exception) { + 
LOGGER.error("libMPI.countPatientRecords failed with error message: {}", exception.getMessage()); + request.replyTo.tell( + new CountInteractionsResponse(Either.left(new MpiServiceError.GeneralError(exception.getMessage())))); + } + return Behaviors.same(); + } + + private Behavior countRecordsHandler(final CountRecordsRequest request) { + final var recs = libMPI.countGoldenRecords(); + final var docs = libMPI.countInteractions(); + request.replyTo.tell(new CountRecordsResponse(recs, docs)); + return Behaviors.same(); + } + + private Behavior findExpandedSourceIdHandler(final FindExpandedSourceIdRequest request) { + final var sourceIdList = libMPI.findExpandedSourceIdList(request.facility, request.client); + request.replyTo.tell(new FindExpandedSourceIdResponse(sourceIdList)); + + return Behaviors.same(); + } + + private Behavior getGidsAllHandler(final GetGidsAllRequest request) { + var recs = libMPI.findGoldenIds(); + request.replyTo.tell(new GetGidsAllResponse(recs)); + return Behaviors.same(); + } + + private Behavior getExpandedGoldenRecordHandler(final GetExpandedGoldenRecordRequest request) { + ExpandedGoldenRecord expandedGoldenRecord = null; + try { + expandedGoldenRecord = libMPI.findExpandedGoldenRecord(request.goldenId); + } catch (Exception e) { + LOGGER.error(e.getLocalizedMessage(), e); + LOGGER.error("libMPI.findExpandedGoldenRecord failed for goldenId: {} with error: {}", + request.goldenId, + e.getMessage()); + } + + if (expandedGoldenRecord == null) { + request.replyTo + .tell(new GetExpandedGoldenRecordResponse(Either.left(new MpiServiceError.GoldenIdDoesNotExistError( + "Golden Record does not exist", + request.goldenId)))); + } else { + request.replyTo.tell(new GetExpandedGoldenRecordResponse(Either.right(expandedGoldenRecord))); + } + return Behaviors.same(); + } + + private Behavior getExpandedGoldenRecordsHandler(final GetExpandedGoldenRecordsRequest request) { + List goldenRecords = null; + try { + goldenRecords = 
libMPI.findExpandedGoldenRecords(request.goldenIds); + } catch (Exception exception) { + LOGGER.error("libMPI.findExpandedGoldenRecords failed for goldenIds: {} with error: {}", + request.goldenIds, + exception.getMessage()); + } + + if (goldenRecords == null) { + request.replyTo + .tell(new GetExpandedGoldenRecordsResponse(Either.left(new MpiServiceError.GoldenIdDoesNotExistError( + "Golden Records do not exist", + Collections.singletonList(request.goldenIds).toString())))); + } else { + request.replyTo.tell(new GetExpandedGoldenRecordsResponse(Either.right(goldenRecords))); + } + return Behaviors.same(); + } + + private Behavior postGoldenRecordRequestHandler(final PostGoldenRecordRequest request) { + String goldenRecords = null; + try { + goldenRecords = libMPI.postGoldenRecord(request.goldenRecord); + } catch (Exception exception) { + LOGGER.error("libMPI.postGoldenRecord failed for goldenIds: {} with error: {}", + request.goldenRecord, + exception.getMessage()); + } + request.replyTo.tell(new PostGoldenRecordResponse(goldenRecords)); + return Behaviors.same(); + } + + private Behavior getExpandedInteractionsHandler(final GetExpandedInteractionsRequest request) { + List expandedInteractions = null; + try { + expandedInteractions = libMPI.findExpandedInteractions(request.patientIds); + } catch (Exception exception) { + LOGGER.error("libMPI.findExpandedPatientRecords failed for patientIds: {} with error: {}", + request.patientIds, + exception.getMessage()); + } + + if (expandedInteractions == null) { + request.replyTo + .tell(new GetExpandedInteractionsResponse(Either.left(new MpiServiceError.InteractionIdDoesNotExistError( + "Patient Records do not exist", + Collections.singletonList(request.patientIds).toString())))); + } else { + request.replyTo.tell(new GetExpandedInteractionsResponse(Either.right(expandedInteractions))); + } + return Behaviors.same(); + } + + private Behavior getInteractionHandler(final GetInteractionRequest request) { + Interaction 
interaction = null; + try { + interaction = libMPI.findInteraction(request.iid); + } catch (Exception exception) { + LOGGER.error("libMPI.findPatientRecord failed for patientId: {} with error: {}", request.iid, + exception.getMessage()); + } + + if (interaction == null) { + request.replyTo.tell(new GetInteractionResponse(Either.left(new MpiServiceError.InteractionIdDoesNotExistError( + "Interaction not found", + request.iid)))); + } else { + request.replyTo.tell(new GetInteractionResponse(Either.right(interaction))); + } + return Behaviors.same(); + } + + + + public interface Event { + } + + public interface EventResponse { + } + + public record CountGoldenRecordsRequest(ActorRef replyTo) implements Event { + } + + public record CountGoldenRecordsResponse(Either count) implements EventResponse { + } + + public record CountInteractionsRequest(ActorRef replyTo) implements Event { + } + + public record CountInteractionsResponse(Either count) implements EventResponse { + } + + public record CountRecordsRequest(ActorRef replyTo) implements Event { + } + + public record CountRecordsResponse( + long goldenRecords, + long patientRecords) implements EventResponse { + } + + public record GetGidsPagedRequest( + ActorRef replyTo, + long offset, + long length) implements Event { + } + + public record GetGidsPagedResponse(List goldenIds) implements EventResponse { + } + + public record GetGoldenRecordAuditTrailRequest( + ActorRef replyTo, + String uid) implements Event { + } + + public record GetGoldenRecordAuditTrailResponse(List auditTrail) { + } + + public record GetGidsAllRequest(ActorRef replyTo) implements Event { + } + + public record GetGidsAllResponse(List records) implements EventResponse { + } + + public record FindExpandedSourceIdRequest( + ActorRef replyTo, + String facility, + String client) implements Event { + } + + public record FindExpandedSourceIdResponse(List records) implements EventResponse { + } + + public record GetExpandedGoldenRecordRequest( + ActorRef 
replyTo, + String goldenId) implements Event { + } + + public record GetExpandedGoldenRecordResponse(Either goldenRecord) + implements EventResponse { + } + + public record GetExpandedGoldenRecordsRequest( + ActorRef replyTo, + List goldenIds) implements Event { + } + + public record GetExpandedGoldenRecordsResponse( + Either> expandedGoldenRecords) implements EventResponse { + } + + public record GetExpandedInteractionsRequest( + ActorRef replyTo, + List patientIds) implements Event { + } + + public record GetExpandedInteractionsResponse( + Either> expandedPatientRecords) implements EventResponse { + } + + public record GetInteractionRequest( + ActorRef replyTo, + String iid) implements Event { + } + + public record GetInteractionResponse(Either patient) implements EventResponse { + } + + public record PostGoldenRecordRequest( + ActorRef replyTo, + RestoreGoldenRecords goldenRecord) implements Event { + } + + public record PostGoldenRecordResponse(String goldenRecord) + implements EventResponse { + } + + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/JsonFieldsConfig.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/JsonFieldsConfig.java new file mode 100644 index 000000000..93f8b141a --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/JsonFieldsConfig.java @@ -0,0 +1,99 @@ +package org.jembi.jempi.backuprestoreapi; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jembi.jempi.shared.utils.AppUtils; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; +import org.json.simple.parser.ParseException; + +import java.io.*; +import java.nio.file.FileSystems; + +public final class JsonFieldsConfig { + + private static final Logger LOGGER = LogManager.getLogger(JsonFieldsConfig.class); + public String jsonFields; + private 
JSONArray fields; + + public JsonFieldsConfig(final String resourceFilename) { + try { + load(resourceFilename); + } catch (Exception e) { + LOGGER.debug(e); + } + } + + + private JSONArray buildFieldsResponsePayload( + final JSONArray systemFields, + final JSONArray customFields) { + JSONArray result = new JSONArray(); + // Process system fields + for (Object systemField : systemFields) { + JSONObject field = (JSONObject) systemField; + // Mark field as readonly + field.put("readOnly", true); + // Merge array values + result.add(field); + } + // Process custom fields + for (Object customField : customFields) { + // Convert field names from snake case to camel case + JSONObject field = (JSONObject) customField; + String fieldName = (String) field.get("fieldName"); + field.put("fieldName", AppUtils.snakeToCamelCase(fieldName)); + // Remove extra attributes + field.remove("indexGoldenRecord"); + field.remove("indexPatient"); + field.remove("m"); + field.remove("u"); + // Mark field as editable + field.put("readOnly", false); + // Merge array values + result.add(field); + } + return result; + } + + private InputStream getFileStreamFromResource(final String resourceFilename) { + ClassLoader classLoader = getClass().getClassLoader(); + return classLoader.getResourceAsStream(resourceFilename); + } + + public void load(final String filename) { + final var separator = FileSystems.getDefault().getSeparator(); + final var filePath = "%sapp%sconf_system%s%s".formatted(separator, separator, separator, filename); + final var file = new File(filePath); + try (Reader reader = new InputStreamReader(new FileInputStream(file))) { + JSONParser jsonParser = new JSONParser(); + Object obj = jsonParser.parse(reader); + JSONObject config = (JSONObject) obj; + // System fields are fields that exists regardless of the implementation + JSONArray systemFields = (JSONArray) config.get("systemFields"); + // Custom fields depend on the needs of the implementation + JSONArray customFields = 
(JSONArray) config.get("fields"); + jsonFields = buildFieldsResponsePayload(systemFields, customFields).toJSONString(); + } catch (IOException | ParseException e) { + throw new RuntimeException(e); + } + +// +// JSONParser jsonParser = new JSONParser(); +// try (Reader reader = new InputStreamReader(getFileStreamFromResource(resourceFilename))) { +// // Read JSON file +// Object obj = jsonParser.parse(reader); +// JSONObject config = (JSONObject) obj; +// // System fields are fields that exists regardless of the implementation +// JSONArray systemFields = (JSONArray) config.get("systemFields"); +// // Custom fields depend on the needs of the implementation +// JSONArray customFields = (JSONArray) config.get("fields"); +// jsonFields = buildFieldsResponsePayload(systemFields, customFields).toJSONString(); +// } catch (ParseException | IOException e) { +// LOGGER.error(e.getLocalizedMessage(), e); +// fields = new JSONArray(); +// } + } +} + diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/MapError.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/MapError.java new file mode 100644 index 000000000..80d6af6c0 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/MapError.java @@ -0,0 +1,42 @@ +package org.jembi.jempi.backuprestoreapi; + +import akka.http.javadsl.marshallers.jackson.Jackson; +import akka.http.javadsl.marshalling.Marshaller; +import akka.http.javadsl.model.RequestEntity; +import akka.http.javadsl.model.StatusCodes; +import akka.http.javadsl.server.Route; +import org.jembi.jempi.libmpi.MpiGeneralError; +import org.jembi.jempi.libmpi.MpiServiceError; + +import static akka.http.javadsl.server.Directives.complete; +import static org.jembi.jempi.shared.utils.AppUtils.OBJECT_MAPPER; + +public final class MapError { + + private static final Marshaller JSON_MARSHALLER = Jackson.marshaller(OBJECT_MAPPER); + + private MapError() { + } 
+ + public static Route mapError(final MpiGeneralError obj) { + return switch (obj) { + case MpiServiceError.InteractionIdDoesNotExistError e -> complete(StatusCodes.BAD_REQUEST, e, JSON_MARSHALLER); + case MpiServiceError.GoldenIdDoesNotExistError e -> complete(StatusCodes.BAD_REQUEST, e, JSON_MARSHALLER); + case MpiServiceError.GoldenIdInteractionConflictError e -> complete(StatusCodes.BAD_REQUEST, e, JSON_MARSHALLER); + case MpiServiceError.DeletePredicateError e -> complete(StatusCodes.BAD_REQUEST, e, JSON_MARSHALLER); + case MpiServiceError.InvalidFunctionError e -> complete(StatusCodes.UNPROCESSABLE_ENTITY, e, JSON_MARSHALLER); + case MpiServiceError.InvalidOperatorError e -> complete(StatusCodes.UNPROCESSABLE_ENTITY, e, JSON_MARSHALLER); + case MpiServiceError.NoScoreGivenError e -> complete(StatusCodes.PARTIAL_CONTENT, e, JSON_MARSHALLER); + case MpiServiceError.NotImplementedError e -> complete(StatusCodes.NOT_IMPLEMENTED, e, JSON_MARSHALLER); + case MpiServiceError.CRClientExistsError e -> complete(StatusCodes.CONFLICT, e, JSON_MARSHALLER); + case MpiServiceError.CRUpdateFieldError e -> complete(StatusCodes.BAD_REQUEST, e, JSON_MARSHALLER); + case MpiServiceError.CRGidDoesNotExistError e -> complete(StatusCodes.NOT_FOUND, e, JSON_MARSHALLER); + case MpiServiceError.CRLinkUpdateError e -> complete(StatusCodes.BAD_REQUEST, e, JSON_MARSHALLER); + case MpiServiceError.CRMissingFieldError e -> complete(StatusCodes.BAD_REQUEST, e, JSON_MARSHALLER); + case MpiServiceError.GeneralError e -> complete(StatusCodes.INTERNAL_SERVER_ERROR, e, JSON_MARSHALLER); + case MpiServiceError.InternalError e -> complete(StatusCodes.INTERNAL_SERVER_ERROR, e, JSON_MARSHALLER); + default -> complete(StatusCodes.INTERNAL_SERVER_ERROR); + }; + } + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/PsqlClient.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/PsqlClient.java new file mode 100644 index 
000000000..f50895aa1 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/PsqlClient.java @@ -0,0 +1,113 @@ +package org.jembi.jempi.backuprestoreapi; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.sql.*; +import java.util.Locale; + +final class PsqlClient { + + private static final Logger LOGGER = LogManager.getLogger(PsqlClient.class); + private final String dbIP; + private final int dbPort; + private final String database; + private final String user; + private final String password; + + private Connection connection; + + PsqlClient( + final String pgIP, + final int pgPort, + final String pgDatabase, + final String pgUser, + final String pgPassword) { + dbIP = pgIP; + dbPort = pgPort; + connection = null; + database = pgDatabase; + user = pgUser; + password = pgPassword; + } + + boolean connect() { + if (connection == null) { + final var url = String.format(Locale.ROOT, "jdbc:postgresql://%s:%d/%s", dbIP, dbPort, database); + try { + connection = DriverManager.getConnection(url, user, password); + return connection.isValid(5); + } catch (SQLException e) { + LOGGER.error("{} {} {}", url, user, password); + LOGGER.error(e.getLocalizedMessage(), e); + connection = null; + return false; + } + } else { + try { + if (!connection.isValid(5)) { + connection.close(); + final var url = String.format(Locale.ROOT, "jdbc:postgresql://%s:%d/%s", dbIP, dbPort, database); + connection = DriverManager.getConnection(url, user, password); + } + } catch (SQLException e) { + LOGGER.error(e.getLocalizedMessage(), e); + connection = null; + } + } + try { + if (connection == null) { + return false; + } else { + return connection.isValid(5); + } + } catch (SQLException e) { + LOGGER.error(e.getLocalizedMessage(), e); + connection = null; + return false; + } + } + + + void setAutoCommit(final boolean autoCommit) { + try { + connection.setAutoCommit(autoCommit); + } catch (SQLException 
e) { + LOGGER.error(e.getLocalizedMessage(), e); + } + } + + void commit() { + try { + connection.commit(); + } catch (SQLException e) { + LOGGER.error(e.getLocalizedMessage(), e); + } + } + + void disconnect() { + if (connection != null) { + try { + connection.close(); + } catch (SQLException e) { + LOGGER.error(e.getLocalizedMessage(), e); + } + connection = null; + } + } + + Statement createStatement() throws SQLException { + return connection.createStatement(); + } + + PreparedStatement prepareStatement(final String sql) throws SQLException { + return connection.prepareStatement(sql); + } + + PreparedStatement prepareStatement( + final String sql, + final int resultSetType) throws SQLException { + return connection.prepareStatement(sql, resultSetType); + } + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/PsqlNotifications.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/PsqlNotifications.java new file mode 100644 index 000000000..5f9b68571 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/PsqlNotifications.java @@ -0,0 +1,188 @@ +package org.jembi.jempi.backuprestoreapi; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jembi.jempi.shared.models.MatchesForReviewResult; + +import java.sql.*; +import java.util.*; + +final class PsqlNotifications { + + private static final String NOTIFICATION_TABLE_NAME = "notification"; + private static final String QUERY = """ + SELECT patient_id, id, names, created, state,type, score, old_golden_id, current_golden_id + FROM notification + WHERE created BETWEEN ? AND ? AND state IN (?, ?) + ORDER BY created + LIMIT ? OFFSET ? 
+ """ + private static final Logger LOGGER = LogManager.getLogger(PsqlNotifications.class); + private final PsqlClient psqlClient; + + PsqlNotifications( + final String pgServer, + final int pgPort, + final String pgDatabase, + final String pgUser, + final String pgPassword) { + psqlClient = new PsqlClient(pgServer, pgPort, pgDatabase, pgUser, pgPassword); + } + + /** + * Retrieves matches for review based on the provided parameters. + * + * @param limit The maximum number of matches to retrieve. + * @param offset The number of matches to skip from the beginning. + * @param startDate The start of the creation-date range (paired with endDate). + * @param states The state of notification. + * @return A {@link MatchesForReviewResult} object containing the matches and related information. + */ + + MatchesForReviewResult getMatchesForReview( + final int limit, + final int offset, + final Timestamp startDate, + final Timestamp endDate, + final List states) { + final var list = new ArrayList>(); + MatchesForReviewResult result = new MatchesForReviewResult(); + int skippedRows = 0; + psqlClient.connect(); + try (PreparedStatement preparedStatement = psqlClient.prepareStatement(QUERY); + PreparedStatement countStatement = psqlClient.prepareStatement( + "SELECT COUNT(*) FROM notification WHERE created BETWEEN ? AND ?
AND state IN (?, ?)")) { + countStatement.setTimestamp(1, startDate); + countStatement.setTimestamp(2, endDate); + countStatement.setString(3, extractState(0, states)); + countStatement.setString(4, extractState(1, states)); + ResultSet countRs = countStatement.executeQuery(); + countRs.next(); + int totalCount = countRs.getInt(1); + preparedStatement.setTimestamp(1, startDate); + preparedStatement.setTimestamp(2, endDate); + preparedStatement.setString(3, extractState(0, states)); + preparedStatement.setString(4, extractState(1, states)); + preparedStatement.setInt(5, limit); + preparedStatement.setInt(6, offset); + ResultSet rs = preparedStatement.executeQuery(); + ResultSetMetaData md = rs.getMetaData(); + int columns = md.getColumnCount(); + UUID notificationID = null; + while (rs.next()) { + final var row = new HashMap(columns); + for (int i = 1; i <= columns; i++) { + if (md.getColumnName(i).equals("id")) { + notificationID = rs.getObject(i, UUID.class); + } + final var name = md.getColumnName(i); + final var obj = rs.getObject(i); + if (obj == null && "names".equals(name)) { + row.put(name, ""); + } else { + row.put(name, (obj)); + } + } + list.add(row); + row.put("candidates", getCandidates(notificationID)); + skippedRows++; + } + result.setCount(list.size()); + result.setSkippedRecords(totalCount - list.size()); + } catch (SQLException e) { + LOGGER.error(e); + } + result.setNotifications(list); + return result; + } + + public int getNotificationCount(final String status) { + String queryStatement = status == null + ? 
String.format("SELECT COUNT(*) FROM %s", NOTIFICATION_TABLE_NAME) + : String.format("SELECT COUNT(*) FROM %s WHERE state = '%s'", NOTIFICATION_TABLE_NAME, status); + + psqlClient.connect(); + try (PreparedStatement preparedStatement = psqlClient.prepareStatement(queryStatement); + ResultSet resultSet = preparedStatement.executeQuery()) { + if (resultSet.next()) { + return resultSet.getInt(1); + } + return 0; + } catch (SQLException e) { + LOGGER.error(e); + } + return -1; + } + + String extractState( + final int index, + final List states) { + if (index + 1 > states.size()) { + return null; + } + return states.get(index); + } + + List> getCandidates(final UUID nID) { + final var list = new ArrayList>(); + String candidates = "select notification_id, score, golden_id from candidates where notification_id IN ('" + nID + "')"; + + psqlClient.connect(); + try (PreparedStatement preparedStatement = psqlClient.prepareStatement(candidates)) { + ResultSet rs = preparedStatement.executeQuery(); + ResultSetMetaData md = rs.getMetaData(); + int columns = md.getColumnCount(); + while (rs.next()) { + final var row = new HashMap(columns); + for (int i = 1; i <= columns; i++) { + if (md.getColumnName(i).equals("notification_id")) { + row.put("score", (rs.getObject("score"))); + row.put("golden_id", (rs.getObject("golden_id"))); + } + } + if (!row.isEmpty()) { + list.add(row); + } + } + } catch (SQLException e) { + LOGGER.error(e); + } + return list; + } + + void insertCandidates( + final UUID id, + final Float score, + final String gID) throws SQLException { + psqlClient.connect(); + try (Statement stmt = psqlClient.createStatement()) { + psqlClient.setAutoCommit(false); + String sql = + "INSERT INTO candidates (notification_id, score, golden_id)" + " VALUES ('" + id + "','" + score + "', '" + gID + "')"; + stmt.addBatch(sql); + + + stmt.executeBatch(); + psqlClient.commit(); + } + } + + void updateNotificationState(final String notificationId, final String oldGoldenId, final 
String currentGoldenId) throws SQLException { + psqlClient.connect(); + String sql = String.format(Locale.ROOT, "update notification set state = '%s', old_golden_id = '%s', current_golden_id = '%s' where id = '%s'", + "CLOSED", + oldGoldenId, + currentGoldenId, + notificationId); + try (PreparedStatement stmt = psqlClient.prepareStatement(sql)) { + int rowsAffected = stmt.executeUpdate(); + if (rowsAffected > 0) { + LOGGER.info("Updated notification {} with new currentGoldenId {}", notificationId, currentGoldenId); + psqlClient.commit(); + } else { + LOGGER.warn("Notification with ID {} not found", notificationId); + } + } + } + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/Routes.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/Routes.java new file mode 100644 index 000000000..545e1cb76 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/Routes.java @@ -0,0 +1,111 @@ +package org.jembi.jempi.backuprestoreapi; + +import akka.actor.typed.ActorRef; +import akka.actor.typed.ActorSystem; +import akka.http.javadsl.marshallers.jackson.Jackson; +import akka.http.javadsl.marshalling.Marshaller; +import akka.http.javadsl.model.RequestEntity; +import akka.http.javadsl.model.StatusCodes; +import akka.http.javadsl.server.Route; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jembi.jempi.libmpi.MpiServiceError; +import org.jembi.jempi.shared.models.ApiModels; +import org.jembi.jempi.shared.models.GlobalConstants; +import org.jembi.jempi.shared.models.RestoreGoldenRecords; + +import java.util.Map; +import java.util.function.Function; + +import static akka.http.javadsl.server.Directives.*; +import static org.jembi.jempi.backuprestoreapi.MapError.mapError; +import static org.jembi.jempi.shared.utils.AppUtils.OBJECT_MAPPER; + +public final class Routes { + + private static final Logger LOGGER = 
LogManager.getLogger(Routes.class); + private static final Marshaller JSON_MARSHALLER = Jackson.marshaller(OBJECT_MAPPER); + private static final Function, String> PARAM_STRING = Map.Entry::getValue; + + private Routes() { + } + + private static Route getGidsAll( + final ActorSystem actorSystem, + final ActorRef backEnd) { + return onComplete(Ask.getGidsAll(actorSystem, backEnd), + result -> { + if (!result.isSuccess()) { + final var e = result.failed().get(); + LOGGER.error(e.getLocalizedMessage(), e); + return mapError(new MpiServiceError.InternalError( + e.getLocalizedMessage())); + } + return complete(StatusCodes.OK, result.get(), JSON_MARSHALLER); + }); + } + + private static Route postGoldenRecord( + final ActorSystem actorSystem, + final ActorRef backEnd) { + return entity(Jackson.unmarshaller(RestoreGoldenRecords.class), + payload -> payload != null + ? onComplete(Ask.postGoldenRecord(actorSystem, backEnd, payload), + result -> { + if (result.isSuccess()) { + final var updatedFields = result.get().goldenRecord(); + if (updatedFields.isEmpty()) { + return complete(StatusCodes.BAD_REQUEST); + } else { + return complete(StatusCodes.OK, + result.get(), + JSON_MARSHALLER); + } + } else { + return complete(StatusCodes.INTERNAL_SERVER_ERROR); + } + }) + : complete(StatusCodes.NO_CONTENT)); + } + + private static Route getExpandedGoldenRecord( + final ActorSystem actorSystem, + final ActorRef backEnd) { + return entity(Jackson.unmarshaller(ApiModels.ApiGoldenRecords.class), request -> { + return onComplete(Ask.getExpandedGoldenRecord(actorSystem, backEnd, request), + result -> { + if (!result.isSuccess()) { + final var e = result.failed().get(); + LOGGER.error(e.getLocalizedMessage(), e); + return mapError(new MpiServiceError.InternalError(e.getLocalizedMessage())); + } + return result.get() + .goldenRecord() + .mapLeft(MapError::mapError) + .fold(error -> error, + goldenRecord -> complete(StatusCodes.OK, + ApiModels.ApiExpandedGoldenRecord + 
.fromExpandedGoldenRecord(goldenRecord), + Jackson.marshaller(OBJECT_MAPPER))); + }); + }); + } + + public static Route createCoreAPIRoutes( + final ActorSystem actorSystem, + final ActorRef backEnd + ) { + return concat(post(() -> concat( + /* proxy for linker/controller services*/ + path(GlobalConstants.SEGMENT_POST_EXPANDED_GOLDEN_RECORD, + () -> Routes.getExpandedGoldenRecord(actorSystem, backEnd)), + path(GlobalConstants.SEGMENT_POST_GOLDEN_RECORD_RESTORE, + () -> Routes.postGoldenRecord(actorSystem, backEnd)) + )), + get(() -> concat( + path(GlobalConstants.SEGMENT_GET_GIDS_ALL, + () -> Routes.getGidsAll(actorSystem, backEnd)) + ))); + } + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/api/BackupRestoreAPI.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/api/BackupRestoreAPI.java new file mode 100644 index 000000000..d3ede38db --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/api/BackupRestoreAPI.java @@ -0,0 +1,71 @@ +package org.jembi.jempi.backuprestoreapi.api; + +import akka.actor.typed.ActorRef; +import akka.actor.typed.ActorSystem; +import akka.actor.typed.Behavior; +import akka.actor.typed.Terminated; +import akka.actor.typed.javadsl.Behaviors; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jembi.jempi.backuprestoreapi.AppConfig; +import org.jembi.jempi.backuprestoreapi.BackEnd; +import org.jembi.jempi.backuprestoreapi.JsonFieldsConfig; + +import java.util.UUID; + +public final class BackupRestoreAPI { + + private static final Logger LOGGER = LogManager.getLogger(BackupRestoreAPI.class); + private static final String CONFIG_RESOURCE_FILE_NAME = "config-api.json"; + private final JsonFieldsConfig jsonFieldsConfig = new JsonFieldsConfig(CONFIG_RESOURCE_FILE_NAME); + private HttpServer httpServer; + + + private BackupRestoreAPI() { + LOGGER.info("BackupRestoreAPI 
startedI"); + } + + public static void main(final String[] args) { + try { + new BackupRestoreAPI().run(); + } catch (Exception e) { + LOGGER.error(e.getLocalizedMessage(), e); + } + } + + public Behavior create() { + return Behaviors.setup(context -> { + ActorRef backEnd = context.spawn(BackEnd.create(AppConfig.GET_LOG_LEVEL, + AppConfig.getDGraphHosts(), + AppConfig.getDGraphPorts(), + AppConfig.POSTGRESQL_IP, + AppConfig.POSTGRESQL_PORT, + AppConfig.POSTGRESQL_USER, + AppConfig.POSTGRESQL_PASSWORD, + AppConfig.POSTGRESQL_NOTIFICATIONS_DB, + AppConfig.POSTGRESQL_AUDIT_DB, + AppConfig.KAFKA_BOOTSTRAP_SERVERS, + "CLIENT_ID_API-" + UUID.randomUUID()), "BackEnd"); + context.watch(backEnd); + httpServer = HttpServer.create(); + httpServer.open("0.0.0.0", AppConfig.BACKUP_RESTORE_API_HTTP_PORT, context.getSystem(), backEnd, jsonFieldsConfig.jsonFields); + return Behaviors.receive(Void.class).onSignal(Terminated.class, sig -> { + httpServer.close(context.getSystem()); + return Behaviors.stopped(); + }).build(); + }); + } + + private void run() { + LOGGER.info("interface:port {}:{}", "0.0.0.0", AppConfig.BACKUP_RESTORE_API_HTTP_PORT); + try { + LOGGER.info("Loading fields configuration file"); + jsonFieldsConfig.load(CONFIG_RESOURCE_FILE_NAME); + LOGGER.info("Fields configuration file successfully loaded."); + ActorSystem.create(this.create(), "BackupRestoreAPI-App"); + } catch (Exception e) { + LOGGER.error("Unable to start the BackupRestoreAPI", e); + } + } + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/api/HttpServer.java b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/api/HttpServer.java new file mode 100644 index 000000000..435bbd4d1 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/java/org/jembi/jempi/backuprestoreapi/api/HttpServer.java @@ -0,0 +1,87 @@ +package org.jembi.jempi.backuprestoreapi.api; + +import akka.actor.typed.ActorRef; +import 
akka.actor.typed.ActorSystem; +import akka.http.javadsl.Http; +import akka.http.javadsl.ServerBinding; +import akka.http.javadsl.model.HttpEntity; +import akka.http.javadsl.model.StatusCodes; +import akka.http.javadsl.server.AllDirectives; +import akka.http.javadsl.server.ExceptionHandler; +import akka.http.javadsl.server.RejectionHandler; +import akka.http.javadsl.server.Route; +import ch.megard.akka.http.cors.javadsl.settings.CorsSettings; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.config.Configurator; +import org.jembi.jempi.backuprestoreapi.AppConfig; +import org.jembi.jempi.backuprestoreapi.BackEnd; +import org.jembi.jempi.backuprestoreapi.Routes; +import org.jembi.jempi.shared.models.GlobalConstants; + +import java.util.concurrent.CompletionStage; + +import static ch.megard.akka.http.cors.javadsl.CorsDirectives.cors; + +public final class HttpServer extends AllDirectives { + + private static final Logger LOGGER = LogManager.getLogger(HttpServer.class); + + private CompletionStage binding = null; + private Http http = null; + + + private HttpServer() { + Configurator.setLevel(this.getClass(), AppConfig.GET_LOG_LEVEL); + } + + static HttpServer create() { + return new HttpServer(); + } + + public void open( + final String httpServerHost, + final int httpPort, + final ActorSystem actorSystem, + final ActorRef backEnd, + final String jsonFields) { + http = Http.get(actorSystem); + binding = http.newServerAt(httpServerHost, httpPort).bind(this.createCorsRoutes(actorSystem, backEnd, jsonFields)); + LOGGER.info("BackupRestoreAPI Server online at http://{}:{}", httpServerHost, httpPort); + } + + public void close(final ActorSystem actorSystem) { + binding.thenCompose(ServerBinding::unbind) // trigger unbinding from the port + .thenAccept(unbound -> actorSystem.terminate()); // and shutdown when done + } + + public Route createCorsRoutes( + final ActorSystem actorSystem, + final ActorRef 
backEnd, + final String jsonFields) { + final var settings = CorsSettings.create(AppConfig.CONFIG); + + final RejectionHandler rejectionHandler = RejectionHandler.defaultHandler().mapRejectionResponse(response -> { + if (response.entity() instanceof HttpEntity.Strict) { + String message = ((HttpEntity.Strict) response.entity()).getData().utf8String(); + LOGGER.warn(String.format("Request was rejected. Reason: %s", message)); + } + + return response; + }); + + final ExceptionHandler exceptionHandler = ExceptionHandler.newBuilder().match(Exception.class, x -> { + LOGGER.error("An exception occurred while executing the Route", x); + return complete(StatusCodes.INTERNAL_SERVER_ERROR, "An exception occurred, see server logs for details"); + }).build(); + + return cors(settings, + () -> pathPrefix("JeMPI", + () -> concat(Routes.createCoreAPIRoutes(actorSystem, + backEnd), + path(GlobalConstants.SEGMENT_POST_FIELDS_CONFIG, + () -> complete(StatusCodes.OK, jsonFields))))).seal(rejectionHandler, + exceptionHandler); + } + +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/application.conf b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/application.conf new file mode 100644 index 000000000..d2bdb4ed3 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/application.conf @@ -0,0 +1,73 @@ +akka.http { + session { + enabled = true + } + server { + idle-timeout = 10 s + request-timeout = 5 s + linger-timeout = 5 s + } +} + +akka-http-cors { + + # If enabled, allow generic requests (that are outside the scope of the specification) + # to pass through the directive. Else, strict CORS filtering is applied and any + # invalid request will be rejected. + allow-generic-http-requests = yes + + # Indicates whether the resource supports user credentials. If enabled, the header + # `Access-Control-Allow-Credentials` is set in the response, indicating that the + # actual request can include user credentials. 
Examples of user credentials are: + # cookies, HTTP authentication or client-side certificates. + allow-credentials = yes + + # List of origins that the CORS filter must allow. Can also be set to `*` to allow + # access to the resource from any origin. Controls the content of the + # `Access-Control-Allow-Origin` response header: if parameter is `*` and credentials + # are not allowed, a `*` is set in `Access-Control-Allow-Origin`. Otherwise, the + # origins given in the `Origin` request header are echoed. + # + # Hostname starting with `*.` will match any sub-domain. + # The scheme and the port are always strictly matched. + # + # The actual or preflight request is rejected if any of the origins from the request + # is not allowed. + allowed-origins = "*" + + # List of request headers that can be used when making an actual request. Controls + # the content of the `Access-Control-Allow-Headers` header in a preflight response: + # if parameter is `*`, the headers from `Access-Control-Request-Headers` are echoed. + # Otherwise the parameter list is returned as part of the header. + allowed-headers = "*" + + # List of methods that can be used when making an actual request. The list is + # returned as part of the `Access-Control-Allow-Methods` preflight response header. + # + # The preflight request will be rejected if the `Access-Control-Request-Method` + # header's method is not part of the list. + allowed-methods = ["GET", "POST", "PATCH", "HEAD", "OPTIONS"] + + # List of headers (other than simple response headers) that browsers are allowed to access. + # If not empty, this list is returned as part of the `Access-Control-Expose-Headers` + # header in the actual response. + exposed-headers = [] + + # When set, the amount of seconds the browser is allowed to cache the results of a preflight request. + # This value is returned as part of the `Access-Control-Max-Age` preflight response header. + # If `null`, the header is not added to the preflight response. 
+ max-age = 1800 seconds +} + +worker { + max-post-records = 20 +} + +my-blocking-dispatcher { + type = Dispatcher + executor = "thread-pool-executor" + thread-pool-executor { + fixed-pool-size = 512 + } + throughput = 1 +} diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/log4j.properties b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/log4j.properties new file mode 100644 index 000000000..41a839aaf --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/log4j.properties @@ -0,0 +1,8 @@ +# Root logger option +log4j.rootLogger=WARN, stdout + +# Route the "stdout" appender to System.err (console) +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.err +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n diff --git a/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/log4j2.properties b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/log4j2.properties new file mode 100644 index 000000000..44f3e0d46 --- /dev/null +++ b/JeMPI_Apps/JeMPI_BackupRestoreAPI/src/main/resources/log4j2.properties @@ -0,0 +1,14 @@ +status = warn + +appender.console.type = Console +appender.console.name = LogToConsole +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%-5level] %d{yyyy-MM-dd HH:mm:ss.SSS} %c{1}:%L - %m%n + +logger.app.name = org.jembi.jempi +logger.app.level = debug +logger.app.additivity = false +logger.app.appenderRef.console.ref = LogToConsole + +rootLogger.level = info +rootLogger.appenderRef.stdout.ref = LogToConsole diff --git a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPI.java b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPI.java index ec31bdc83..4f094fe05 100644 --- a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPI.java +++ b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPI.java @@ -163,6
+163,11 @@ public List findExpandedGoldenRecords(final List g return client.findExpandedGoldenRecords(goldenIds).data(); } + public String postGoldenRecord(final RestoreGoldenRecords goldenRecord) { + client.connect(); + return client.restoreGoldenRecord(goldenRecord); + } + public List findGoldenIds() { client.connect(); return client.findGoldenIds(); diff --git a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPIClientInterface.java b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPIClientInterface.java index 2472056ef..b6455b571 100644 --- a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPIClientInterface.java +++ b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/LibMPIClientInterface.java @@ -63,6 +63,8 @@ List fetchGoldenIds( List findLinkCandidates(DemographicData demographicData); + String restoreGoldenRecord(RestoreGoldenRecords goldenRecord); + List findMatchCandidates(DemographicData demographicData); PaginatedResultSet simpleSearchGoldenRecords( diff --git a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/DgraphMutations.java b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/DgraphMutations.java index bb9934230..92bd89015 100644 --- a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/DgraphMutations.java +++ b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/DgraphMutations.java @@ -1,5 +1,7 @@ package org.jembi.jempi.libmpi.dgraph; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.google.protobuf.ByteString; import io.dgraph.DgraphProto; import io.vavr.control.Either; @@ -14,6 +16,7 @@ import org.jembi.jempi.shared.config.DGraphConfig; import org.jembi.jempi.shared.models.*; import org.jembi.jempi.shared.utils.AppUtils; +import com.fasterxml.jackson.databind.ObjectMapper; import java.util.ArrayList; import java.util.List; @@ -25,6 
+28,8 @@ import static org.jembi.jempi.shared.config.Config.FIELDS_CONFIG; final class DgraphMutations { + public static final ObjectMapper OBJECT_MAPPER = + new ObjectMapper().disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS).registerModule(new JavaTimeModule()); private static final Logger LOGGER = LogManager.getLogger(DgraphMutations.class); @@ -51,6 +56,25 @@ static String createInteractionTriple( return DGRAPH_CONFIG.mutationCreateInteractionTriple.formatted(params.toArray(Object[]::new)); } + static String createInteractionTripleWithUid( + final String interactionUid, + final AuxInteractionData auxInteractionData, + final DemographicData demographicData, + final String sourceUID) { + final List params = new ArrayList<>(23); + params.addAll(List.of(interactionUid, sourceUID)); + params.addAll(List.of(interactionUid, AppUtils.quotedValue(auxInteractionData.auxDateCreated().toString()))); + IntStream.range(0, FIELDS_CONFIG.userAuxInteractionFields.size()) + .forEach(i -> params.addAll(List.of( + interactionUid, + AppUtils.quotedValue(auxInteractionData.auxUserFields() + .get(i) + .value())))); + demographicData.fields.forEach(f -> params.addAll(List.of(interactionUid, AppUtils.quotedValue(f.value())))); + params.add(interactionUid); + return DGRAPH_CONFIG.mutationCreateInteractionTripleWithUID.formatted(params.toArray(Object[]::new)); + } + static String createLinkedGoldenRecordTriple( final AuxGoldenRecordData auxGoldenRecordData, final DemographicData demographicData, @@ -75,6 +99,30 @@ static String createLinkedGoldenRecordTriple( return DGRAPH_CONFIG.mutationCreateLinkedGoldenRecordTriple.formatted(params.toArray(Object[]::new)); } + static String createLinkedGoldenRecordTripleWithGoldenUID( + final String goldenUID, + final AuxGoldenRecordData auxGoldenRecordData, + final DemographicData demographicData, + final String interactionUID, + final String sourceUID, + final float score) { + final List params = new ArrayList<>(26); + 
params.addAll(List.of(goldenUID, sourceUID)); + params.addAll(List.of(goldenUID, AppUtils.quotedValue(auxGoldenRecordData.auxDateCreated().toString()))); + params.addAll(List.of(goldenUID, AppUtils.quotedValue(auxGoldenRecordData.auxAutoUpdateEnabled().toString()))); + IntStream.range(0, FIELDS_CONFIG.userAuxGoldenRecordFields.size()) + .forEach(i -> params.addAll(List.of( + goldenUID, + AppUtils.quotedValue(auxGoldenRecordData.auxUserFields() + .get(i) + .value())))); + + demographicData.fields.forEach(f -> params.addAll(List.of(goldenUID, AppUtils.quotedValue(f.value())))); + params.addAll(List.of(goldenUID, interactionUID, score)); + params.add(goldenUID); + return DGRAPH_CONFIG.mutationCreateLinkedGoldenRecordTripleWithUID.formatted(params.toArray(Object[]::new)); + } + LinkInfo addNewDGraphInteraction(final Interaction interaction) { final var result = insertInteraction(interaction); if (result.interactionUID == null) { @@ -101,6 +149,109 @@ boolean updateGoldenRecordField( return updateGoldenRecordPredicate(goldenId, predicate, val); } + private String restoreSourceIdQuery(final SourceId sourceId) { + return """ + <%s> %s . + <%s> %s . + <%s> "SourceId" . 
+ """.formatted(sourceId.uid(), + AppUtils.quotedValue(sourceId.facility()), + sourceId.uid(), + AppUtils.quotedValue(sourceId.patient()), + sourceId.uid()); + } + + public String restoreGoldenRecord(final RestoreGoldenRecords goldenRecord) { + String goldenID = ""; + + for (ApiModels.RestoreInteractionRecord restoreInteraction : goldenRecord.interactionsWithScore()) { + final var sourceId = restoreInteraction.interaction().sourceId(); + final var facility = sourceId.facility(); + final var patient = sourceId.patient(); + + restoreSourceIds(sourceId, facility, patient); + restoreInteraction(restoreInteraction, + sourceId, + restoreInteraction.interaction().sourceId().uid()); + + if (goldenID.isEmpty()) { + createGoldenRecord(goldenRecord, + restoreInteraction.interaction().uid(), + restoreInteraction.interaction().sourceId().uid()); + goldenID = goldenRecord.goldenRecord().uid(); + } else { + updateGoldenRecord(goldenID, + restoreInteraction.interaction().uid(), + restoreInteraction.interaction().sourceId().uid()); + } + } + + return goldenID; + } + + private String createGoldenRecord(final RestoreGoldenRecords goldenRecord, final String interactionID, final String sourceIdUid) { + var goldenDemographicData = DemographicData.fromCustomDemographicData(goldenRecord.goldenRecord().demographicData()); + var goldenData = new Interaction( + null, + null, + AuxInteractionData.fromCustomAuxInteractionData(goldenRecord.goldenRecord().auxInteractionData()), + goldenDemographicData); + + return cloneGoldenRecordFromInteractionWithGoldenUid( + goldenRecord.goldenRecord().uid(), + goldenData.demographicData(), + interactionID, + sourceIdUid, + 1.0F, + new AuxGoldenRecordData(goldenData.auxInteractionData())); + } + + private void updateGoldenRecord(final String goldenID, final String interactionID, final String sourceIdUid) { + var interactionScoreList = new ArrayList(); + var goldenIdScore = new LibMPIClientInterface.GoldenIdScore(goldenID, 1.0F); + + 
interactionScoreList.add(new DgraphPairWithScore(goldenID, interactionID, goldenIdScore.score())); + addScoreFacets(interactionScoreList); + addSourceId(goldenID, sourceIdUid); + } + + private String restoreInteraction(final ApiModels.RestoreInteractionRecord restoreInteraction, final SourceId sourceId, final String sourceIdUid) { + var interactionDemographicData = DemographicData.fromCustomDemographicData( + restoreInteraction.interaction().demographicData()); + + var interaction = new Interaction( + restoreInteraction.interaction().uid(), + sourceId, + AuxInteractionData.fromCustomAuxInteractionData(restoreInteraction.interaction().auxInteractionData()), + interactionDemographicData); + + var interactionNquads = createInteractionTripleWithUid( + interaction.interactionId(), + interaction.auxInteractionData(), + interaction.demographicData(), + sourceIdUid); + + var interactionMutation = DgraphProto.Mutation.newBuilder() + .setSetNquads(ByteString.copyFromUtf8(interactionNquads)) + .build(); + + return DgraphClient.getInstance().doMutateTransaction(interactionMutation); + } + + private String restoreSourceIds(final SourceId sourceId, final String facility, final String patient) { + var restoreSourceIdQuery = restoreSourceIdQuery(sourceId); + var sourceIdMutation = DgraphProto.Mutation.newBuilder() + .setSetNquads(ByteString.copyFromUtf8(restoreSourceIdQuery)) + .build(); + + var sourceIdList = DgraphQueries.findSourceIdList(facility, patient); + if (sourceIdList.isEmpty()) { + return DgraphClient.getInstance().doMutateTransaction(sourceIdMutation); + } else { + return sourceIdList.getFirst().uid(); + } + } + private String createSourceIdTriple(final SourceId sourceId) { final String uuid = UUID.randomUUID().toString(); return """ @@ -267,6 +418,25 @@ private String cloneGoldenRecordFromInteraction( return DgraphClient.getInstance().doMutateTransaction(mutation); } + private String cloneGoldenRecordFromInteractionWithGoldenUid( + final String goldenUID, + final 
DemographicData interaction, + final String interactionUID, + final String sourceUID, + final float score, + final AuxGoldenRecordData customUniqueGoldenRecordData) { + final var command = DgraphMutations.createLinkedGoldenRecordTripleWithGoldenUID( + goldenUID, + customUniqueGoldenRecordData, + interaction, + interactionUID, + sourceUID, + score); + final DgraphProto.Mutation mutation = + DgraphProto.Mutation.newBuilder().setSetNquads(ByteString.copyFromUtf8(command)).build(); + return DgraphClient.getInstance().doMutateTransaction(mutation); + } + private void deleteGoldenRecord(final String goldenId) { final var mutation = DgraphProto .Mutation diff --git a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/LibDgraph.java b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/LibDgraph.java index 61ecf1dad..6b6c3bd83 100644 --- a/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/LibDgraph.java +++ b/JeMPI_Apps/JeMPI_LibMPI/src/main/java/org/jembi/jempi/libmpi/dgraph/LibDgraph.java @@ -75,11 +75,15 @@ public List findExpandedInteractions(final List int public Either> findGoldenRecords(final List ids) { return DgraphQueries.findGoldenRecords(ids); } - public PaginatedResultSet findExpandedGoldenRecords(final List goldenIds) { return DgraphQueries.getExpandedGoldenRecords(goldenIds); } + public String restoreGoldenRecord( + final RestoreGoldenRecords goldenRecord) { + return dgraphMutations.restoreGoldenRecord(goldenRecord); + } + public List findGoldenIds() { return DgraphQueries.getGoldenIds(); } diff --git a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/DGraphConfig.java b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/DGraphConfig.java index b3dca71e0..4f320b826 100644 --- a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/DGraphConfig.java +++ b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/DGraphConfig.java @@ -22,7 
+22,9 @@ public class DGraphConfig { public final String mutationCreateAdditionalNodeFields; public final String mutationCreateAdditionalNodeType; public final String mutationCreateInteractionTriple; + public final String mutationCreateInteractionTripleWithUID; public final String mutationCreateLinkedGoldenRecordTriple; + public final String mutationCreateLinkedGoldenRecordTripleWithUID; public final String goldenRecordFieldNames; public final String expandedGoldenRecordFieldNames; public final String interactionFieldNames; @@ -42,7 +44,9 @@ public DGraphConfig(final JsonConfig jsonConfig) { mutationCreateAdditionalNodeFields = MutationCreateAdditionalNodeFields.create(jsonConfig); mutationCreateAdditionalNodeType = MutationCreateAdditionalNodeType.create(jsonConfig); mutationCreateInteractionTriple = MutationCreateInteractionTriple.create(jsonConfig); + mutationCreateInteractionTripleWithUID = MutationCreateInteractionTripleWithUID.create(jsonConfig); mutationCreateLinkedGoldenRecordTriple = MutationCreateLinkedGoldenRecordTriple.create(jsonConfig); + mutationCreateLinkedGoldenRecordTripleWithUID = MutationCreateLinkedGoldenRecordTripleWithUID.create(jsonConfig); goldenRecordFieldNames = GoldenRecordFieldNames.create(jsonConfig); expandedGoldenRecordFieldNames = ExpandedGoldenRecordFieldNames.create(jsonConfig); interactionFieldNames = InteractionFieldNames.create(jsonConfig); diff --git a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/dgraph/MutationCreateInteractionTripleWithUID.java b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/dgraph/MutationCreateInteractionTripleWithUID.java new file mode 100644 index 000000000..9d52c76af --- /dev/null +++ b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/dgraph/MutationCreateInteractionTripleWithUID.java @@ -0,0 +1,34 @@ +package org.jembi.jempi.shared.config.dgraph; + +import org.jembi.jempi.shared.config.input.JsonConfig; + +import 
java.util.stream.Collectors; +import java.util.stream.IntStream; + +public final class MutationCreateInteractionTripleWithUID { + + private MutationCreateInteractionTripleWithUID() { + } + + public static String create(final JsonConfig jsonConfig) { + return """ + <%s> <%s> . + <%s> %s^^ . + """ + + IntStream.range(0, jsonConfig.auxInteractionFields().size()) + .filter(i -> !(jsonConfig.auxInteractionFields().get(i).scFieldName().equals("aux_date_created"))) + .mapToObj(i -> "<%%s> ")).collect( + Collectors.joining(System.lineSeparator())) + + System.lineSeparator() + + jsonConfig.demographicFields() + .stream() + .map(demographicField -> "<%%s> ")) + .collect(Collectors.joining(System.lineSeparator())) + + System.lineSeparator() + + """ + <%s> "Interaction" . + """; + } + +} diff --git a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/dgraph/MutationCreateLinkedGoldenRecordTripleWithUID.java b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/dgraph/MutationCreateLinkedGoldenRecordTripleWithUID.java new file mode 100644 index 000000000..006ca8138 --- /dev/null +++ b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/config/dgraph/MutationCreateLinkedGoldenRecordTripleWithUID.java @@ -0,0 +1,40 @@ +package org.jembi.jempi.shared.config.dgraph; + +import org.jembi.jempi.shared.config.input.JsonConfig; + +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public final class MutationCreateLinkedGoldenRecordTripleWithUID { + + private MutationCreateLinkedGoldenRecordTripleWithUID() { + } + + public static String create(final JsonConfig jsonConfig) { + return """ + <%s> <%s> . + <%s> %s^^ . 
+ <%s> %s^^ .""" + + System.lineSeparator() + + IntStream.range(0, jsonConfig.auxGoldenRecordFields().size()) + .filter(i -> !(jsonConfig.auxGoldenRecordFields().get(i).scFieldName().equals("aux_date_created") + || jsonConfig.auxGoldenRecordFields() + .get(i) + .scFieldName() + .equals("aux_auto_update_enabled"))) + .mapToObj(i -> "<%%s> ")).collect( + Collectors.joining(System.lineSeparator())) + + System.lineSeparator() + + jsonConfig.demographicFields() + .stream() + .map(demographicField -> "<%%s> ")) + .collect(Collectors.joining(System.lineSeparator())) + + System.lineSeparator() + + """ + <%s> <%s> (score=%f) . + <%s> "GoldenRecord" . + """; + } + +} diff --git a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/ApiModels.java b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/ApiModels.java index 46bb1dee5..219433701 100644 --- a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/ApiModels.java +++ b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/ApiModels.java @@ -104,6 +104,26 @@ public record ApiCrRegisterRequest( @JsonProperty("demographicData") JsonNode demographicData) { } + @JsonInclude(JsonInclude.Include.NON_NULL) + public record RestoreGoldenRecord( + @JsonProperty("uid") String uid, + @JsonProperty("sourceId") List sourceId, + @JsonProperty("uniqueGoldenRecordData") JsonNode auxInteractionData, + @JsonProperty("demographicData") JsonNode demographicData) { + } + @JsonInclude(JsonInclude.Include.NON_NULL) + public record RestoreInteraction( + @JsonProperty("uid") String uid, + @JsonProperty("sourceId") SourceId sourceId, + @JsonProperty("uniqueInteractionData") JsonNode auxInteractionData, + @JsonProperty("demographicData") JsonNode demographicData) { + } + @JsonInclude(JsonInclude.Include.NON_NULL) + public record RestoreInteractionRecord( + @JsonProperty("interaction") RestoreInteraction interaction, + @JsonProperty("score") Float score) { + } + 
@JsonInclude(JsonInclude.Include.NON_NULL) public record ApiCrLinkToGidUpdateRequest( @JsonProperty("gid") String gid, diff --git a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/GlobalConstants.java b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/GlobalConstants.java index 292c240fb..101d561a0 100644 --- a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/GlobalConstants.java +++ b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/GlobalConstants.java @@ -63,6 +63,7 @@ public final class GlobalConstants { public static final String SEGMENT_VALIDATE_OAUTH = "authenticate"; public static final String SEGMENT_LOGOUT = "logout"; public static final String SEGMENT_CURRENT_USER = "currentUser"; + public static final String SEGMENT_POST_GOLDEN_RECORD_RESTORE = "restoreGoldenRecord"; // TIMEOUTS public static final int TIMEOUT_DGRAPH_RECONNECT_RETRIES = 20; diff --git a/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/RestoreGoldenRecords.java b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/RestoreGoldenRecords.java new file mode 100644 index 000000000..cdd144542 --- /dev/null +++ b/JeMPI_Apps/JeMPI_LibShared/src/main/java/org/jembi/jempi/shared/models/RestoreGoldenRecords.java @@ -0,0 +1,11 @@ +package org.jembi.jempi.shared.models; + +import com.fasterxml.jackson.annotation.JsonInclude; + +import java.util.List; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public record RestoreGoldenRecords( + ApiModels.RestoreGoldenRecord goldenRecord, + List interactionsWithScore) { +} diff --git a/JeMPI_Apps/build-all-java.sh b/JeMPI_Apps/build-all-java.sh index f9c598903..d0e9ca527 100755 --- a/JeMPI_Apps/build-all-java.sh +++ b/JeMPI_Apps/build-all-java.sh @@ -3,9 +3,6 @@ set -e set -u -# cp -L -f ../JeMPI_Configuration/config-api.json ./JeMPI_API/src/main/resources/config-api.json -# cp -L -f ./JeMPI_Configuration/config-api.json 
./JeMPI_API_KC/src/main/resources/config-api.json - mvn clean package pushd JeMPI_EM_Scala sbt clean assembly @@ -35,6 +32,9 @@ popd pushd JeMPI_Bootstrapper ./build.sh || exit 1 popd +pushd JeMPI_BackupRestoreAPI + ./build.sh || exit 1 +popd pushd JeMPI_AsyncReceiver ./push.sh popd @@ -60,3 +60,7 @@ pushd JeMPI_Bootstrapper ./push.sh popd +pushd JeMPI_BackupRestoreAPI + ./push.sh +popd + diff --git a/JeMPI_Apps/pom.xml b/JeMPI_Apps/pom.xml index 5c70ae04a..76ae5ec4c 100644 --- a/JeMPI_Apps/pom.xml +++ b/JeMPI_Apps/pom.xml @@ -539,6 +539,7 @@ JeMPI_Controller JeMPI_Linker JeMPI_Bootstrapper + JeMPI_BackupRestoreAPI diff --git a/devops/linux/docker/backup_restore/dgraph-backup-api.sh b/devops/linux/docker/backup_restore/dgraph-backup-api.sh new file mode 100755 index 000000000..b47d5e5c3 --- /dev/null +++ b/devops/linux/docker/backup_restore/dgraph-backup-api.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +set -e +set -u +pushd . + BACKUP_DATE_TIME=$1 + SCRIPT_DIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P) + cd ${SCRIPT_DIR}/.. 
+ JEMPI_DOCKER_HOME=$PWD + # JEMPI_HOME = $1 + down_dir="$JEMPI_DOCKER_HOME/deployment/down" + reboot_dir="$JEMPI_DOCKER_HOME/deployment/reboot" + backup_restore_dir="$JEMPI_DOCKER_HOME/backup_restore" + + python_cmd=$(which python3 || which python) + echo $python_cmd + # Function to stop services + stop_services() { + pushd "$down_dir" + echo "Stopping API service" + source d-stack-stop-services.sh + popd + } + + # Function to start backup restore API service + start_backup_restore_service() { + pushd "$reboot_dir" + echo "Starting Backup Restore API service" + source d-stack-start-backup-restore-api-services.sh + popd + } + + # Function to backup data + backup_data() { + pushd "$backup_restore_dir" + sleep 20 + echo "Started Backup through API" + $python_cmd dgraph-backup.py $BACKUP_DATE_TIME + sleep 10 + # sudo bash dgraph-backup.sh + # sudo bash postgres-backup.sh + popd + } + + # Function to start services + start_services() { + pushd "$reboot_dir" + echo "Starting API service" + source d-stack-start-services.sh + popd + } + + # Function to stop backup restore API service + stop_backup_restore_service() { + pushd "$down_dir" + echo "Stopping Backup Restore API service" + source d-stack-stop-backup-restore-api-services.sh + popd + } + + stop_services + start_backup_restore_service + backup_data + start_services + stop_backup_restore_service +popd \ No newline at end of file diff --git a/devops/linux/docker/backup_restore/dgraph-backup.py b/devops/linux/docker/backup_restore/dgraph-backup.py new file mode 100644 index 000000000..b6250bb39 --- /dev/null +++ b/devops/linux/docker/backup_restore/dgraph-backup.py @@ -0,0 +1,63 @@ +import requests +import json +from datetime import datetime +import os +from dotenv import dotenv_values +import sys + +env_vars = dotenv_values('../conf.env') +host = env_vars['NODE1_IP'] +port = "50010" +backup_path = env_vars['DGRAPH_BACKUP_DIRECTORY'] + +if len(sys.argv) >= 2: + current_datetime = sys.argv[1] +else: + current_datetime 
= datetime.now().strftime('%Y%m%d_%H%M%S') + +def create_folder_if_not_exists(folder_path): + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + +# Function to fetch data for a single ID +def fetch_data_for_id(gid): + get_expanded_golden_record = f'http://{host}:{port}/JeMPI/expandedGoldenRecord' + payload = json.dumps({"gid": gid}) + headers = {'Content-Type': 'application/json'} + response = requests.post(get_expanded_golden_record, headers=headers, data=payload) + return response.json() if response.status_code == 200 else None + + +# Function to backup Dgraph data +def backup_dgraph_data(): + get_gid_url = f'http://{host}:{port}/JeMPI/gidsAll' + print("Fetching list of all golden record id's") + response = requests.get(get_gid_url) + if response.status_code == 200: + new_golden_records = response.json() + backup_data = [] + for gid in new_golden_records.get("records"): + golden_record_data = fetch_data_for_id(gid) + if golden_record_data: + backup_data.append(golden_record_data) + + file_name = f'dgraph_backup_{current_datetime}.json' + print(f'Total {str(len(backup_data))} Golden records backed up.') + + backup_path_folder = create_backup_json(backup_data, file_name) + + print(f'All data saved to {backup_path_folder + "/" + file_name}') + else: + print('Failed to retrieve list of IDs from the API') + + +def create_backup_json(backup_data, file_name): + backup_path_folder = os.path.join(backup_path, current_datetime) + create_folder_if_not_exists(backup_path_folder) + with open(os.path.join(backup_path_folder, file_name), 'w') as json_file: + json.dump(backup_data, json_file, indent=4) + return backup_path_folder + + +backup_dgraph_data() diff --git a/devops/linux/docker/backup_restore/dgraph-restore-api.py b/devops/linux/docker/backup_restore/dgraph-restore-api.py new file mode 100644 index 000000000..ba6c0a0d0 --- /dev/null +++ b/devops/linux/docker/backup_restore/dgraph-restore-api.py @@ -0,0 +1,91 @@ +import json +import requests +import sys 
+import os +from datetime import datetime +from dotenv import dotenv_values +import uuid + +env_vars = dotenv_values('../conf.env') + +host = env_vars['NODE1_IP'] +port = "50010" + + +def main(json_file): + # Ensure JSON file exists + if not os.path.exists(json_file): + print(f"File {json_file} not found.") + sys.exit(1) + + # Read the JSON file + with open(json_file, 'r') as file: + data = json.load(file) + + # Check if the data is a list + if not isinstance(data, list): + print(f"Expected a list in the JSON file but got {type(data).__name__}.") + sys.exit(1) + + # Process the data + process_json_data(data) + + print("JSON data processed successfully.") + + +def convert_datetime_format(date_str): + input_formats = [ + "%Y-%m-%dT%H:%M:%S.%f", # With milliseconds + "%Y-%m-%dT%H:%M:%S" # Without milliseconds + ] + for input_format in input_formats: + try: + dt = datetime.strptime(date_str[:26], input_format) # Take only the first 26 characters to match the format + break + except ValueError: + continue + else: + return date_str # If the format is not correct, return the original string + + output_format = "%Y-%m-%dT%H:%M:%S.%fZ" + output_str = dt.strftime(output_format) + output_str = output_str[:23] + 'Z' # Keep only the first 2 decimal places of the seconds part + return output_str + +def process_json_data(golden_records): + + for golden_record in golden_records: + + golden_record['goldenRecord']['uniqueGoldenRecordData']['auxDateCreated'] = convert_datetime_format(golden_record['goldenRecord']['uniqueGoldenRecordData']['auxDateCreated']) + unique_golden_record_data = golden_record['goldenRecord']['uniqueGoldenRecordData'] + if not unique_golden_record_data.get('auxGid'): + unique_golden_record_data['auxGid'] = golden_record['goldenRecord']['uid'] + + for interaction in golden_record['interactionsWithScore']: + interaction['interaction']['uniqueInteractionData']['auxDateCreated'] = convert_datetime_format( + 
interaction['interaction']['uniqueInteractionData']['auxDateCreated']) + unique_interaction_data = interaction['interaction']['uniqueInteractionData'] + if not unique_interaction_data.get('auxIid'): + unique_interaction_data['auxIid'] = interaction['interaction']['uid'] + print("------------------------------------------------------") + print(golden_record) + response = send_golden_record_to_api(golden_record) + print(response.text) + +def send_golden_record_to_api(golden_record_payload): + get_expanded_golden_record_url = f'http://{host}:{port}/JeMPI/restoreGoldenRecord' + payload = json.dumps(golden_record_payload) + headers = { + 'Content-Type': 'application/json' + } + response = requests.post(get_expanded_golden_record_url, headers=headers, data=payload) + return response + +if __name__ == "__main__": + # Check if a JSON file path is provided as an argument + if len(sys.argv) < 2: + print("Usage: python read_json.py ") + sys.exit(1) + + json_file = sys.argv[1] + main(json_file) diff --git a/devops/linux/docker/backup_restore/dgraph-restore-api.sh b/devops/linux/docker/backup_restore/dgraph-restore-api.sh new file mode 100755 index 000000000..af1129656 --- /dev/null +++ b/devops/linux/docker/backup_restore/dgraph-restore-api.sh @@ -0,0 +1,98 @@ +#!/bin/bash +source ../conf.env +#Backup Folder Name + + BACKUP_DATE_TIME=$1 + SCRIPT_DIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P) + cd ${SCRIPT_DIR}/.. 
+ JEMPI_DOCKER_HOME=$PWD + # JEMPI_HOME = $1 + down_dir="$JEMPI_DOCKER_HOME/deployment/down" + reboot_dir="$JEMPI_DOCKER_HOME/deployment/reboot" + backup_restore_dir="$JEMPI_DOCKER_HOME/backup_restore" + + python_cmd=$(which python3 || which python) + echo $python_cmd + # Function to stop services + stop_services() { + pushd "$down_dir" + echo "Stopping API service" + source d-stack-stop-services.sh + popd + } + # Function to start backup restore API service + start_backup_restore_service() { + pushd "$reboot_dir" + echo "Starting Backup Restore API service" + source d-stack-start-backup-restore-api-services.sh + popd + } + + start_services() { + pushd "$reboot_dir" + echo "Starting API service" + source d-stack-start-services.sh + popd + } + + # Function to stop backup restore API service + stop_backup_restore_service() { + pushd "$down_dir" + echo "Stopping Backup Restore API service" + source d-stack-stop-backup-restore-api-services.sh + popd + } + +while true; do + echo "Backup API" + # Ask the user to enter a folder name + echo "Backup folder Path:- ${DGRAPH_BACKUP_DIRECTORY}" + pushd ${DGRAPH_BACKUP_DIRECTORY} + echo + echo "Recent 5 Backups list" + ls -lt --time=creation --sort=time | grep '^d' | tail -n 5 + echo + popd + read -p "Please enter your Dgraph Backup Folder Name: " BACKUP_FOLDER_NAME + + # Check if the folder exists + if [ -d "${DGRAPH_BACKUP_DIRECTORY}/$BACKUP_FOLDER_NAME" ]; then + echo "Folder '$BACKUP_FOLDER_NAME' exists!" + break # Exit the loop if the folder exists + else + echo "Folder '$BACKUP_FOLDER_NAME' does not exist, at ${DGRAPH_BACKUP_DIRECTORY}. 
" + echo "Please try again" + fi +done + +BACKUP_DIR="${DGRAPH_BACKUP_DIRECTORY}/$BACKUP_FOLDER_NAME" +backup_data() { + pushd "$backup_restore_dir" + local dir=$1 + echo "$backup_restore_dir" + echo "$dir" + sleep 20 + echo "Started Restore through API" + $python_cmd dgraph-restore-api.py $dir + sleep 10 + # sudo bash dgraph-backup.sh + # sudo bash postgres-backup.sh + popd + } + +stop_services +start_backup_restore_service + +./helper/bootstrapper/bootstrapper-docker.sh data resetAll + +for backup_file in ${BACKUP_DIR}/dgraph_backup*.json; do + # Assuming the first directory is for alpha nodes + echo "innnn" + backup_data $backup_file +done + +start_services +stop_backup_restore_service + + +echo $BACKUP_DIR diff --git a/devops/linux/docker/backup_restore/postgres-backup.sh b/devops/linux/docker/backup_restore/postgres-backup.sh index f5874ce60..33abe1b9c 100755 --- a/devops/linux/docker/backup_restore/postgres-backup.sh +++ b/devops/linux/docker/backup_restore/postgres-backup.sh @@ -1,12 +1,18 @@ #!/bin/bash source ../conf.env # Load Database Credentials from Environment Variables - +if [ -z "$1" ]; then + # Argument is empty, so set current datetime + BACKUP_DATE_TIME=$(date +%Y%m%d_%H%M%S) +else + # Argument is provided, use it as datetime + BACKUP_DATE_TIME=$1 +fi DB_NAME="${POSTGRESQL_DATABASE}" DB_USER="${POSTGRESQL_USERNAME}" DB_PASSWORD="${POSTGRESQL_PASSWORD}" DB_HOST="${POSTGRES_HOST:-localhost}" -BACKUP_PATH="${POSTGRES_BACKUP_DIRECTORY}/$(date +%Y%m%d_%H%M%S)" +BACKUP_PATH="${POSTGRES_BACKUP_DIRECTORY}/$BACKUP_DATE_TIME" OLD_LOGS_DIR="${BACKUP_PATH}/old_logs" # Directory to store old logs databases=("$POSTGRESQL_DATABASE" "$POSTGRESQL_USERS_DB" "$POSTGRESQL_NOTIFICATIONS_DB" "$POSTGRESQL_AUDIT_DB" "$POSTGRESQL_KC_TEST_DB") @@ -15,7 +21,7 @@ databases=("$POSTGRESQL_DATABASE" "$POSTGRESQL_USERS_DB" "$POSTGRESQL_NOTIFICATI [ ! -d "$BACKUP_PATH" ] && mkdir -p "$BACKUP_PATH" [ ! 
-d "$OLD_LOGS_DIR" ] && mkdir -p "$OLD_LOGS_DIR" -LOG_FILE="${BACKUP_PATH}/$(date +%Y%m%d_%H%M%S).log" +LOG_FILE="${BACKUP_PATH}/$BACKUP_DATE_TIME.log" # Check for Remote Server Details REMOTE_SERVER="${POSTGRES_BACKUP_REMOTE_SERVER}" @@ -28,10 +34,10 @@ backup_database() { # Loop through each database and dump it for db in "${databases[@]}"; do echo "db.. $db " - backup_file="${BACKUP_PATH}/${db}--$(date +%Y%m%d_%H%M%S).sql" + backup_file="${BACKUP_PATH}/${db}--$BACKUP_DATE_TIME.sql" echo "$(date) - Starting backup for database: ${db}" >> "${LOG_FILE}" - PGPASSWORD=$DB_PASSWORD pg_dump -h $DB_HOST -U $DB_USER -d $db -F c -f "${BACKUP_PATH}/${db}--$(date +%Y%m%d_%H%M%S).sql" + PGPASSWORD=$DB_PASSWORD pg_dump -h $DB_HOST -U $DB_USER -d $db -F c -f "${BACKUP_PATH}/${db}--$BACKUP_DATE_TIME.sql" echo "$(date) - Backup completed for database: ${db}" >> "${LOG_FILE}" done echo "Database Postgres Backup completed." @@ -42,7 +48,7 @@ copy_to_remote() { if [ -n "${REMOTE_SERVER}" ] && [ -n "${REMOTE_PATH}" ]; then for db in "${databases[@]}"; do echo "$(date) - Starting remote transfer" >> "${LOG_FILE}" - scp "${BACKUP_PATH}/${db}_$(date +%Y%m%d_%H%M%S).sql" ${REMOTE_SERVER}:${REMOTE_PATH} + scp "${BACKUP_PATH}/${db}--$BACKUP_DATE_TIME.sql" ${REMOTE_SERVER}:${REMOTE_PATH} echo "$(date) - Remote transfer completed" >> "${LOG_FILE}" done else diff --git a/devops/linux/docker/conf/env/conf-env-high-1-pc.template b/devops/linux/docker/conf/env/conf-env-high-1-pc.template index 31724e0b5..648479ae3 100644 --- a/devops/linux/docker/conf/env/conf-env-high-1-pc.template +++ b/devops/linux/docker/conf/env/conf-env-high-1-pc.template @@ -13,6 +13,9 @@ export STACK_NAME=jempi export API_IP=api export API_HTTP_PORT=${API_HTTP_PORT} +export BACKUP_RESTORE_API_IP=backup-restore-api +export BACKUP_RESTORE_API_HTTP_PORT=${BACKUP_RESTORE_API_HTTP_PORT} + export API_KC_IP=api_kc export API_HTTP_KC_PORT=${API_KC_HTTP_PORT} @@ -51,6 +54,7 @@ export 
DATA_POSTGRESQL_DB_DIR=${PROJECT_DATA_DIR}/postgres_db export POSTGRES_BACKUP_DIRECTORY=${PROJECT_DATA_DIR}/backups/postgres export DGRAPH_BACKUP_DIRECTORY=${PROJECT_DATA_DIR}/backups/dgraph +export DATA_SYSTEM_CONFIG_DIR=${PROJECT_DIR}/data-config export DATA_DIR_ASYNC_RECEIVER=${PROJECT_DATA_APPS_DIR}/async_receiver export DATA_DIR_SYNC_RECEIVER=${PROJECT_DATA_APPS_DIR}/sync_receiver export DATA_DIR_ETL=${PROJECT_DATA_APPS_DIR}/etl @@ -98,6 +102,10 @@ export PLACEMENT_REGISTRY=${NODE1} export PLACEMENT_POSTGRESQL=${NODE1} export PLACEMENT_KEYCLOAK_TEST_SERVER=${NODE1} +export PLACEMENT_NODE1=${PLACEMENT_NODE1} +export PLACEMENT_NODE2=${PLACEMENT_NODE2} +export PLACEMENT_NODE3=${PLACEMENT_NODE3} + export POSTGRESQL_USERNAME=${POSTGRESQL_USERNAME} export POSTGRESQL_PASSWORD=${POSTGRESQL_PASSWORD} export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE} diff --git a/devops/linux/docker/conf/env/conf-env-low-1-pc.template b/devops/linux/docker/conf/env/conf-env-low-1-pc.template index 92afce8c1..3713cc783 100644 --- a/devops/linux/docker/conf/env/conf-env-low-1-pc.template +++ b/devops/linux/docker/conf/env/conf-env-low-1-pc.template @@ -13,6 +13,9 @@ export STACK_NAME=jempi export API_IP=api export API_HTTP_PORT=${API_HTTP_PORT} +export BACKUP_RESTORE_API_IP=backup-restore-api +export BACKUP_RESTORE_API_HTTP_PORT=${BACKUP_RESTORE_API_HTTP_PORT} + export API_KC_IP=api_kc export API_HTTP_KC_PORT=${API_KC_HTTP_PORT} diff --git a/devops/linux/docker/conf/env/create-env-linux-high-1.sh b/devops/linux/docker/conf/env/create-env-linux-high-1.sh index ea14e16a5..f5b97ae19 100755 --- a/devops/linux/docker/conf/env/create-env-linux-high-1.sh +++ b/devops/linux/docker/conf/env/create-env-linux-high-1.sh @@ -11,6 +11,9 @@ export PROJECT_DATA_MONITOR_DIR=${PROJECT_DIR}/docker_data/data-monitor export NODE1=$(hostname) # export NODE1_IP=$(ifconfig | grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | grep -v 127.0.0.1 | head -1 | awk '{ print $2 }') export NODE1_IP=$(hostname -i) +export 
PLACEMENT_NODE1=jempi1 +export PLACEMENT_NODE2=jempi2 +export PLACEMENT_NODE3=jempi3 export SCALE_KEYCLOAK_TEST_SERVER=1 export SCALE_KAFKA_01=1 @@ -24,7 +27,7 @@ export SCALE_ALPHA_02=1 export SCALE_ALPHA_03=1 export SCALE_RATEL=1 export SCALE_POSTGRESQL=1 -export SCALE_LINKER=1 +export SCALE_LINKER=3 export POSTGRESQL_USERNAME="postgres" export POSTGRESQL_PASSWORD="postgres" @@ -40,6 +43,7 @@ export DGRAPH_PORTS="9080,9081,9082" # Ports export API_HTTP_PORT=50000 +export BACKUP_RESTORE_API_HTTP_PORT=50000 export API_KC_HTTP_PORT=50000 export ETL_HTTP_PORT=50000 export CONTROLLER_HTTP_PORT=50000 diff --git a/devops/linux/docker/conf/env/create-env-linux-low-1.sh b/devops/linux/docker/conf/env/create-env-linux-low-1.sh index 1b75ad830..eb9536f5e 100755 --- a/devops/linux/docker/conf/env/create-env-linux-low-1.sh +++ b/devops/linux/docker/conf/env/create-env-linux-low-1.sh @@ -34,6 +34,7 @@ export DGRAPH_PORTS="9080" # Ports export API_HTTP_PORT=50000 +export BACKUP_RESTORE_API_HTTP_PORT=50000 export API_KC_HTTP_PORT=50000 export ETL_HTTP_PORT=50000 export CONTROLLER_HTTP_PORT=50000 diff --git a/devops/linux/docker/conf/env/create-env-wsl-low-1.sh b/devops/linux/docker/conf/env/create-env-wsl-low-1.sh index 9d6db1c93..467d49aa8 100755 --- a/devops/linux/docker/conf/env/create-env-wsl-low-1.sh +++ b/devops/linux/docker/conf/env/create-env-wsl-low-1.sh @@ -34,6 +34,7 @@ export DGRAPH_PORTS="9080" # Ports export API_HTTP_PORT=50000 +export BACKUP_RESTORE_API_HTTP_PORT=50000 export API_KC_HTTP_PORT=50000 export ETL_HTTP_PORT=50000 export CONTROLLER_HTTP_PORT=50000 diff --git a/devops/linux/docker/conf/haproxy/haproxy.cfg b/devops/linux/docker/conf/haproxy/haproxy.cfg index 233ea063d..39c1c2ba3 100644 --- a/devops/linux/docker/conf/haproxy/haproxy.cfg +++ b/devops/linux/docker/conf/haproxy/haproxy.cfg @@ -40,6 +40,10 @@ frontend fe_api bind *:50000 use_backend be_api if { path_beg /JeMPI } +frontend fe_backup-restore-api + bind *:50010 + use_backend fe_backup-restore-api 
if { path_beg /JeMPI } + frontend fe_api_kc bind *:50001 use_backend be_api_kc if { path_beg /JeMPI } @@ -54,6 +58,7 @@ frontend fe_stats-http + backend be_keycloak server keycloak jempi_keycloak-test-server:8080 check resolvers docker init-addr libc,none inter 1000 fall 300 @@ -61,6 +66,11 @@ backend be_api balance roundrobin server-template jempi_api- 1 jempi_api:50000 check resolvers docker init-addr libc,none +backend fe_backup-restore-api + balance roundrobin + server-template jempi_backup-restore-api- 1 jempi_backup-restore-api:50000 check resolvers docker init-addr libc,none + + backend be_api_kc balance roundrobin server-template jempi_api-kc- 1 jempi_api-kc:50000 check resolvers docker init-addr libc,none @@ -75,3 +85,4 @@ backend be_stats-http stats refresh 15s stats show-legends stats show-node + diff --git a/devops/linux/docker/conf/images/conf-app-images.sh b/devops/linux/docker/conf/images/conf-app-images.sh index 34ba9c403..7f51604d0 100644 --- a/devops/linux/docker/conf/images/conf-app-images.sh +++ b/devops/linux/docker/conf/images/conf-app-images.sh @@ -48,3 +48,7 @@ export UI_IMAGE=ui:1.0-SNAPSHOT export BOOTSTRAPPER_HUB_IMAGE=$JEMPI_HUB_NAMESPACE-bootstrapper:1.0-SNAPSHOT export BOOTSTRAPPER_IMAGE=bootstrapper:1.0-SNAPSHOT export BOOTSTRAPPER_JAR=Bootstrapper-1.0-SNAPSHOT-spring-boot.jar + +export BACKUP_RESTORE_API_HUB_IMAGE=$JEMPI_HUB_NAMESPACE-backuprestoreapi:1.0-SNAPSHOT +export BACKUP_RESTORE_API_IMAGE=backuprestoreapi:1.0-SNAPSHOT +export BACKUP_RESTORE_API_JAR=JeMPI_BackupRestoreAPI-1.0-SNAPSHOT-spring-boot.jar \ No newline at end of file diff --git a/devops/linux/docker/conf/stack/docker-stack-high-0.yml b/devops/linux/docker/conf/stack/docker-stack-high-0.yml index 60671ab9e..b460c5110 100644 --- a/devops/linux/docker/conf/stack/docker-stack-high-0.yml +++ b/devops/linux/docker/conf/stack/docker-stack-high-0.yml @@ -340,7 +340,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == 
${PLACEMENT_DGRAPH_ALPHA_01} + - node.labels.name == $PLACEMENT_NODE1 command: dgraph alpha --my=alpha-01:7080 --zero=zero-01:5080 --cache "size-mb=10240; percentage=50,30,20;" --security whitelist=0.0.0.0/0 --telemetry "reports=false; sentry=false;" alpha-02: @@ -373,7 +373,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ALPHA_02} + - node.labels.name == $PLACEMENT_NODE2 command: dgraph alpha --my=alpha-02:7081 --zero=zero-01:5080 --cache "size-mb=10240; percentage=50,30,20;" --security whitelist=0.0.0.0/0 -o 1 --telemetry "reports=false; sentry=false;" alpha-03: @@ -406,7 +406,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ALPHA_03} + - node.labels.name == $PLACEMENT_NODE3 command: dgraph alpha --my=alpha-03:7082 --zero=zero-01:5080 --cache "size-mb=10240; percentage=50,30,20;" --security whitelist=0.0.0.0/0 -o 2 --telemetry "reports=false; sentry=false;" ratel: @@ -557,6 +557,10 @@ services: source: $DATA_DIR_EM_SCALA/data target: /app/data read_only: true + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: 0 @@ -602,6 +606,10 @@ services: source: $DATA_DIR_LINKER/conf target: /app/conf read_only: true + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: 0 @@ -649,6 +657,10 @@ services: source: $DATA_DIR_ASYNC_RECEIVER/csv target: /app/csv read_only: false + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: 0 @@ -707,6 +719,10 @@ services: source: $DATA_DIR_ASYNC_RECEIVER/csv target: /app/csv read_only: false + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: 0 @@ -766,6 +782,11 @@ services: image: ${IMAGE_REGISTRY}$BOOTSTRAPPER_IMAGE networks: - 
backend + volumes: + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true environment: POSTGRESQL_IP: postgresql POSTGRESQL_PORT: 5432 @@ -782,6 +803,50 @@ services: DGRAPH_PORTS: ${DGRAPH_PORTS} deploy: mode: global + + backup-restore-api: + image: ${IMAGE_REGISTRY}${BACKUP_RESTORE_API_IMAGE} + environment: + LOG4J2_LEVEL: "DEBUG" + POSTGRESQL_IP: postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + POSTGRESQL_NOTIFICATIONS_DB: ${POSTGRESQL_NOTIFICATIONS_DB} + POSTGRESQL_AUDIT_DB: ${POSTGRESQL_AUDIT_DB} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_SERVERS} + KAFKA_APPLICATION_ID: app-id-backuprestoreapi + DGRAPH_HOSTS: ${DGRAPH_HOSTS} + DGRAPH_PORTS: ${DGRAPH_PORTS} + BACKUP_RESTORE_API_HTTP_PORT: ${BACKUP_RESTORE_API_HTTP_PORT} + API_HTTP_PORT: ${API_HTTP_PORT} + LINKER_IP: linker + LINKER_HTTP_PORT: 50000 + CONTROLLER_IP: controller + CONTROLLER_HTTP_PORT: ${CONTROLLER_HTTP_PORT} + networks: + - backend + volumes: + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true + deploy: + mode: replicated + replicas: 0 + resources: + limits: + memory: ${API_RAM_LIMIT} + update_config: + parallelism: 1 + delay: 10s + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 0 + placement: + constraints: + - node.labels.name == ${PLACEMENT_API} ui: image: ${IMAGE_REGISTRY}${UI_IMAGE} diff --git a/devops/linux/docker/conf/stack/docker-stack-high-1.yml b/devops/linux/docker/conf/stack/docker-stack-high-1.yml index 47d037991..182d1517a 100644 --- a/devops/linux/docker/conf/stack/docker-stack-high-1.yml +++ b/devops/linux/docker/conf/stack/docker-stack-high-1.yml @@ -129,7 +129,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == $PLACEMENT_KAFKA_01 + - node.labels.name == $PLACEMENT_NODE1 kafka-02: image: ${IMAGE_REGISTRY}$KAFKA_IMAGE @@ -169,7 +169,7 @@ services: condition: on-failure placement: 
constraints: - - node.labels.name == $PLACEMENT_KAFKA_02 + - node.labels.name == $PLACEMENT_NODE2 kafka-03: image: ${IMAGE_REGISTRY}$KAFKA_IMAGE @@ -209,7 +209,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == $PLACEMENT_KAFKA_03 + - node.labels.name == $PLACEMENT_NODE3 zero-01: image: ${IMAGE_REGISTRY}${DGRAPH_IMAGE} @@ -241,7 +241,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ZERO_01} + - node.labels.name == $PLACEMENT_NODE1 command: dgraph zero --my=zero-01:5080 --replicas 1 --telemetry "reports=false; sentry=false;" zero-02: @@ -274,7 +274,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ZERO_02} + - node.labels.name == $PLACEMENT_NODE2 command: dgraph zero --my=zero-02:5080 --raft idx=2 --peer zero-01:5080 --replicas 1 --telemetry "reports=false; sentry=false;" zero-03: @@ -307,7 +307,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ZERO_02} + - node.labels.name == $PLACEMENT_NODE3 command: dgraph zero --my=zero-03:5080 --raft idx=3 --peer zero-01:5080 --replicas 1 --telemetry "reports=false; sentry=false;" alpha-01: @@ -340,7 +340,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ALPHA_01} + - node.labels.name == $PLACEMENT_NODE1 command: dgraph alpha --my=alpha-01:7080 --zero=zero-01:5080 --cache "size-mb=10240; percentage=50,30,20;" --security whitelist=0.0.0.0/0 --telemetry "reports=false; sentry=false;" alpha-02: @@ -373,7 +373,7 @@ services: condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ALPHA_02} + - node.labels.name == $PLACEMENT_NODE2 command: dgraph alpha --my=alpha-02:7081 --zero=zero-01:5080 --cache "size-mb=10240; percentage=50,30,20;" --security whitelist=0.0.0.0/0 -o 1 --telemetry "reports=false; sentry=false;" alpha-03: @@ -406,7 +406,7 @@ services: 
condition: on-failure placement: constraints: - - node.labels.name == ${PLACEMENT_DGRAPH_ALPHA_03} + - node.labels.name == $PLACEMENT_NODE3 command: dgraph alpha --my=alpha-03:7082 --zero=zero-01:5080 --cache "size-mb=10240; percentage=50,30,20;" --security whitelist=0.0.0.0/0 -o 2 --telemetry "reports=false; sentry=false;" ratel: @@ -557,6 +557,10 @@ services: source: $DATA_DIR_EM_SCALA/data target: /app/data read_only: true + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: 1 @@ -602,6 +606,10 @@ services: source: $DATA_DIR_LINKER/conf target: /app/conf read_only: true + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: ${SCALE_LINKER} @@ -649,6 +657,10 @@ services: source: $DATA_DIR_ASYNC_RECEIVER/csv target: /app/csv read_only: false + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: 1 @@ -707,6 +719,10 @@ services: source: $DATA_DIR_ASYNC_RECEIVER/csv target: /app/csv read_only: false + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true deploy: mode: replicated replicas: 1 @@ -766,6 +782,11 @@ services: image: ${IMAGE_REGISTRY}$BOOTSTRAPPER_IMAGE networks: - backend + volumes: + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true environment: POSTGRESQL_IP: postgresql POSTGRESQL_PORT: 5432 @@ -783,6 +804,50 @@ services: deploy: mode: global + backup-restore-api: + image: ${IMAGE_REGISTRY}${BACKUP_RESTORE_API_IMAGE} + environment: + LOG4J2_LEVEL: "DEBUG" + POSTGRESQL_IP: postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + POSTGRESQL_NOTIFICATIONS_DB: ${POSTGRESQL_NOTIFICATIONS_DB} + POSTGRESQL_AUDIT_DB: ${POSTGRESQL_AUDIT_DB} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_SERVERS} + 
KAFKA_APPLICATION_ID: app-id-backuprestoreapi + DGRAPH_HOSTS: ${DGRAPH_HOSTS} + DGRAPH_PORTS: ${DGRAPH_PORTS} + BACKUP_RESTORE_API_HTTP_PORT: ${BACKUP_RESTORE_API_HTTP_PORT} + API_HTTP_PORT: ${API_HTTP_PORT} + LINKER_IP: linker + LINKER_HTTP_PORT: 50000 + CONTROLLER_IP: controller + CONTROLLER_HTTP_PORT: ${CONTROLLER_HTTP_PORT} + networks: + - backend + volumes: + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true + deploy: + mode: replicated + replicas: 0 + resources: + limits: + memory: ${API_RAM_LIMIT} + update_config: + parallelism: 1 + delay: 10s + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 0 + placement: + constraints: + - node.labels.name == ${PLACEMENT_API} + ui: image: ${IMAGE_REGISTRY}${UI_IMAGE} environment: diff --git a/devops/linux/docker/conf/stack/docker-stack-low-0.yml b/devops/linux/docker/conf/stack/docker-stack-low-0.yml index e540b23e5..e91e18c6f 100644 --- a/devops/linux/docker/conf/stack/docker-stack-low-0.yml +++ b/devops/linux/docker/conf/stack/docker-stack-low-0.yml @@ -71,6 +71,10 @@ services: target: 50001 protocol: tcp mode: host + - published: 50010 + target: 50010 + protocol: tcp + mode: host - published: 3000 target: 3000 protocol: tcp @@ -604,6 +608,50 @@ services: deploy: mode: global + backup-restore-api: + image: ${IMAGE_REGISTRY}${BACKUP_RESTORE_API_IMAGE} + environment: + LOG4J2_LEVEL: "DEBUG" + POSTGRESQL_IP: postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + POSTGRESQL_NOTIFICATIONS_DB: ${POSTGRESQL_NOTIFICATIONS_DB} + POSTGRESQL_AUDIT_DB: ${POSTGRESQL_AUDIT_DB} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_SERVERS} + KAFKA_APPLICATION_ID: app-id-backuprestoreapi + DGRAPH_HOSTS: ${DGRAPH_HOSTS} + DGRAPH_PORTS: ${DGRAPH_PORTS} + BACKUP_RESTORE_API_HTTP_PORT: ${BACKUP_RESTORE_API_HTTP_PORT} + API_HTTP_PORT: ${API_HTTP_PORT} + LINKER_IP: linker + LINKER_HTTP_PORT: 50000 + CONTROLLER_IP: controller + 
CONTROLLER_HTTP_PORT: ${CONTROLLER_HTTP_PORT} + networks: + - backend + volumes: + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true + deploy: + mode: replicated + replicas: 0 + resources: + limits: + memory: ${API_RAM_LIMIT} + update_config: + parallelism: 1 + delay: 10s + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 0 + placement: + constraints: + - node.labels.name == ${PLACEMENT_API} + ui: image: ${IMAGE_REGISTRY}${UI_IMAGE} environment: diff --git a/devops/linux/docker/conf/stack/docker-stack-low-1.yml b/devops/linux/docker/conf/stack/docker-stack-low-1.yml index 350a7ef35..8ec794d84 100644 --- a/devops/linux/docker/conf/stack/docker-stack-low-1.yml +++ b/devops/linux/docker/conf/stack/docker-stack-low-1.yml @@ -71,6 +71,10 @@ services: target: 50001 protocol: tcp mode: host + - published: 50010 + target: 50010 + protocol: tcp + mode: host - published: 3000 target: 3000 protocol: tcp @@ -604,6 +608,50 @@ services: deploy: mode: global + backup-restore-api: + image: ${IMAGE_REGISTRY}${BACKUP_RESTORE_API_IMAGE} + environment: + LOG4J2_LEVEL: "DEBUG" + POSTGRESQL_IP: postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + POSTGRESQL_NOTIFICATIONS_DB: ${POSTGRESQL_NOTIFICATIONS_DB} + POSTGRESQL_AUDIT_DB: ${POSTGRESQL_AUDIT_DB} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_SERVERS} + KAFKA_APPLICATION_ID: app-id-backuprestoreapi + DGRAPH_HOSTS: ${DGRAPH_HOSTS} + DGRAPH_PORTS: ${DGRAPH_PORTS} + BACKUP_RESTORE_API_HTTP_PORT: ${BACKUP_RESTORE_API_HTTP_PORT} + API_HTTP_PORT: ${API_HTTP_PORT} + LINKER_IP: linker + LINKER_HTTP_PORT: 50000 + CONTROLLER_IP: controller + CONTROLLER_HTTP_PORT: ${CONTROLLER_HTTP_PORT} + networks: + - backend + volumes: + - type: bind + source: $DATA_SYSTEM_CONFIG_DIR + target: /app/conf_system + read_only: true + deploy: + mode: replicated + replicas: 0 + resources: + limits: + memory: ${API_RAM_LIMIT} + update_config: + 
parallelism: 1 + delay: 10s + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 0 + placement: + constraints: + - node.labels.name == ${PLACEMENT_API} + ui: image: ${IMAGE_REGISTRY}${UI_IMAGE} environment: diff --git a/devops/linux/docker/deployment/build_and_reboot/d-stack-2-build-java.sh b/devops/linux/docker/deployment/build_and_reboot/d-stack-2-build-java.sh index fbaa41318..eb6f8b9db 100755 --- a/devops/linux/docker/deployment/build_and_reboot/d-stack-2-build-java.sh +++ b/devops/linux/docker/deployment/build_and_reboot/d-stack-2-build-java.sh @@ -10,6 +10,6 @@ echo echo "Build java apps" pwd pushd ../../../../../JeMPI_Apps - source ../../build-all-java.sh + source ./build-all-java.sh popd diff --git a/devops/linux/docker/deployment/deploy-local.sh b/devops/linux/docker/deployment/deploy-local.sh index 31bfe4225..19d36dfcf 100755 --- a/devops/linux/docker/deployment/deploy-local.sh +++ b/devops/linux/docker/deployment/deploy-local.sh @@ -6,6 +6,7 @@ echo "$JEMPI_HOME" export JAVA_VERSION=21.0.3-tem echo "Setting JEMPI_HOME to: $JEMPI_HOME" JEMPI_CONFIGURATION_PATH=$JEMPI_HOME/JeMPI_Apps/JeMPI_Configuration/reference/config-reference.json +JEMPI_ENV_CONFIGURATION=create-env-linux-low-1.sh # Display menu options echo "Select an option for local deployment:" @@ -83,7 +84,7 @@ run_enviroment_configuration_and_helper_script(){ # Navigate to environment configuration directory echo "Navigate to environment configuration directory" pushd "$JEMPI_HOME/devops/linux/docker/conf/env/" - source create-env-linux-low-1.sh + source $JEMPI_ENV_CONFIGURATION popd # Running Docker helper scripts @@ -130,7 +131,6 @@ pull_docker_images_and_push_local(){ } build_all_stack_and_reboot(){ # run_enviroment_configuration_and_helper_script - run_field_configuration_file # Build and reboot the entire stack echo "Build and reboot the entire stack" pushd "$JEMPI_HOME/devops/linux/docker/deployment/build_and_reboot" @@ -139,7 +139,7 @@ build_all_stack_and_reboot(){ } 
initialize_db_build_all_stack_and_reboot(){ echo "Create DB and Deploy" - pushd "$JEMPI_HOME/devops/linux/docker/deployment/from_scratch" + pushd "$JEMPI_HOME/devops/linux/docker/deployment/install_from_scratch" yes | source d-stack-1-create-db-build-all-reboot.sh popd } @@ -151,7 +151,7 @@ restore_dgraph_db(){ if [ "$dgraph_confirmation" == "yes" ] || [ "$dgraph_confirmation" == "y" ]; then pushd "$JEMPI_HOME/devops/linux/docker/backup_restore" echo "Starting Dgraph database restore..." - bash dgraph-restore.sh + bash dgraph-restore-api.sh echo "Database Dgraph restore completed." popd else @@ -177,9 +177,6 @@ restore_postgres_db(){ fi } - - - # Process user choice case $choice in 1) @@ -218,12 +215,13 @@ case $choice in exit 0 ;; 6) - echo "Backup" + BACKUP_DATE_TIME=$(date +%Y-%m-%d_%H%M%S) + echo "Started Backup at- $BACKUP_DATE_TIME" pushd "$JEMPI_HOME/devops/linux/docker/backup_restore" - sudo bash dgraph-backup.sh - sudo bash postgres-backup.sh - popd - + source dgraph-backup-api.sh $BACKUP_DATE_TIME + sudo bash postgres-backup.sh $BACKUP_DATE_TIME + popd + ;; 7) echo "Restore Databases" diff --git a/devops/linux/docker/deployment/down/d-stack-stop-backup-restore-api-services.sh b/devops/linux/docker/deployment/down/d-stack-stop-backup-restore-api-services.sh new file mode 100755 index 000000000..8dad4f085 --- /dev/null +++ b/devops/linux/docker/deployment/down/d-stack-stop-backup-restore-api-services.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e +set -u +# SERVICE_NAME = $1 +pushd . + SCRIPT_DIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P) + cd ${SCRIPT_DIR}/../.. 
+ + source ./conf.env + + # docker service scale ${SERVICE_NAME}=1 + docker service scale jempi_backup-restore-api=0 + echo + +popd + + diff --git a/devops/linux/docker/deployment/down/d-stack-stop-services.sh b/devops/linux/docker/deployment/down/d-stack-stop-services.sh new file mode 100755 index 000000000..1d0d065d0 --- /dev/null +++ b/devops/linux/docker/deployment/down/d-stack-stop-services.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -e +set -u +# SERVICE_NAME = $1 +pushd . + SCRIPT_DIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P) + cd ${SCRIPT_DIR}/../.. + + source ./conf.env + + # docker service scale ${SERVICE_NAME}=0 + docker service scale jempi_api=0 + docker service scale jempi_async-receiver=0 + + echo + +popd + + diff --git a/devops/linux/docker/deployment/from_scratch/d-stack-1-create-db-build-all-reboot.sh b/devops/linux/docker/deployment/install_from_scratch/d-stack-1-create-db-build-all-reboot.sh similarity index 100% rename from devops/linux/docker/deployment/from_scratch/d-stack-1-create-db-build-all-reboot.sh rename to devops/linux/docker/deployment/install_from_scratch/d-stack-1-create-db-build-all-reboot.sh diff --git a/devops/linux/docker/deployment/reboot/d-stack-start-backup-restore-api-services.sh b/devops/linux/docker/deployment/reboot/d-stack-start-backup-restore-api-services.sh new file mode 100755 index 000000000..7142c577f --- /dev/null +++ b/devops/linux/docker/deployment/reboot/d-stack-start-backup-restore-api-services.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e +set -u +# SERVICE_NAME = $1 +pushd . + SCRIPT_DIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P) + cd ${SCRIPT_DIR}/../.. 
+ + source ./conf.env + + # docker service scale ${SERVICE_NAME}=1 + docker service scale jempi_backup-restore-api=1 + echo + +popd + + diff --git a/devops/linux/docker/deployment/reboot/d-stack-start-services.sh b/devops/linux/docker/deployment/reboot/d-stack-start-services.sh new file mode 100755 index 000000000..833104d44 --- /dev/null +++ b/devops/linux/docker/deployment/reboot/d-stack-start-services.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -e +set -u +# SERVICE_NAME = $1 +pushd . + SCRIPT_DIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P) + cd ${SCRIPT_DIR}/../.. + + source ./conf.env + + # docker service scale ${SERVICE_NAME}=1 + docker service scale jempi_api=1 + docker service scale jempi_async-receiver=1 + echo + +popd + +