From 0ba764daf3fb8478c475515e7be74b8e3d3acebd Mon Sep 17 00:00:00 2001 From: Arek Burdach Date: Tue, 25 Feb 2025 12:11:11 +0100 Subject: [PATCH] [NU-1979] Scenario statuses refactor: scenario status and deployment statuses are decoupled (#7566) --- .../src/components/Process/ProcessErrors.tsx | 22 - .../components/Process/ProcessStateIcon.tsx | 2 - .../client/src/components/Process/types.ts | 7 +- .../src/reducers/graph/utils.fixtures.ts | 5 - .../api/deployment/DeploymentManager.scala | 88 +-- .../deployment/DeploymentStatusDetails.scala | 20 + ...rridingProcessStateDefinitionManager.scala | 40 +- .../ProcessStateDefinitionManager.scala | 82 +-- ...CachingProcessStateDeploymentManager.scala | 45 +- .../InconsistentStateDetector.scala | 154 ------ .../services/ScheduledProcessListener.scala | 8 +- .../SimpleProcessStateDefinitionManager.scala | 34 +- .../deployment/simple/SimpleStateStatus.scala | 53 +- .../testing/DeploymentManagerStub.scala | 84 ++- ...ingProcessStateDefinitionManagerTest.scala | 20 +- .../deployment/SimpleProcessStateSpec.scala | 37 -- .../SimpleScenarioStatusDtoSpec.scala | 39 ++ .../deployment/StateStatusCodingSpec.scala | 51 -- ...ingProcessStateDeploymentManagerSpec.scala | 33 +- .../scenariodetails/ScenarioStatusDto.scala | 50 ++ .../scenariodetails/ScenarioWithDetails.scala | 4 +- .../ui/api/AppApiHttpService.scala | 10 +- .../ui/api/ManagementResources.scala | 1 - .../ui/api/ProcessesExportResources.scala | 2 - .../ui/api/ProcessesResources.scala | 15 +- .../ui/api/ScenarioStatusPresenter.scala | 42 ++ .../ui/process/ProcessService.scala | 42 +- .../ScenarioWithDetailsConversions.scala | 3 +- .../ui/process/deployment/ActionService.scala | 41 +- .../deployment/DeploymentService.scala | 36 +- .../deployment/ScenarioStateProvider.scala | 436 --------------- .../ScenarioTestExecutorService.scala | 5 +- ...oymentManagerReliableStatusesWrapper.scala | 65 +++ ...EngineSideDeploymentStatusesProvider.scala | 130 +++++ 
...entsStatusesSynchronizationScheduler.scala | 62 +++ .../ScenarioDeploymentReconciler.scala | 54 ++ .../InconsistentStateDetector.scala | 126 +++++ .../ScenarioStatusProvider.scala | 244 +++++++++ .../exception/ProcessIllegalAction.scala | 15 +- ...entsStatusesSynchronizationScheduler.scala | 30 +- .../periodic/PeriodicDeploymentManager.scala | 43 +- .../periodic/PeriodicProcessService.scala | 265 ++++----- ...eriodicProcessStateDefinitionManager.scala | 83 ++- .../periodic/PeriodicStateStatus.scala | 61 ++- .../InvalidDeploymentManagerStub.scala | 34 +- .../DBFetchingProcessRepository.scala | 16 +- .../ScenarioWithDetailsEntity.scala | 9 +- .../DbScenarioActivityRepository.scala | 6 +- .../ScenarioActivityRepository.scala | 7 - .../server/AkkaHttpBasedRouteProvider.scala | 203 +++---- .../ui/statistics/ScenarioStatistics.scala | 2 +- ...sageStatisticsReportsSettingsService.scala | 6 +- .../nussknacker/ui/util/FutureUtils.scala | 14 +- .../ui/validation/NodeValidator.scala | 3 +- .../test/base/it/NuResourcesTest.scala | 26 +- .../test/mock/MockDeploymentManager.scala | 76 ++- .../test/mock/StubScenarioStateProvider.scala | 40 -- .../test/utils/domain/TestFactory.scala | 4 +- .../test/utils/domain/TestProcessUtil.scala | 2 +- .../api/AppApiHttpServiceBusinessSpec.scala | 25 +- .../api/AppApiHttpServiceSecuritySpec.scala | 72 +-- .../ui/api/ManagementResourcesSpec.scala | 2 +- .../ui/api/ProcessesResourcesSpec.scala | 17 +- .../DefaultComponentServiceSpec.scala | 6 +- .../NotificationServiceTest.scala | 41 +- .../ui/process/DBProcessServiceSpec.scala | 6 +- .../ProcessStateDefinitionServiceSpec.scala | 6 +- .../deployment/DeploymentServiceSpec.scala | 461 ++++++---------- .../TestDeploymentServiceFactory.scala | 130 +++++ .../InconsistentStateDetectorTest.scala | 98 ++++ ...eriodicProcessServiceIntegrationTest.scala | 35 +- .../PeriodicProcessesFetchingTest.scala | 8 +- .../flink/DeploymentManagerStub.scala | 69 +-- .../periodic/flink/FlinkClientStub.scala | 30 +- 
.../flink/PeriodicDeploymentManagerTest.scala | 501 ++++++++++-------- .../periodic/flink/PeriodicProcessGen.scala | 5 +- .../flink/PeriodicProcessServiceTest.scala | 30 +- ...dicProcessStateDefinitionManagerTest.scala | 27 +- .../statistics/ScenarioStatisticsTest.scala | 4 +- ...StatisticsReportsSettingsServiceTest.scala | 6 +- docs/MigrationGuide.md | 10 +- .../ScenarioDeploymentConfiguration.md | 1 - ...DevelopmentDeploymentManagerProvider.scala | 65 +-- ...lopmentProcessStateDefinitionManager.scala | 15 +- .../MockableDeploymentManagerProvider.scala | 60 +-- .../engine/flink/api/NkGlobalParameters.scala | 4 + .../FlinkScenarioTestingJob.scala | 4 +- .../BaseFlinkDeploymentManagerSpec.scala | 69 ++- .../JavaConfigDeploymentManagerSpec.scala | 2 +- .../streaming/StreamingDockerTest.scala | 21 +- .../management/FlinkDeploymentManager.scala | 182 +++---- .../FlinkScheduledExecutionPerformer.scala | 18 +- .../engine/management/FlinkSlotsChecker.scala | 11 +- .../engine/management/FlinkStateStatus.scala | 8 +- .../FlinkStatusDetailsDeterminer.scala | 49 +- .../FlinkMiniClusterScenarioJobRunner.scala | 17 +- .../jobrunner/FlinkScenarioJobRunner.scala | 4 +- .../RemoteFlinkScenarioJobRunner.scala | 10 +- .../management/rest/CachedFlinkClient.scala | 26 +- .../engine/management/rest/FlinkClient.scala | 16 +- .../management/rest/HttpFlinkClient.scala | 42 +- .../management/rest/flinkRestModel.scala | 15 +- .../FlinkDeploymentManagerSpec.scala | 260 ++------- .../management/FlinkProcessStateSpec.scala | 42 -- .../FlinkScenarioStatusDtoSpec.scala | 44 ++ .../management/FlinkSlotsCheckerTest.scala | 19 +- .../rest/CachedFlinkClientTest.scala | 26 +- .../management/rest/FlinkHttpClientTest.scala | 11 +- .../management/utils/JobIdGenerator.scala | 14 + .../minicluster/FlinkMiniClusterConfig.scala | 1 - .../minicluster/FlinkMiniClusterFactory.scala | 6 +- ...terStreamExecutionEnvironmentFactory.scala | 11 +- ...allbackToSingleUseMiniClusterHandler.scala | 1 - 
.../engine/flink/test/FlinkSpec.scala | 1 - .../embedded/EmbeddedDeploymentManager.scala | 29 +- ...mbeddedProcessStateDefinitionManager.scala | 6 +- ...esponseEmbeddedDeploymentManagerTest.scala | 6 +- ...EmbeddedDeploymentManagerRestartTest.scala | 6 +- ...reamingEmbeddedDeploymentManagerTest.scala | 22 +- .../k8s/manager/K8sDeploymentManager.scala | 19 +- .../manager/K8sDeploymentStatusMapper.scala | 79 ++- .../BaseK8sDeploymentManagerTest.scala | 6 +- .../K8sDeploymentManagerKafkaTest.scala | 6 +- .../K8sDeploymentManagerOnMocksTest.scala | 4 +- .../K8sDeploymentManagerReqRespTest.scala | 4 +- .../K8sDeploymentStatusMapperSpec.scala | 84 +-- .../nussknacker/engine/lite/TestRunner.scala | 4 +- .../engine/api/deployment/ProcessAction.scala | 25 +- .../engine/api/deployment/ProcessState.scala | 97 ---- .../engine/api/deployment/StateStatus.scala | 23 + ...cComponentStaticDefinitionDeterminer.scala | 3 +- ...tionExpressionParameterValidatorTest.scala | 3 +- 132 files changed, 3126 insertions(+), 3101 deletions(-) delete mode 100644 designer/client/src/components/Process/ProcessErrors.tsx create mode 100644 designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentStatusDetails.scala delete mode 100644 designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/inconsistency/InconsistentStateDetector.scala delete mode 100644 designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleProcessStateSpec.scala create mode 100644 designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleScenarioStatusDtoSpec.scala delete mode 100644 designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/StateStatusCodingSpec.scala create mode 100644 designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioStatusDto.scala create mode 100644 
designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ScenarioStatusPresenter.scala delete mode 100644 designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioStateProvider.scala create mode 100644 designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/DeploymentManagerReliableStatusesWrapper.scala create mode 100644 designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/EngineSideDeploymentStatusesProvider.scala create mode 100644 designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/FinishedDeploymentsStatusesSynchronizationScheduler.scala create mode 100644 designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/ScenarioDeploymentReconciler.scala create mode 100644 designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetector.scala create mode 100644 designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/ScenarioStatusProvider.scala delete mode 100644 designer/server/src/test/scala/pl/touk/nussknacker/test/mock/StubScenarioStateProvider.scala create mode 100644 designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/TestDeploymentServiceFactory.scala create mode 100644 designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetectorTest.scala delete mode 100644 engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkProcessStateSpec.scala create mode 100644 engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkScenarioStatusDtoSpec.scala create mode 100644 engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/utils/JobIdGenerator.scala delete mode 100644 extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessState.scala create mode 100644 
extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/StateStatus.scala diff --git a/designer/client/src/components/Process/ProcessErrors.tsx b/designer/client/src/components/Process/ProcessErrors.tsx deleted file mode 100644 index 4089a5f5545..00000000000 --- a/designer/client/src/components/Process/ProcessErrors.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import React from "react"; -import { useTranslation } from "react-i18next"; -import { ProcessStateType } from "./types"; - -export function Errors({ state }: { state: ProcessStateType }) { - const { t } = useTranslation(); - - if (state.errors?.length < 1) { - return null; - } - - return ( -
- {t("stateIcon.errors", "Errors:")} - -
- ); -} diff --git a/designer/client/src/components/Process/ProcessStateIcon.tsx b/designer/client/src/components/Process/ProcessStateIcon.tsx index 89658ebe15e..55a813a2738 100644 --- a/designer/client/src/components/Process/ProcessStateIcon.tsx +++ b/designer/client/src/components/Process/ProcessStateIcon.tsx @@ -3,7 +3,6 @@ import { ProcessStateType, Scenario } from "./types"; import ProcessStateUtils from "./ProcessStateUtils"; import UrlIcon from "../UrlIcon"; import { Box, Divider, Popover, styled, Typography } from "@mui/material"; -import { Errors } from "./ProcessErrors"; const StyledUrlIcon = styled(UrlIcon)(({ theme }) => ({ width: theme.spacing(2.5), @@ -44,7 +43,6 @@ function ProcessStateIcon({ scenario, processState }: Props) { {tooltip} - diff --git a/designer/client/src/components/Process/types.ts b/designer/client/src/components/Process/types.ts index 8e86426b99d..3088007f8d7 100644 --- a/designer/client/src/components/Process/types.ts +++ b/designer/client/src/components/Process/types.ts @@ -1,5 +1,5 @@ /* eslint-disable i18next/no-literal-string */ -import { UnknownRecord, Instant } from "../../types/common"; +import { Instant } from "../../types/common"; import { ScenarioGraph, ValidationResult } from "../../types"; import { ProcessingMode } from "../../http/HttpService"; @@ -48,17 +48,12 @@ export type ProcessName = Scenario["name"]; export type ProcessStateType = { status: StatusType; - externalDeploymentId?: string; visibleActions: Array; allowedActions: Array; actionTooltips: Record; icon: string; tooltip: string; description: string; - startTime?: Date; - attributes?: UnknownRecord; - errors?: Array; - version?: number | null; }; export type StatusType = { diff --git a/designer/client/src/reducers/graph/utils.fixtures.ts b/designer/client/src/reducers/graph/utils.fixtures.ts index f80d05bbc18..83609e6761e 100644 --- a/designer/client/src/reducers/graph/utils.fixtures.ts +++ b/designer/client/src/reducers/graph/utils.fixtures.ts @@ -162,20 
+162,15 @@ export const state: GraphState = { ], }, state: { - externalDeploymentId: null, status: { name: "NOT_DEPLOYED", }, - version: null, visibleActions: ["DEPLOY", "ARCHIVE", "RENAME"], allowedActions: ["DEPLOY", "ARCHIVE", "RENAME"], actionTooltips: {}, icon: "/assets/states/not-deployed.svg", tooltip: "The scenario is not deployed.", description: "The scenario is not deployed.", - startTime: null, - attributes: null, - errors: [], }, validationResult: { errors: { diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentManager.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentManager.scala index ce2fa774423..b6b8005c313 100644 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentManager.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentManager.scala @@ -1,101 +1,31 @@ package pl.touk.nussknacker.engine.api.deployment import com.typesafe.config.Config -import pl.touk.nussknacker.engine.api.deployment.inconsistency.InconsistentStateDetector import pl.touk.nussknacker.engine.api.deployment.scheduler.services._ -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName} import pl.touk.nussknacker.engine.newdeployment -import pl.touk.nussknacker.engine.util.WithDataFreshnessStatusUtils.WithDataFreshnessStatusOps import java.time.Instant -import scala.concurrent.ExecutionContext.Implicits._ import scala.concurrent.Future -trait DeploymentManagerInconsistentStateHandlerMixIn { - self: DeploymentManager => - - final override def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - 
currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = { - val engineStateResolvedWithLastAction = flattenStatus(lastStateAction, statusDetails) - Future.successful( - processStateDefinitionManager.processState( - engineStateResolvedWithLastAction, - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId - ) - ) - } - - // This method is protected to make possible to override it with own logic handling different edge cases like - // other state on engine than based on lastStateAction - protected def flattenStatus( - lastStateAction: Option[ProcessAction], - statusDetails: List[StatusDetails] - ): StatusDetails = { - InconsistentStateDetector.resolve(statusDetails, lastStateAction) - } - -} - trait DeploymentManager extends AutoCloseable { def deploymentSynchronisationSupport: DeploymentSynchronisationSupport - def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport + def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport def schedulingSupport: SchedulingSupport def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] - final def getProcessState( - idWithName: ProcessIdWithName, - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - )( - implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[ProcessState]] = { - for { - statusDetailsWithFreshness <- getProcessStates(idWithName.name) - stateWithFreshness <- resolve( - idWithName, - statusDetailsWithFreshness.value, - lastStateAction, - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ).map(statusDetailsWithFreshness.withValue) - } yield stateWithFreshness - } - /** * We provide a special wrapper called WithDataFreshnessStatus to ensure that fetched data is restored * from the cache or not. 
If you use any kind of cache in your DM implementation please wrap result data * with WithDataFreshnessStatus.cached(data) in opposite situation use WithDataFreshnessStatus.fresh(data) */ - def getProcessStates(name: ProcessName)( + def getScenarioDeploymentsStatuses(scenarioName: ProcessName)( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[List[StatusDetails]]] - - /** - * Resolves possible inconsistency with lastAction and formats status using `ProcessStateDefinitionManager` - */ - def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] + ): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] def processStateDefinitionManager: ProcessStateDefinitionManager @@ -113,17 +43,17 @@ trait ManagerSpecificScenarioActivitiesStoredByManager { self: DeploymentManager } -sealed trait StateQueryForAllScenariosSupport +sealed trait DeploymentsStatusesQueryForAllScenariosSupport -trait StateQueryForAllScenariosSupported extends StateQueryForAllScenariosSupport { +trait DeploymentsStatusesQueryForAllScenariosSupported extends DeploymentsStatusesQueryForAllScenariosSupport { - def getAllProcessesStates()( + def getAllScenariosDeploymentsStatuses()( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[Map[ProcessName, List[StatusDetails]]]] + ): Future[WithDataFreshnessStatus[Map[ProcessName, List[DeploymentStatusDetails]]]] } -case object NoStateQueryForAllScenariosSupport extends StateQueryForAllScenariosSupport +case object NoDeploymentsStatusesQueryForAllScenariosSupport extends DeploymentsStatusesQueryForAllScenariosSupport sealed trait DeploymentSynchronisationSupport diff --git 
a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentStatusDetails.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentStatusDetails.scala new file mode 100644 index 00000000000..2d491812acf --- /dev/null +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/DeploymentStatusDetails.scala @@ -0,0 +1,20 @@ +package pl.touk.nussknacker.engine.api.deployment + +import pl.touk.nussknacker.engine.api.process.VersionId +import pl.touk.nussknacker.engine.deployment.DeploymentId + +// TODO replace by simple tuple DeploymentId -> DeploymentStatus after fixing TODOs +case class DeploymentStatusDetails( + status: StateStatus, + // deploymentId is optional because some deployment managers (k8s) don't support it + deploymentId: Option[DeploymentId], + // version might be unavailable in some failing cases. It is used during checking if deployed version is the same + // as expected by user - see InconsistentStateDetector. 
+ // TODO it should be an attribute of "following deploy" StateStatuses: DuringDeploy, Running and Finished + version: Option[VersionId], +) { + + def deploymentIdUnsafe: DeploymentId = + deploymentId.getOrElse(throw new IllegalStateException(s"deploymentId is missing")) + +} diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManager.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManager.scala index c07966c9d30..80a75e0dbe0 100644 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManager.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManager.scala @@ -1,6 +1,6 @@ package pl.touk.nussknacker.engine.api.deployment -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName import java.net.URI @@ -21,32 +21,42 @@ import java.net.URI */ class OverridingProcessStateDefinitionManager( delegate: ProcessStateDefinitionManager, - statusActionsPF: PartialFunction[ProcessStatus, List[ScenarioActionName]] = PartialFunction.empty, + statusActionsPF: PartialFunction[ScenarioStatusWithScenarioContext, Set[ScenarioActionName]] = + PartialFunction.empty, statusIconsPF: PartialFunction[StateStatus, URI] = PartialFunction.empty, statusTooltipsPF: PartialFunction[StateStatus, String] = PartialFunction.empty, statusDescriptionsPF: PartialFunction[StateStatus, String] = PartialFunction.empty, customStateDefinitions: Map[StatusName, StateDefinitionDetails] = Map.empty, customVisibleActions: Option[List[ScenarioActionName]] = None, - 
customActionTooltips: Option[ProcessStatus => Map[ScenarioActionName, String]] = None, + customActionTooltips: Option[ScenarioStatusWithScenarioContext => Map[ScenarioActionName, String]] = None, ) extends ProcessStateDefinitionManager { - override def visibleActions: List[ScenarioActionName] = - customVisibleActions.getOrElse(delegate.visibleActions) + override def visibleActions(input: ScenarioStatusWithScenarioContext): List[ScenarioActionName] = + customVisibleActions.getOrElse(delegate.visibleActions(input)) - override def statusActions(processStatus: ProcessStatus): List[ScenarioActionName] = - statusActionsPF.applyOrElse(processStatus, delegate.statusActions) + override def statusActions(input: ScenarioStatusWithScenarioContext): Set[ScenarioActionName] = + statusActionsPF.applyOrElse(input, delegate.statusActions) - override def actionTooltips(processStatus: ProcessStatus): Map[ScenarioActionName, String] = - customActionTooltips.map(_(processStatus)).getOrElse(delegate.actionTooltips(processStatus)) + override def actionTooltips(input: ScenarioStatusWithScenarioContext): Map[ScenarioActionName, String] = + customActionTooltips.map(_(input)).getOrElse(delegate.actionTooltips(input)) - override def statusIcon(stateStatus: StateStatus): URI = - statusIconsPF.orElse(stateDefinitionsPF(_.icon)).applyOrElse(stateStatus, delegate.statusIcon) + override def statusIcon(input: ScenarioStatusWithScenarioContext): URI = + statusIconsPF + .orElse(stateDefinitionsPF(_.icon)) + .lift(input.scenarioStatus) + .getOrElse(delegate.statusIcon(input)) - override def statusTooltip(stateStatus: StateStatus): String = - statusTooltipsPF.orElse(stateDefinitionsPF(_.tooltip)).applyOrElse(stateStatus, delegate.statusTooltip) + override def statusTooltip(input: ScenarioStatusWithScenarioContext): String = + statusTooltipsPF + .orElse(stateDefinitionsPF(_.tooltip)) + .lift(input.scenarioStatus) + .getOrElse(delegate.statusTooltip(input)) - override def statusDescription(stateStatus: 
StateStatus): String = - statusDescriptionsPF.orElse(stateDefinitionsPF(_.description)).applyOrElse(stateStatus, delegate.statusDescription) + override def statusDescription(input: ScenarioStatusWithScenarioContext): String = + statusDescriptionsPF + .orElse(stateDefinitionsPF(_.description)) + .lift(input.scenarioStatus) + .getOrElse(delegate.statusDescription(input)) override def stateDefinitions: Map[StatusName, StateDefinitionDetails] = delegate.stateDefinitions ++ customStateDefinitions diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessStateDefinitionManager.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessStateDefinitionManager.scala index 19ca137a742..2278991e320 100644 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessStateDefinitionManager.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessStateDefinitionManager.scala @@ -1,12 +1,16 @@ package pl.touk.nussknacker.engine.api.deployment -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.{ProcessStatus, defaultVisibleActions} +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.{ + DefaultVisibleActions, + ScenarioStatusPresentationDetails, + ScenarioStatusWithScenarioContext +} import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName import pl.touk.nussknacker.engine.api.process.VersionId import java.net.URI -//@TODO: In future clean up it. +// TODO: Some cleanups such as rename to sth close to presentation /** * Used to specify status definitions (for filtering and scenario status visualization) and status transitions (actions). */ @@ -29,53 +33,44 @@ trait ProcessStateDefinitionManager { * Override those methods to customize varying state properties or custom visualizations, * e.g. 
handle schedule date in [[PeriodicProcessStateDefinitionManager]] */ - def statusTooltip(stateStatus: StateStatus): String = - stateDefinitions(stateStatus.name).tooltip + def statusTooltip(input: ScenarioStatusWithScenarioContext): String = + stateDefinitions(input.scenarioStatus.name).tooltip + + def statusDescription(input: ScenarioStatusWithScenarioContext): String = + stateDefinitions(input.scenarioStatus.name).description - def statusDescription(stateStatus: StateStatus): String = - stateDefinitions(stateStatus.name).description + def statusIcon(input: ScenarioStatusWithScenarioContext): URI = + statusIcon(input.scenarioStatus) - def statusIcon(stateStatus: StateStatus): URI = - stateDefinitions(stateStatus.name).icon + private[nussknacker] def statusIcon(status: StateStatus): URI = + stateDefinitions(status.name).icon /** * Actions that are applicable to scenario in general. They may be available only in particular states, as defined by `def statusActions` */ - def visibleActions: List[ScenarioActionName] = defaultVisibleActions + def visibleActions(input: ScenarioStatusWithScenarioContext): List[ScenarioActionName] = DefaultVisibleActions /** * Custom tooltips for actions */ - def actionTooltips(processStatus: ProcessStatus): Map[ScenarioActionName, String] = Map.empty + def actionTooltips(input: ScenarioStatusWithScenarioContext): Map[ScenarioActionName, String] = Map.empty /** * Allowed transitions between states. */ - def statusActions(processStatus: ProcessStatus): List[ScenarioActionName] + def statusActions(input: ScenarioStatusWithScenarioContext): Set[ScenarioActionName] /** - * Enhances raw [[StateStatus]] with scenario properties, including deployment info. 
+ * Returns presentations details of status */ - def processState( - statusDetails: StatusDetails, - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): ProcessState = { - val status = ProcessStatus(statusDetails.status, latestVersionId, deployedVersionId, currentlyPresentedVersionId) - ProcessState( - statusDetails.externalDeploymentId, - statusDetails.status, - statusDetails.version, - visibleActions, - statusActions(status), - actionTooltips(status), - statusIcon(statusDetails.status), - statusTooltip(statusDetails.status), - statusDescription(statusDetails.status), - statusDetails.startTime, - statusDetails.attributes, - statusDetails.errors + def statusPresentation(input: ScenarioStatusWithScenarioContext): ScenarioStatusPresentationDetails = { + ScenarioStatusPresentationDetails( + visibleActions(input), + statusActions(input), + actionTooltips(input), + statusIcon(input), + statusTooltip(input), + statusDescription(input), ) } @@ -84,23 +79,32 @@ trait ProcessStateDefinitionManager { object ProcessStateDefinitionManager { /** - * ProcessStatus contains status of the scenario, it is used as argument of ProcessStateDefinitionManager methods + * ScenarioStatusWithScenarioContext contains status of the scenario, as context of its based on DB state. 
+ * It is used as an argument of ProcessStateDefinitionManager methods * - * @param stateStatus current scenario state - * @param latestVersionId latest saved versionId for the scenario + * @param scenarioStatus current scenario state * @param deployedVersionId currently deployed versionId of the scenario */ - final case class ProcessStatus( - stateStatus: StateStatus, - latestVersionId: VersionId, + final case class ScenarioStatusWithScenarioContext( + scenarioStatus: StateStatus, deployedVersionId: Option[VersionId], currentlyPresentedVersionId: Option[VersionId], ) + final case class ScenarioStatusPresentationDetails( + visibleActions: List[ScenarioActionName], + // This one is not exactly a part of presentation, it is rather a thing related with scenario lifecycle but for now it is kept here + allowedActions: Set[ScenarioActionName], + actionTooltips: Map[ScenarioActionName, String], + icon: URI, + tooltip: String, + description: String + ) + /** * Actions, that are applicable in standard use-cases for most deployment managers. 
*/ - val defaultVisibleActions: List[ScenarioActionName] = List( + val DefaultVisibleActions: List[ScenarioActionName] = List( ScenarioActionName.Cancel, ScenarioActionName.Deploy, ScenarioActionName.Pause, diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManager.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManager.scala index 1689041c83b..498fb322c01 100644 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManager.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManager.scala @@ -4,7 +4,7 @@ import com.github.benmanes.caffeine.cache.{AsyncCache, Caffeine} import com.typesafe.config.Config import com.typesafe.scalalogging.LazyLogging import pl.touk.nussknacker.engine.api.deployment._ -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.process.ProcessName import scala.compat.java8.FutureConverters._ import scala.concurrent.ExecutionContext.Implicits._ @@ -15,38 +15,21 @@ class CachingProcessStateDeploymentManager( delegate: DeploymentManager, cacheTTL: FiniteDuration, override val deploymentSynchronisationSupport: DeploymentSynchronisationSupport, - override val stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport, + override val deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport, override val schedulingSupport: SchedulingSupport, ) extends DeploymentManager { - private val cache: AsyncCache[ProcessName, List[StatusDetails]] = Caffeine + private val cache: AsyncCache[ProcessName, List[DeploymentStatusDetails]] = Caffeine .newBuilder() 
.expireAfterWrite(java.time.Duration.ofMillis(cacheTTL.toMillis)) - .buildAsync[ProcessName, List[StatusDetails]] - - override def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = - delegate.resolve( - idWithName, - statusDetails, - lastStateAction, - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId - ) - - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - def fetchAndUpdateCache(): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - val resultFuture = delegate.getProcessStates(name) - cache.put(name, resultFuture.map(_.value).toJava.toCompletableFuture) + .buildAsync[ProcessName, List[DeploymentStatusDetails]] + + override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { + def fetchAndUpdateCache(): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { + val resultFuture = delegate.getScenarioDeploymentsStatuses(scenarioName) + cache.put(scenarioName, resultFuture.map(_.value).toJava.toCompletableFuture) resultFuture } @@ -54,7 +37,7 @@ class CachingProcessStateDeploymentManager( case DataFreshnessPolicy.Fresh => fetchAndUpdateCache() case DataFreshnessPolicy.CanBeCached => - Option(cache.getIfPresent(name)) + Option(cache.getIfPresent(scenarioName)) .map(_.toScala.map(WithDataFreshnessStatus.cached)) .getOrElse( fetchAndUpdateCache() @@ -84,12 +67,12 @@ object CachingProcessStateDeploymentManager extends LazyLogging { delegate, cacheTTL, delegate.deploymentSynchronisationSupport, - delegate.stateQueryForAllScenariosSupport, + 
delegate.deploymentsStatusesQueryForAllScenariosSupport, delegate.schedulingSupport, ) } .getOrElse { - logger.debug(s"Skipping ProcessState caching for DeploymentManager: $delegate") + logger.debug(s"Skipping state caching for DeploymentManager: $delegate") delegate } } diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/inconsistency/InconsistentStateDetector.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/inconsistency/InconsistentStateDetector.scala deleted file mode 100644 index c2f713a020a..00000000000 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/inconsistency/InconsistentStateDetector.scala +++ /dev/null @@ -1,154 +0,0 @@ -package pl.touk.nussknacker.engine.api.deployment.inconsistency - -import com.typesafe.scalalogging.LazyLogging -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus -import pl.touk.nussknacker.engine.api.deployment.{ProcessAction, ProcessActionState, ScenarioActionName, StatusDetails} -import pl.touk.nussknacker.engine.deployment.DeploymentId - -object InconsistentStateDetector extends InconsistentStateDetector - -class InconsistentStateDetector extends LazyLogging { - - def resolve(statusDetails: List[StatusDetails], lastStateAction: Option[ProcessAction]): StatusDetails = { - val status = (doExtractAtMostOneStatus(statusDetails), lastStateAction) match { - case (Left(state), _) => state - case (Right(Some(state)), _) if shouldAlwaysReturnStatus(state) => state - case (Right(Some(state)), _) if state.status == SimpleStateStatus.Restarting => - handleRestartingState(state, lastStateAction) - case (Right(statusDetailsOpt), Some(action)) - if action.actionName == ScenarioActionName.Deploy && action.state == ProcessActionState.ExecutionFinished => - 
handleLastActionFinishedDeploy(statusDetailsOpt, action) - case (Right(statusDetailsOpt), Some(action)) if action.actionName == ScenarioActionName.Deploy => - handleLastActionDeploy(statusDetailsOpt, action) - case (Right(Some(state)), _) if isFollowingDeployStatus(state) => - handleFollowingDeployState(state, lastStateAction) - case (Right(statusDetailsOpt), Some(action)) if action.actionName == ScenarioActionName.Cancel => - handleCanceledState(statusDetailsOpt) - case (Right(Some(state)), _) => handleState(state, lastStateAction) - case (Right(None), Some(a)) => StatusDetails(SimpleStateStatus.NotDeployed, Some(DeploymentId.fromActionId(a.id))) - case (Right(None), None) => StatusDetails(SimpleStateStatus.NotDeployed, None) - } - logger.debug(s"Resolved $statusDetails , lastStateAction: $lastStateAction to status $status") - status - } - - // TODO: This method is exposed to make transition between Option[StatusDetails] and List[StatusDetails] easier to perform. - // After full migration to List[StatusDetails], this method should be removed - def extractAtMostOneStatus(statusDetails: List[StatusDetails]): Option[StatusDetails] = - doExtractAtMostOneStatus(statusDetails).fold(Some(_), identity) - - private def doExtractAtMostOneStatus( - statusDetails: List[StatusDetails] - ): Either[StatusDetails, Option[StatusDetails]] = { - val notFinalStatuses = statusDetails.filterNot(isFinalOrTransitioningToFinalStatus) - (statusDetails, notFinalStatuses) match { - case (Nil, Nil) => Right(None) - case (_, singleNotFinished :: Nil) => Right(Some(singleNotFinished)) - case (_, firstNotFinished :: _ :: _) => - Left( - firstNotFinished.copy( - status = ProblemStateStatus.MultipleJobsRunning, - errors = List(s"Expected one job, instead: ${notFinalStatuses - .map(details => details.externalDeploymentId.map(_.value).getOrElse("missing") + " - " + details.status) - .mkString(", ")}") - ) - ) - case (firstFinished :: _, Nil) => Right(Some(firstFinished)) - } - } - - private def 
handleState(statusDetails: StatusDetails, lastStateAction: Option[ProcessAction]): StatusDetails = - statusDetails.status match { - case SimpleStateStatus.Restarting | SimpleStateStatus.DuringCancel | SimpleStateStatus.Finished - if lastStateAction.isEmpty => - statusDetails.copy(status = ProblemStateStatus.ProcessWithoutAction) - case _ => statusDetails - } - - // This method handles some corner cases for canceled process -> with last action = Canceled - private def handleCanceledState(statusDetailsOpt: Option[StatusDetails]): StatusDetails = - statusDetailsOpt - // Missing deployment is fine for cancelled action as well because of retention of states - .getOrElse(StatusDetails(SimpleStateStatus.Canceled, None)) - - private def handleRestartingState( - statusDetails: StatusDetails, - lastStateAction: Option[ProcessAction] - ): StatusDetails = - lastStateAction match { - case Some(action) if action.actionName == ScenarioActionName.Deploy => statusDetails - case _ => handleState(statusDetails, lastStateAction) - } - - // This method handles some corner cases for following deploy state mismatch last action version - private def handleFollowingDeployState( - statusDetails: StatusDetails, - lastStateAction: Option[ProcessAction] - ): StatusDetails = - lastStateAction match { - case Some(action) if action.actionName != ScenarioActionName.Deploy => - statusDetails.copy(status = ProblemStateStatus.shouldNotBeRunning(true)) - case Some(_) => - statusDetails - case None => - statusDetails.copy(status = ProblemStateStatus.shouldNotBeRunning(false)) - } - - private def handleLastActionFinishedDeploy( - statusDetailsOpt: Option[StatusDetails], - action: ProcessAction - ): StatusDetails = - statusDetailsOpt match { - case Some(state) => - state - case None => - // Some engines like Flink have jobs retention. Because of that we restore finished state. 
See FlinkDeploymentManager.postprocess - StatusDetails(SimpleStateStatus.Finished, Some(DeploymentId.fromActionId(action.id))) - } - - // This method handles some corner cases for deployed action mismatch state version - private def handleLastActionDeploy(statusDetailsOpt: Option[StatusDetails], action: ProcessAction): StatusDetails = - statusDetailsOpt match { - case Some(state) => - state.version match { - case _ if !isFollowingDeployStatus(state) && !isFinishedStatus(state) => - logger.debug( - s"handleLastActionDeploy: is not following deploy status nor finished, but it should be. $state" - ) - state.copy(status = ProblemStateStatus.shouldBeRunning(action.processVersionId, action.user)) - case Some(ver) if ver.versionId != action.processVersionId => - state.copy(status = - ProblemStateStatus.mismatchDeployedVersion(ver.versionId, action.processVersionId, action.user) - ) - case Some(ver) if ver.versionId == action.processVersionId => - state - case None => // TODO: we should remove Option from ProcessVersion? - state.copy(status = ProblemStateStatus.missingDeployedVersion(action.processVersionId, action.user)) - case _ => - state.copy(status = ProblemStateStatus.Failed) // Generic error in other cases - } - case None => - logger.debug( - s"handleLastActionDeploy for empty statusDetails. 
Action.processVersionId: ${action.processVersionId}" - ) - StatusDetails(ProblemStateStatus.shouldBeRunning(action.processVersionId, action.user), None) - } - - // Methods below are protected in case of other state machine implementation for a given DeploymentManager - protected def shouldAlwaysReturnStatus(state: StatusDetails): Boolean = { - ProblemStateStatus.isProblemStatus(state.status) - } - - protected def isFollowingDeployStatus(state: StatusDetails): Boolean = { - SimpleStateStatus.DefaultFollowingDeployStatuses.contains(state.status) - } - - protected def isFinalOrTransitioningToFinalStatus(state: StatusDetails): Boolean = - SimpleStateStatus.isFinalOrTransitioningToFinalStatus(state.status) - - protected def isFinishedStatus(state: StatusDetails): Boolean = { - state.status == SimpleStateStatus.Finished - } - -} diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/scheduler/services/ScheduledProcessListener.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/scheduler/services/ScheduledProcessListener.scala index b1653dedff3..a734ff983af 100644 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/scheduler/services/ScheduledProcessListener.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/scheduler/services/ScheduledProcessListener.scala @@ -1,7 +1,7 @@ package pl.touk.nussknacker.engine.api.deployment.scheduler.services import com.typesafe.config.Config -import pl.touk.nussknacker.engine.api.deployment.StatusDetails +import pl.touk.nussknacker.engine.api.deployment.DeploymentStatusDetails import pl.touk.nussknacker.engine.api.deployment.scheduler.model.ScheduledDeploymentDetails import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId @@ -32,17 +32,17 @@ case class DeployedEvent( case class 
FinishedEvent( deployment: ScheduledDeploymentDetails, canonicalProcess: CanonicalProcess, - processState: Option[StatusDetails] + processState: Option[DeploymentStatusDetails] ) extends ScheduledProcessEvent case class FailedOnDeployEvent( deployment: ScheduledDeploymentDetails, - processState: Option[StatusDetails] + processState: Option[DeploymentStatusDetails] ) extends ScheduledProcessEvent case class FailedOnRunEvent( deployment: ScheduledDeploymentDetails, - processState: Option[StatusDetails] + processState: Option[DeploymentStatusDetails] ) extends ScheduledProcessEvent case class ScheduledEvent(deployment: ScheduledDeploymentDetails, firstSchedule: Boolean) extends ScheduledProcessEvent diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleProcessStateDefinitionManager.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleProcessStateDefinitionManager.scala index 9baefe26122..53178e28055 100644 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleProcessStateDefinitionManager.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleProcessStateDefinitionManager.scala @@ -1,19 +1,15 @@ package pl.touk.nussknacker.engine.api.deployment.simple -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.ScenarioActionName.DefaultActions import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.{ProblemStateStatus, statusActionsPF} import 
pl.touk.nussknacker.engine.api.deployment.{ - ProcessState, ProcessStateDefinitionManager, ScenarioActionName, StateDefinitionDetails, - StateStatus, - StatusDetails + StateStatus } -import pl.touk.nussknacker.engine.api.process.VersionId /** * Base [[ProcessStateDefinitionManager]] with basic state definitions and state transitions. @@ -22,23 +18,27 @@ import pl.touk.nussknacker.engine.api.process.VersionId */ object SimpleProcessStateDefinitionManager extends ProcessStateDefinitionManager { - override def statusActions(processStatus: ProcessStatus): List[ScenarioActionName] = - statusActionsPF.applyOrElse(processStatus, (_: ProcessStatus) => DefaultActions) + override def statusActions(input: ScenarioStatusWithScenarioContext): Set[ScenarioActionName] = + statusActionsPF.lift(input.scenarioStatus).getOrElse(DefaultActions.toSet) - override def statusDescription(stateStatus: StateStatus): String = stateStatus match { - case _ @ProblemStateStatus(message, _) => message - case _ => SimpleStateStatus.definitions(stateStatus.name).description + override def statusDescription(input: ScenarioStatusWithScenarioContext): String = statusDescription( + input.scenarioStatus + ) + + private[nussknacker] def statusDescription(status: StateStatus): String = status match { + case _ @ProblemStateStatus(message, _, _) => message + case _ => SimpleStateStatus.definitions(status.name).description } - override def statusTooltip(stateStatus: StateStatus): String = stateStatus match { - case _ @ProblemStateStatus(message, _) => message - case _ => SimpleStateStatus.definitions(stateStatus.name).tooltip + override def statusTooltip(input: ScenarioStatusWithScenarioContext): String = statusTooltip(input.scenarioStatus) + + private[nussknacker] def statusTooltip(status: StateStatus): String = status match { + case _ @ProblemStateStatus(message, _, Some(tooltip)) => tooltip + case _ @ProblemStateStatus(message, _, _) => message + case _ => SimpleStateStatus.definitions(status.name).tooltip 
} override def stateDefinitions: Map[StatusName, StateDefinitionDetails] = SimpleStateStatus.definitions - def errorFailedToGet(versionId: VersionId): ProcessState = - processState(StatusDetails(FailedToGet, None), versionId, None, None) - } diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleStateStatus.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleStateStatus.scala index 2e2c59f67dd..4286403c699 100644 --- a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleStateStatus.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/simple/SimpleStateStatus.scala @@ -1,10 +1,10 @@ package pl.touk.nussknacker.engine.api.deployment.simple -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus.defaultActions import pl.touk.nussknacker.engine.api.process.VersionId +import pl.touk.nussknacker.engine.deployment.DeploymentId import java.net.URI @@ -20,8 +20,11 @@ object SimpleStateStatus { } // Represents general problem. - final case class ProblemStateStatus(description: String, allowedActions: List[ScenarioActionName] = defaultActions) - extends StateStatus { + final case class ProblemStateStatus( + description: String, + allowedActions: Set[ScenarioActionName] = defaultActions, + tooltip: Option[String] = None + ) extends StateStatus { override def name: StatusName = ProblemStateStatus.name } @@ -32,15 +35,15 @@ object SimpleStateStatus { val icon: URI = URI.create("/assets/states/error.svg") val defaultDescription = "There are some problems with scenario." 
- val defaultActions: List[ScenarioActionName] = - List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) + val defaultActions: Set[ScenarioActionName] = + Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) // Problem factory methods val Failed: ProblemStateStatus = ProblemStateStatus(defaultDescription) val ArchivedShouldBeCanceled: ProblemStateStatus = - ProblemStateStatus("Archived scenario should be canceled.", List(ScenarioActionName.Cancel)) + ProblemStateStatus("Archived scenario should be canceled.", Set(ScenarioActionName.Cancel)) val FailedToGet: ProblemStateStatus = ProblemStateStatus(s"Failed to get a state of the scenario.") @@ -67,11 +70,18 @@ object SimpleStateStatus { def missingDeployedVersion(exceptedVersionId: VersionId, user: String): ProblemStateStatus = ProblemStateStatus(s"Scenario deployed without version by $user, expected version $exceptedVersionId.") - val ProcessWithoutAction: ProblemStateStatus = - ProblemStateStatus("Scenario state error - no actions found.") - - val MultipleJobsRunning: ProblemStateStatus = - ProblemStateStatus("More than one deployment is running.", List(ScenarioActionName.Cancel)) + def multipleJobsRunning(nonFinalDeploymentIds: List[(DeploymentId, StateStatus)]): ProblemStateStatus = + ProblemStateStatus( + description = "More than one deployment is running.", + allowedActions = Set(ScenarioActionName.Cancel), + tooltip = Some( + nonFinalDeploymentIds + .map { case (deploymentId, deploymentStatus) => + s"$deploymentId - $deploymentStatus" + } + .mkString("Expected one job, instead: ", ", ", "") + ) + ) } @@ -92,21 +102,24 @@ object SimpleStateStatus { status ) - val statusActionsPF: PartialFunction[ProcessStatus, List[ScenarioActionName]] = _.stateStatus match { + val statusActionsPF: PartialFunction[StateStatus, Set[ScenarioActionName]] = { case SimpleStateStatus.NotDeployed => - List(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) - case SimpleStateStatus.DuringDeploy 
=> List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) + Set(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) + case SimpleStateStatus.DuringDeploy => + Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) case SimpleStateStatus.Running => - List(ScenarioActionName.Cancel, ScenarioActionName.Pause, ScenarioActionName.Deploy) + Set(ScenarioActionName.Cancel, ScenarioActionName.Pause, ScenarioActionName.Deploy) case SimpleStateStatus.Canceled => - List(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) - case SimpleStateStatus.Restarting => List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) + Set(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) + case SimpleStateStatus.Restarting => + Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) case SimpleStateStatus.Finished => - List(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) - case SimpleStateStatus.DuringCancel => List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) + Set(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) + case SimpleStateStatus.DuringCancel => + Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) // When Failed - process is in terminal state in Flink and it doesn't require any cleanup in Flink, but in NK it does // - that's why Cancel action is available - case SimpleStateStatus.ProblemStateStatus(_, allowedActions) => allowedActions + case SimpleStateStatus.ProblemStateStatus(_, allowedActions, _) => allowedActions } val definitions: Map[StatusName, StateDefinitionDetails] = Map( diff --git a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/testing/DeploymentManagerStub.scala b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/testing/DeploymentManagerStub.scala index 1919c1d4b35..446b28af9fa 100644 --- 
a/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/testing/DeploymentManagerStub.scala +++ b/designer/deployment-manager-api/src/main/scala/pl/touk/nussknacker/engine/testing/DeploymentManagerStub.scala @@ -7,7 +7,7 @@ import pl.touk.nussknacker.engine.api.component.ScenarioPropertyConfig import pl.touk.nussknacker.engine.api.definition._ import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.{SimpleProcessStateDefinitionManager, SimpleStateStatus} -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.process.ProcessName import pl.touk.nussknacker.engine.{ BaseModelData, DeploymentManagerDependencies, @@ -15,43 +15,38 @@ import pl.touk.nussknacker.engine.{ MetaDataInitializer } -import scala.concurrent.Future +import scala.collection.concurrent.TrieMap import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} -class DeploymentManagerStub extends BaseDeploymentManager with StubbingCommands { - - // We map lastStateAction to state to avoid some corner/blocking cases with the deleting/canceling scenario on tests.. 
- override def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = { - val lastStateActionStatus = lastStateAction match { - case Some(action) if action.actionName == ScenarioActionName.Deploy => - SimpleStateStatus.Running - case Some(action) if action.actionName == ScenarioActionName.Cancel => - SimpleStateStatus.Canceled - case _ => - SimpleStateStatus.NotDeployed - } - Future.successful( - processStateDefinitionManager.processState( - StatusDetails(lastStateActionStatus, None), - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) +class DeploymentManagerStub(implicit ec: ExecutionContext) extends BaseDeploymentManager { + + private val scenarioStatusMap = TrieMap.empty[ProcessName, StateStatus] + + override def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] = command match { + case _: DMValidateScenarioCommand => Future.successful(()) + case run: DMRunDeploymentCommand => + Future { + scenarioStatusMap.put(run.processVersion.processName, SimpleStateStatus.Running) + None + } + case cancel: DMCancelScenarioCommand => + Future.successful { + scenarioStatusMap.put(cancel.scenarioName, SimpleStateStatus.Canceled) + () + } + case _: DMStopScenarioCommand | _: DMStopDeploymentCommand | _: DMCancelDeploymentCommand | + _: DMMakeScenarioSavepointCommand | _: DMRunOffScheduleCommand | _: DMTestScenarioCommand => + notImplemented } - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { + override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { Future.successful( - 
WithDataFreshnessStatus.fresh(List.empty) + WithDataFreshnessStatus.fresh( + scenarioStatusMap.get(scenarioName).map(DeploymentStatusDetails(_, None, None)).toList + ) ) } @@ -59,7 +54,8 @@ class DeploymentManagerStub extends BaseDeploymentManager with StubbingCommands override def deploymentSynchronisationSupport: DeploymentSynchronisationSupport = NoDeploymentSynchronisationSupport - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = NoStateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + NoDeploymentsStatusesQueryForAllScenariosSupport override def schedulingSupport: SchedulingSupport = NoSchedulingSupport @@ -67,21 +63,6 @@ class DeploymentManagerStub extends BaseDeploymentManager with StubbingCommands } -trait StubbingCommands { self: DeploymentManager => - - override def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] = command match { - case _: DMValidateScenarioCommand => Future.successful(()) - case _: DMRunDeploymentCommand => Future.successful(None) - case _: DMStopDeploymentCommand => Future.successful(SavepointResult("")) - case _: DMStopScenarioCommand => Future.successful(SavepointResult("")) - case _: DMCancelDeploymentCommand => Future.successful(()) - case _: DMCancelScenarioCommand => Future.successful(()) - case _: DMMakeScenarioSavepointCommand => Future.successful(SavepointResult("")) - case _: DMRunOffScheduleCommand | _: DMTestScenarioCommand => notImplemented - } - -} - //This provider can be used for testing. Override methods to implement more complex behaviour //Provider is registered via ServiceLoader, so it can be used e.g. 
to run simple docker configuration class DeploymentManagerProviderStub extends DeploymentManagerProvider { @@ -91,7 +72,10 @@ class DeploymentManagerProviderStub extends DeploymentManagerProvider { deploymentManagerDependencies: DeploymentManagerDependencies, config: Config, scenarioStateCacheTTL: Option[FiniteDuration] - ): ValidatedNel[String, DeploymentManager] = Validated.valid(new DeploymentManagerStub) + ): ValidatedNel[String, DeploymentManager] = { + import deploymentManagerDependencies._ + Validated.valid(new DeploymentManagerStub) + } override def name: String = "stub" diff --git a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManagerTest.scala b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManagerTest.scala index 79f411db975..b124e500a0c 100644 --- a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManagerTest.scala +++ b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/OverridingProcessStateDefinitionManagerTest.scala @@ -2,9 +2,10 @@ package pl.touk.nussknacker.engine.api.deployment import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.StateDefinitionDetails.UnknownIcon import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName +import pl.touk.nussknacker.engine.api.process.VersionId class OverridingProcessStateDefinitionManagerTest extends AnyFunSuite with Matchers { @@ -28,7 +29,7 @@ class OverridingProcessStateDefinitionManagerTest extends AnyFunSuite with Match ) ) - override def 
statusActions(processStatus: ProcessStatus): List[ScenarioActionName] = Nil + override def statusActions(input: ScenarioStatusWithScenarioContext): Set[ScenarioActionName] = Set.empty } test("should combine delegate state definitions with custom overrides") { @@ -58,10 +59,19 @@ class OverridingProcessStateDefinitionManagerTest extends AnyFunSuite with Match definitionsMap(CustomState.name).description shouldBe "Custom description" definitionsMap(CustomStateThatOverrides.name).description shouldBe "Custom description that overrides" + def toStatusWithContext(status: StateStatus) = + ScenarioStatusWithScenarioContext(status, None, None) + // Description assigned to a scenario, with custom calculations - manager.statusDescription(DefaultState) shouldBe "Calculated description for default, e.g. schedule date" - manager.statusDescription(CustomState) shouldBe "Calculated description for custom, e.g. schedule date" - manager.statusDescription(CustomStateThatOverrides) shouldBe "Custom description that overrides" + manager.statusDescription( + toStatusWithContext(DefaultState) + ) shouldBe "Calculated description for default, e.g. schedule date" + manager.statusDescription( + toStatusWithContext(CustomState) + ) shouldBe "Calculated description for custom, e.g. 
schedule date" + manager.statusDescription( + toStatusWithContext(CustomStateThatOverrides) + ) shouldBe "Custom description that overrides" } } diff --git a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleProcessStateSpec.scala b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleProcessStateSpec.scala deleted file mode 100644 index d7f0767090b..00000000000 --- a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleProcessStateSpec.scala +++ /dev/null @@ -1,37 +0,0 @@ -package pl.touk.nussknacker.engine.api.deployment - -import org.scalatest.Inside -import org.scalatest.funspec.AnyFunSpec -import org.scalatest.matchers.should.Matchers -import pl.touk.nussknacker.engine.api.deployment.simple.{SimpleProcessStateDefinitionManager, SimpleStateStatus} -import pl.touk.nussknacker.engine.api.process.VersionId -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId - -import scala.collection.immutable.List - -class SimpleProcessStateSpec extends AnyFunSpec with Matchers with Inside { - - def createProcessState(stateStatus: StateStatus): ProcessState = - SimpleProcessStateDefinitionManager.processState( - StatusDetails(stateStatus, None, Some(ExternalDeploymentId("12"))), - VersionId(1), - None, - None, - ) - - it("scenario state should be during deploy") { - val state = createProcessState(SimpleStateStatus.DuringDeploy) - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - } - - it("scenario state should be running") { - val state = createProcessState(SimpleStateStatus.Running) - state.allowedActions shouldBe List(ScenarioActionName.Cancel, ScenarioActionName.Pause, ScenarioActionName.Deploy) - } - - it("scenario state should be finished") { - val state = createProcessState(SimpleStateStatus.Finished) - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Archive, 
ScenarioActionName.Rename) - } - -} diff --git a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleScenarioStatusDtoSpec.scala b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleScenarioStatusDtoSpec.scala new file mode 100644 index 00000000000..ee64fb6fd58 --- /dev/null +++ b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/SimpleScenarioStatusDtoSpec.scala @@ -0,0 +1,39 @@ +package pl.touk.nussknacker.engine.api.deployment + +import org.scalatest.Inside +import org.scalatest.funsuite.AnyFunSuiteLike +import org.scalatest.matchers.should.Matchers +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.{ + ScenarioStatusPresentationDetails, + ScenarioStatusWithScenarioContext +} +import pl.touk.nussknacker.engine.api.deployment.simple.{SimpleProcessStateDefinitionManager, SimpleStateStatus} +import pl.touk.nussknacker.engine.api.process.VersionId + +class SimpleScenarioStatusDtoSpec extends AnyFunSuiteLike with Matchers with Inside { + + def statusPresentation(status: StateStatus): ScenarioStatusPresentationDetails = + SimpleProcessStateDefinitionManager.statusPresentation( + ScenarioStatusWithScenarioContext( + status, + None, + None, + ) + ) + + test("scenario state should be during deploy") { + val state = statusPresentation(SimpleStateStatus.DuringDeploy) + state.allowedActions shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) + } + + test("scenario state should be running") { + val state = statusPresentation(SimpleStateStatus.Running) + state.allowedActions shouldBe Set(ScenarioActionName.Cancel, ScenarioActionName.Pause, ScenarioActionName.Deploy) + } + + test("scenario state should be finished") { + val state = statusPresentation(SimpleStateStatus.Finished) + state.allowedActions shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) + } + +} diff 
--git a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/StateStatusCodingSpec.scala b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/StateStatusCodingSpec.scala deleted file mode 100644 index 61707541eaf..00000000000 --- a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/StateStatusCodingSpec.scala +++ /dev/null @@ -1,51 +0,0 @@ -package pl.touk.nussknacker.engine.api.deployment - -import io.circe.Json -import io.circe.syntax._ -import org.scalatest.OptionValues -import org.scalatest.funsuite.AnyFunSuite -import org.scalatest.matchers.should.Matchers -import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.test.EitherValuesDetailedMessage - -class StateStatusCodingSpec extends AnyFunSuite with Matchers with EitherValuesDetailedMessage with OptionValues { - - test("simple status coding") { - val givenStatus: StateStatus = SimpleStateStatus.Running - val statusJson = givenStatus.asJson - statusJson.hcursor.get[String]("name").rightValue shouldEqual "RUNNING" - - val decodedStatus = Json - .obj( - "name" -> Json.fromString("RUNNING") - ) - .as[StateStatus] - .rightValue - decodedStatus.name shouldEqual givenStatus.name - } - - test("custom status coding") { - val givenStatus: StateStatus = MyCustomStateStatus("fooBar") - - val statusJson = givenStatus.asJson - statusJson.hcursor.get[String]("name").rightValue shouldEqual "CUSTOM" - // we don't encode custom state statuses fields be design - - val decodedStatus = Json - .obj( - "name" -> Json.fromString("CUSTOM") - ) - .as[StateStatus] - .rightValue - // we don't decode correctly custom statuses be design - their role is to encapsulate business status of process which will be - // then presented by ProcessStateDefinitionManager - decodedStatus.name shouldEqual givenStatus.name - 
decodedStatus should not equal givenStatus - } - - sealed case class MyCustomStateStatus(someField: String) extends StateStatus { - override def name: StatusName = "CUSTOM" - } - -} diff --git a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManagerSpec.scala b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManagerSpec.scala index c24c358ff9e..31d5408798a 100644 --- a/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManagerSpec.scala +++ b/designer/deployment-manager-api/src/test/scala/pl/touk/nussknacker/engine/api/deployment/cache/CachingProcessStateDeploymentManagerSpec.scala @@ -10,7 +10,7 @@ import org.scalatestplus.mockito.MockitoSugar import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.process.ProcessName -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId +import pl.touk.nussknacker.engine.deployment.DeploymentId import pl.touk.nussknacker.test.PatientScalaFutures import java.util.UUID @@ -30,7 +30,7 @@ class CachingProcessStateDeploymentManagerSpec delegate, 10 seconds, NoDeploymentSynchronisationSupport, - NoStateQueryForAllScenariosSupport, + NoDeploymentsStatusesQueryForAllScenariosSupport, NoSchedulingSupport, ) @@ -41,7 +41,7 @@ class CachingProcessStateDeploymentManagerSpec results.map(_.cached) should contain only false results.map(_.value).distinct should have size 2 - verify(delegate, times(2)).getProcessStates(any[ProcessName])(any[DataFreshnessPolicy]) + verify(delegate, times(2)).getScenarioDeploymentsStatuses(any[ProcessName])(any[DataFreshnessPolicy]) } test("should cache state for DataFreshnessPolicy.CanBeCached") { @@ -50,7 +50,7 @@ class CachingProcessStateDeploymentManagerSpec delegate, 10 
seconds, NoDeploymentSynchronisationSupport, - NoStateQueryForAllScenariosSupport, + NoDeploymentsStatusesQueryForAllScenariosSupport, NoSchedulingSupport, ) @@ -60,7 +60,7 @@ class CachingProcessStateDeploymentManagerSpec secondInvocation.cached shouldBe true List(firstInvocation, secondInvocation).map(_.value).distinct should have size 1 - verify(delegate, times(1)).getProcessStates(any[ProcessName])(any[DataFreshnessPolicy]) + verify(delegate, times(1)).getScenarioDeploymentsStatuses(any[ProcessName])(any[DataFreshnessPolicy]) } test("should reuse state updated by DataFreshnessPolicy.Fresh during reading with DataFreshnessPolicy.CanBeCached") { @@ -69,7 +69,7 @@ class CachingProcessStateDeploymentManagerSpec delegate, 10 seconds, NoDeploymentSynchronisationSupport, - NoStateQueryForAllScenariosSupport, + NoDeploymentsStatusesQueryForAllScenariosSupport, NoSchedulingSupport, ) @@ -79,27 +79,28 @@ class CachingProcessStateDeploymentManagerSpec resultForCanBeCached.cached shouldBe true List(resultForFresh, resultForCanBeCached).map(_.value).distinct should have size 1 - verify(delegate, times(1)).getProcessStates(any[ProcessName])(any[DataFreshnessPolicy]) + verify(delegate, times(1)).getScenarioDeploymentsStatuses(any[ProcessName])(any[DataFreshnessPolicy]) } implicit class DeploymentManagerOps(dm: DeploymentManager) { def getProcessStatesDeploymentIdNow(freshnessPolicy: DataFreshnessPolicy): WithDataFreshnessStatus[List[String]] = - dm.getProcessStates(ProcessName("foo"))(freshnessPolicy) + dm.getScenarioDeploymentsStatuses(ProcessName("foo"))(freshnessPolicy) .futureValue - .map(_.map(_.externalDeploymentId.value.value)) + .map(_.map(_.deploymentId.value.value)) } private def prepareDMReturningRandomStates: DeploymentManager = { val delegate = mock[DeploymentManager] - when(delegate.getProcessStates(any[ProcessName])(any[DataFreshnessPolicy])).thenAnswer { _: InvocationOnMock => - val randomState = StatusDetails( - SimpleStateStatus.Running, - deploymentId = 
None, - externalDeploymentId = Some(ExternalDeploymentId(UUID.randomUUID().toString)) - ) - Future.successful(WithDataFreshnessStatus.fresh(List(randomState))) + when(delegate.getScenarioDeploymentsStatuses(any[ProcessName])(any[DataFreshnessPolicy])).thenAnswer { + _: InvocationOnMock => + val randomState = DeploymentStatusDetails( + SimpleStateStatus.Running, + deploymentId = Some(DeploymentId(UUID.randomUUID().toString)), + version = None, + ) + Future.successful(WithDataFreshnessStatus.fresh(List(randomState))) } delegate } diff --git a/designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioStatusDto.scala b/designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioStatusDto.scala new file mode 100644 index 00000000000..91a971a561c --- /dev/null +++ b/designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioStatusDto.scala @@ -0,0 +1,50 @@ +package pl.touk.nussknacker.restmodel.scenariodetails + +import io.circe._ +import io.circe.generic.JsonCodec +import pl.touk.nussknacker.engine.api.deployment.{ScenarioActionName, ScenarioVersionId} + +import java.net.URI + +/** + * Represents status of a scenario. + * Contains: + * - status itself and its evaluation moment: status, startTime + * - how to display in UI: icon, tooltip, description + * - which actions are allowed: allowedActions + * + * Statuses definition, allowed actions and current scenario presentation is defined by [[ProcessStateDefinitionManager]]. + * @param description Short message displayed in top right panel of scenario diagram panel. + * @param tooltip Message displayed when mouse is hoovering over an icon (both scenarios and diagram panel). + * May contain longer, detailed status description. 
+ */ +@JsonCodec case class ScenarioStatusDto( + // TODO: flatten it + status: ScenarioStatusNameWrapperDto, + visibleActions: List[ScenarioActionName], + allowedActions: List[ScenarioActionName], + actionTooltips: Map[ScenarioActionName, String], + icon: URI, + tooltip: String, + description: String, +) + +@JsonCodec case class ScenarioStatusNameWrapperDto(name: String) + +object ScenarioStatusDto { + implicit val uriEncoder: Encoder[URI] = Encoder.encodeString.contramap(_.toString) + implicit val uriDecoder: Decoder[URI] = Decoder.decodeString.map(URI.create) + implicit val scenarioVersionIdEncoder: Encoder[ScenarioVersionId] = Encoder.encodeLong.contramap(_.value) + implicit val scenarioVersionIdDecoder: Decoder[ScenarioVersionId] = Decoder.decodeLong.map(ScenarioVersionId.apply) + + implicit val scenarioActionNameEncoder: Encoder[ScenarioActionName] = + Encoder.encodeString.contramap(ScenarioActionName.serialize) + implicit val scenarioActionNameDecoder: Decoder[ScenarioActionName] = + Decoder.decodeString.map(ScenarioActionName.deserialize) + + implicit val scenarioActionNameKeyDecoder: KeyDecoder[ScenarioActionName] = + (key: String) => Some(ScenarioActionName.deserialize(key)) + implicit val scenarioActionNameKeyEncoder: KeyEncoder[ScenarioActionName] = (name: ScenarioActionName) => + ScenarioActionName.serialize(name) + +} diff --git a/designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioWithDetails.scala b/designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioWithDetails.scala index 76a567d9117..881aafc51e5 100644 --- a/designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioWithDetails.scala +++ b/designer/restmodel/src/main/scala/pl/touk/nussknacker/restmodel/scenariodetails/ScenarioWithDetails.scala @@ -3,7 +3,7 @@ package pl.touk.nussknacker.restmodel.scenariodetails import io.circe.{Decoder, Encoder} import pl.touk.nussknacker.engine.api.ProcessVersion 
import pl.touk.nussknacker.engine.api.component.ProcessingMode -import pl.touk.nussknacker.engine.api.deployment.{ProcessAction, ProcessState} +import pl.touk.nussknacker.engine.api.deployment.ProcessAction import pl.touk.nussknacker.engine.api.graph.ScenarioGraph import pl.touk.nussknacker.engine.api.process._ import pl.touk.nussknacker.engine.deployment.EngineSetupName @@ -41,7 +41,7 @@ final case class ScenarioWithDetails( override val validationResult: Option[ValidationResult], override val history: Option[List[ScenarioVersion]], override val modelVersion: Option[Int], - state: Option[ProcessState] + state: Option[ScenarioStatusDto] ) extends BaseScenarioWithDetailsForMigrations { def parameters: ScenarioParameters = ScenarioParameters(processingMode, processCategory, engineSetupName) diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/AppApiHttpService.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/AppApiHttpService.scala index 1892984ab2f..3d4f75316ba 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/AppApiHttpService.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/AppApiHttpService.scala @@ -3,13 +3,13 @@ package pl.touk.nussknacker.ui.api import com.typesafe.config.{Config, ConfigRenderOptions} import com.typesafe.scalalogging.LazyLogging import io.circe.parser -import pl.touk.nussknacker.engine.api.deployment.ProcessState import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus import pl.touk.nussknacker.engine.api.modelinfo.ModelInfo import pl.touk.nussknacker.engine.api.process.{ProcessName, ProcessingType} import pl.touk.nussknacker.engine.util.ExecutionContextWithIORuntime import pl.touk.nussknacker.engine.util.Implicits.RichTupleList import pl.touk.nussknacker.engine.version.BuildInfo +import pl.touk.nussknacker.restmodel.scenariodetails.{ScenarioStatusDto, ScenarioStatusNameWrapperDto} import 
pl.touk.nussknacker.ui.api.description.AppApiEndpoints import pl.touk.nussknacker.ui.api.description.AppApiEndpoints.Dtos._ import pl.touk.nussknacker.ui.process.ProcessService.GetScenarioWithDetailsOptions @@ -165,15 +165,19 @@ class AppApiHttpService( } } - private def problemStateByProcessName(implicit user: LoggedUser): Future[Map[ProcessName, ProcessState]] = { + private def problemStateByProcessName(implicit user: LoggedUser): Future[Map[ProcessName, ScenarioStatusDto]] = { for { processes <- processService.getLatestProcessesWithDetails( ScenarioQuery.deployed, GetScenarioWithDetailsOptions.detailsOnly.copy(fetchState = true) ) statusMap = processes.flatMap(process => process.state.map(process.name -> _)).toMap + // TODO: we should use domain objects instead of DTOs withProblem = statusMap.collect { - case (name, processStatus @ ProcessState(_, _ @ProblemStateStatus(_, _), _, _, _, _, _, _, _, _, _, _)) => + case ( + name, + processStatus @ ScenarioStatusDto(ScenarioStatusNameWrapperDto(ProblemStateStatus.name), _, _, _, _, _, _) + ) => (name, processStatus) } } yield withProblem diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ManagementResources.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ManagementResources.scala index 62d6156d727..349f85d1d55 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ManagementResources.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ManagementResources.scala @@ -70,7 +70,6 @@ class ManagementResources( dispatcher: DeploymentManagerDispatcher, metricRegistry: MetricRegistry, scenarioTestServices: ProcessingTypeDataProvider[ScenarioTestService, _], - typeToConfig: ProcessingTypeDataProvider[ModelData, _] )(implicit val ec: ExecutionContext) extends Directives with LazyLogging diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesExportResources.scala 
b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesExportResources.scala index e48d3531517..ae73ee4eac1 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesExportResources.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesExportResources.scala @@ -10,9 +10,7 @@ import pl.touk.nussknacker.engine.api.graph.ScenarioGraph import pl.touk.nussknacker.engine.api.process.{ProcessName, ProcessingType} import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.ui.api.description.scenarioActivity.Dtos.Legacy.ProcessActivity -import pl.touk.nussknacker.ui.api.utils.ScenarioDetailsOps.ScenarioWithDetailsOps import pl.touk.nussknacker.ui.process.ProcessService -import pl.touk.nussknacker.ui.process.label.ScenarioLabel import pl.touk.nussknacker.ui.process.marshall.CanonicalProcessConverter import pl.touk.nussknacker.ui.process.processingtype.provider.ProcessingTypeDataProvider import pl.touk.nussknacker.ui.process.repository.activities.ScenarioActivityRepository diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesResources.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesResources.scala index dba456afb3f..c0b310e1597 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesResources.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ProcessesResources.scala @@ -10,6 +10,7 @@ import io.circe.syntax.EncoderOps import pl.touk.nussknacker.engine.api.deployment.DataFreshnessPolicy import pl.touk.nussknacker.engine.api.process.{ProcessName, VersionId} import pl.touk.nussknacker.engine.util.Implicits._ +import pl.touk.nussknacker.restmodel.scenariodetails.ScenarioStatusDto import pl.touk.nussknacker.ui._ import pl.touk.nussknacker.ui.listener.ProcessChangeEvent._ import pl.touk.nussknacker.ui.listener.{ProcessChangeEvent, ProcessChangeListener, User} @@ -21,7 +22,7 @@ import 
pl.touk.nussknacker.ui.process.ProcessService.{ } import pl.touk.nussknacker.ui.process.ScenarioWithDetailsConversions._ import pl.touk.nussknacker.ui.process._ -import pl.touk.nussknacker.ui.process.deployment.ScenarioStateProvider +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider import pl.touk.nussknacker.ui.security.api.LoggedUser import pl.touk.nussknacker.ui.util._ @@ -29,7 +30,8 @@ import scala.concurrent.{ExecutionContext, Future} class ProcessesResources( protected val processService: ProcessService, - scenarioStateProvider: ScenarioStateProvider, + scenarioStatusProvider: ScenarioStatusProvider, + scenarioStatusPresenter: ScenarioStatusPresenter, processToolbarService: ScenarioToolbarService, val processAuthorizer: AuthorizeProcess, processChangeListener: ProcessChangeListener @@ -208,9 +210,12 @@ class ProcessesResources( currentlyPresentedVersionIdParameter { currentlyPresentedVersionId => complete { implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - scenarioStateProvider - .getProcessState(processId, currentlyPresentedVersionId) - .map(ToResponseMarshallable(_)) + for { + scenarioDetails <- processService + .getLatestProcessWithDetails(processId, GetScenarioWithDetailsOptions.detailsOnly) + statusDetails <- scenarioStatusProvider.getScenarioStatus(processId) + dto = scenarioStatusPresenter.toDto(statusDetails, scenarioDetails, currentlyPresentedVersionId) + } yield dto } } } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ScenarioStatusPresenter.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ScenarioStatusPresenter.scala new file mode 100644 index 00000000000..9ea0ca556d9 --- /dev/null +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/api/ScenarioStatusPresenter.scala @@ -0,0 +1,42 @@ +package pl.touk.nussknacker.ui.api + +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext 
+import pl.touk.nussknacker.engine.api.deployment.StateStatus +import pl.touk.nussknacker.engine.api.process.VersionId +import pl.touk.nussknacker.restmodel.scenariodetails.{ + ScenarioStatusDto, + ScenarioStatusNameWrapperDto, + ScenarioWithDetails +} +import pl.touk.nussknacker.ui.process.deployment.DeploymentManagerDispatcher +import pl.touk.nussknacker.ui.security.api.LoggedUser + +class ScenarioStatusPresenter(dispatcher: DeploymentManagerDispatcher) { + + def toDto( + scenarioStatus: StateStatus, + processDetails: ScenarioWithDetails, + currentlyPresentedVersionId: Option[VersionId] + )(implicit user: LoggedUser): ScenarioStatusDto = { + val presentation = dispatcher + .deploymentManagerUnsafe(processDetails.processingType) + .processStateDefinitionManager + .statusPresentation( + ScenarioStatusWithScenarioContext( + scenarioStatus = scenarioStatus, + deployedVersionId = processDetails.lastDeployedAction.map(_.processVersionId), + currentlyPresentedVersionId = currentlyPresentedVersionId + ) + ) + ScenarioStatusDto( + status = ScenarioStatusNameWrapperDto(scenarioStatus.name), + visibleActions = presentation.visibleActions, + allowedActions = presentation.allowedActions.toList.sortBy(_.value), + actionTooltips = presentation.actionTooltips, + icon = presentation.icon, + tooltip = presentation.tooltip, + description = presentation.description, + ) + } + +} diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ProcessService.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ProcessService.scala index 3ce05bca864..c60b37fe8cd 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ProcessService.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ProcessService.scala @@ -1,8 +1,9 @@ package pl.touk.nussknacker.ui.process import cats._ +import cats.data.Ior.Both import cats.data.Validated -import cats.implicits.toTraverseOps +import cats.implicits.{toAlignOps, toTraverseOps} import 
cats.syntax.functor._ import com.typesafe.scalalogging.LazyLogging import db.util.DBIOActionInstances.DB @@ -20,9 +21,10 @@ import pl.touk.nussknacker.restmodel.scenariodetails.{BaseCreateScenarioCommand, import pl.touk.nussknacker.restmodel.validation.ScenarioGraphWithValidationResult import pl.touk.nussknacker.ui.NuDesignerError import pl.touk.nussknacker.ui.api.ProcessesResources.ProcessUnmarshallingError +import pl.touk.nussknacker.ui.api.ScenarioStatusPresenter import pl.touk.nussknacker.ui.process.ProcessService._ import pl.touk.nussknacker.ui.process.ScenarioWithDetailsConversions._ -import pl.touk.nussknacker.ui.process.deployment.ScenarioStateProvider +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider import pl.touk.nussknacker.ui.process.exception.{ProcessIllegalAction, ProcessValidationError} import pl.touk.nussknacker.ui.process.label.ScenarioLabel import pl.touk.nussknacker.ui.process.marshall.CanonicalProcessConverter @@ -178,7 +180,8 @@ trait ProcessService { * Each action includes verification based on actual process state and checking process is fragment / archived. 
*/ class DBProcessService( - processStateProvider: ScenarioStateProvider, + scenarioStatusProvider: ScenarioStatusProvider, + scenarioStatusPresenter: ScenarioStatusPresenter, newProcessPreparers: ProcessingTypeDataProvider[NewProcessPreparer, _], scenarioParametersServiceProvider: ProcessingTypeDataProvider[_, ScenarioParametersService], processResolverByProcessingType: ProcessingTypeDataProvider[UIProcessResolver, _], @@ -254,7 +257,7 @@ class DBProcessService( def apply[PS: ScenarioShapeFetchStrategy]: Future[F[ScenarioWithDetailsEntity[PS]]] } - private def doGetProcessWithDetails[F[_]: Traverse]( + private def doGetProcessWithDetails[F[_]: Traverse: Align]( fetchScenario: FetchScenarioFun[F], options: GetScenarioWithDetailsOptions )( @@ -278,11 +281,24 @@ class DBProcessService( case skipFieldsOption: SkipAdditionalFields => ScenarioWithDetailsConversions.skipAdditionalFields(details, skipFieldsOption) } - }).flatMap { details => + }).flatMap { scenarioDetailsTraverse => if (options.fetchState) - processStateProvider.enrichDetailsWithProcessState(details) + scenarioStatusProvider.getScenariosStatuses(scenarioDetailsTraverse.map(_.toEntity)).map { + statusesDetailsTraverse => + scenarioDetailsTraverse.alignWith(statusesDetailsTraverse) { + case Both(scenarioDetails, scenarioStatusOpt) => + scenarioDetails.copy(state = + scenarioStatusOpt + .map(scenarioStatusPresenter.toDto(_, scenarioDetails, currentlyPresentedVersionId = None)) + ) + case other => + throw new IllegalStateException( + s"Traverse with different sizes during scenario status enrichment: $other" + ) + } + } else - Future.successful(details) + Future.successful(scenarioDetailsTraverse) } } @@ -537,15 +553,15 @@ class DBProcessService( callback: => Future[T] )(implicit user: LoggedUser): Future[T] = { implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - processStateProvider - .getProcessState(process.toEntity) - .flatMap(state => { - if 
(state.allowedActions.contains(actionToCheck)) { + scenarioStatusProvider + .getAllowedActionsForScenarioStatus(process.toEntity) + .flatMap { statusWithAllowedActions => + if (statusWithAllowedActions.allowedActions.contains(actionToCheck)) { callback } else { - throw ProcessIllegalAction(actionToCheck, process.name, state) + throw ProcessIllegalAction(actionToCheck, process.name, statusWithAllowedActions) } - }) + } } private def doArchive(process: ScenarioWithDetails)(implicit user: LoggedUser): Future[Unit] = diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ScenarioWithDetailsConversions.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ScenarioWithDetailsConversions.scala index c878ae06c5f..1bcafc46e90 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ScenarioWithDetailsConversions.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/ScenarioWithDetailsConversions.scala @@ -76,7 +76,8 @@ object ScenarioWithDetailsConversions { implicit class Ops(scenarioWithDetails: ScenarioWithDetails) { - // TODO: Instead of doing these conversions below, wee should pass around ScenarioWithDetails + // TODO: Instead of doing these conversions below, wee should transform ScenarioWithDetailsEntity with some additional context + // and build DTO at the end def toEntity: ScenarioWithDetailsEntity[Unit] = { toEntity(()) } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ActionService.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ActionService.scala index e2ef11a8dcb..0305477ac09 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ActionService.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ActionService.scala @@ -8,6 +8,10 @@ import pl.touk.nussknacker.engine.api.process._ import pl.touk.nussknacker.ui.api.{DeploymentCommentSettings, 
ListenerApiUser} import pl.touk.nussknacker.ui.listener.ProcessChangeEvent.{OnActionExecutionFinished, OnActionFailed, OnActionSuccess} import pl.touk.nussknacker.ui.listener.{ProcessChangeListener, User => ListenerUser} +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.{ + ScenarioStatusProvider, + ScenarioStatusWithAllowedActions +} import pl.touk.nussknacker.ui.process.exception.ProcessIllegalAction import pl.touk.nussknacker.ui.process.repository.ProcessDBQueryRepository.ProcessNotFoundError import pl.touk.nussknacker.ui.process.repository._ @@ -22,14 +26,13 @@ import scala.util.{Failure, Success} // Responsibility of this class is to wrap deployment actions with persistent, transactional context. // It ensures that all actions are done consistently: do validations and ensures that only allowed actions // will be executed in given state. It sends notifications about finished actions. -// Also thanks to it we are able to check if state on remote engine is the same as persisted state. +// Also thanks to that, we are able to check if state on remote engine is the same as persisted state. 
class ActionService( - dispatcher: DeploymentManagerDispatcher, processRepository: FetchingProcessRepository[DB], actionRepository: ScenarioActionRepository, dbioRunner: DBIOActionRunner, processChangeListener: ProcessChangeListener, - scenarioStateProvider: ScenarioStateProvider, + scenarioStatusProvider: ScenarioStatusProvider, deploymentCommentSettings: Option[DeploymentCommentSettings], clock: Clock )(implicit ec: ExecutionContext) @@ -61,7 +64,7 @@ class ActionService( _ <- validateExpectedProcessingType(expectedProcessingType, processId) lastStateAction <- actionRepository.getFinishedProcessActions( processId, - Some(ScenarioActionName.StateActions) + Some(ScenarioActionName.ScenarioStatusActions) ) } yield lastStateAction.headOption } @@ -122,18 +125,12 @@ class ActionService( ] => Option[VersionId] ) { - def processAction[COMMAND <: ScenarioCommand[RESULT], RESULT]( - command: COMMAND, - actionName: ScenarioActionName, - dmCommandCreator: CommandContext[LatestScenarioDetailsShape] => DMScenarioCommand[RESULT], + def processAction[COMMAND <: ScenarioCommand[RESULT], RESULT](command: COMMAND, actionName: ScenarioActionName)( + runAction: CommandContext[LatestScenarioDetailsShape] => Future[RESULT], ): Future[RESULT] = { - import command.commonData._ processActionWithCustomFinalization[COMMAND, RESULT](command, actionName) { case (ctx, actionFinalizer) => - val dmCommand = dmCommandCreator(ctx) actionFinalizer.handleResult { - dispatcher - .deploymentManagerUnsafe(ctx.latestScenarioDetails.processingType) - .processCommand(dmCommand) + runAction(ctx) } } } @@ -181,11 +178,8 @@ class ActionService( // 1.3. check if action is performed on proper scenario (not fragment, not archived) _ = checkIfCanPerformActionOnScenario(actionName, processDetails) // 1.4. 
check if action is allowed for current state - processState <- scenarioStateProvider.getProcessStateDBIO( - processDetails, - None - ) - _ = checkIfCanPerformActionInState(actionName, processDetails, processState) + stateWithAllowedActions <- scenarioStatusProvider.getAllowedActionsForScenarioStatusDBIO(processDetails) + _ = checkIfCanPerformActionInState(actionName, processDetails, stateWithAllowedActions) // 1.5. calculate which scenario version is affected by the action: latest for deploy, deployed for cancel versionOnWhichActionIsDone = extractVersionOnWhichActionIsDoneFromLatestScenarioDetails(processDetails) // 1.6. create new action, action is started with "in progress" state, the whole command execution can take some time @@ -216,12 +210,13 @@ class ActionService( private def checkIfCanPerformActionInState( actionName: ScenarioActionName, processDetails: ScenarioWithDetailsEntity[LatestScenarioDetailsShape], - ps: ProcessState + statusWithAllowedActions: ScenarioStatusWithAllowedActions ): Unit = { - val allowedActions = ps.allowedActions.toSet - if (!allowedActions.contains(actionName)) { - logger.debug(s"Action: $actionName on process: ${processDetails.name} not allowed in ${ps.status} state") - throw ProcessIllegalAction(actionName, processDetails.name, ps.status.name, allowedActions) + if (!statusWithAllowedActions.allowedActions.contains(actionName)) { + logger.debug( + s"Action: $actionName on process: ${processDetails.name} not allowed in ${statusWithAllowedActions.scenarioStatus} state" + ) + throw ProcessIllegalAction(actionName, processDetails.name, statusWithAllowedActions) } } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentService.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentService.scala index 9f2a65398e8..9de53ff3d07 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentService.scala +++ 
b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentService.scala @@ -50,30 +50,34 @@ class DeploymentService( // TODO: This inconsistent action-state handling needs a fix. actionService .actionProcessorForVersion[Unit](_.lastDeployedAction.map(_.processVersionId)) - .processAction( - command = command, - actionName = ScenarioActionName.Cancel, - dmCommandCreator = _ => - DMCancelScenarioCommand( - command.commonData.processIdWithName.name, - command.commonData.user.toManagerUser + .processAction[CancelScenarioCommand, Unit](command = command, actionName = ScenarioActionName.Cancel) { ctx => + import command.commonData._ + dispatcher + .deploymentManagerUnsafe(ctx.latestScenarioDetails.processingType) + .processCommand( + DMCancelScenarioCommand(command.commonData.processIdWithName.name, command.commonData.user.toManagerUser) ) - ) + } } private def runOffSchedule(command: RunOffScheduleCommand): Future[RunOffScheduleResult] = { actionService .actionProcessorForLatestVersion[CanonicalProcess] - .processAction( + .processAction[RunOffScheduleCommand, RunOffScheduleResult]( command = command, - actionName = ScenarioActionName.RunOffSchedule, - dmCommandCreator = ctx => - DMRunOffScheduleCommand( - ctx.latestScenarioDetails.toEngineProcessVersion, - ctx.latestScenarioDetails.json, - command.commonData.user.toManagerUser, + actionName = ScenarioActionName.RunOffSchedule + ) { ctx => + import command.commonData._ + dispatcher + .deploymentManagerUnsafe(ctx.latestScenarioDetails.processingType) + .processCommand( + DMRunOffScheduleCommand( + ctx.latestScenarioDetails.toEngineProcessVersion, + ctx.latestScenarioDetails.json, + command.commonData.user.toManagerUser, + ) ) - ) + } } private def runDeployment(command: RunDeploymentCommand): Future[Future[Option[ExternalDeploymentId]]] = { diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioStateProvider.scala 
b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioStateProvider.scala deleted file mode 100644 index f40f4c0360c..00000000000 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioStateProvider.scala +++ /dev/null @@ -1,436 +0,0 @@ -package pl.touk.nussknacker.ui.process.deployment - -import akka.actor.ActorSystem -import cats.Traverse -import cats.implicits.{toFoldableOps, toTraverseOps} -import cats.syntax.functor._ -import com.typesafe.scalalogging.LazyLogging -import db.util.DBIOActionInstances._ -import pl.touk.nussknacker.engine.api.deployment.ScenarioActionName.{Cancel, Deploy} -import pl.touk.nussknacker.engine.api.deployment._ -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus -import pl.touk.nussknacker.engine.api.deployment.simple.{SimpleProcessStateDefinitionManager, SimpleStateStatus} -import pl.touk.nussknacker.engine.api.process._ -import pl.touk.nussknacker.engine.util.WithDataFreshnessStatusUtils.{ - WithDataFreshnessStatusMapOps, - WithDataFreshnessStatusOps -} -import pl.touk.nussknacker.restmodel.scenariodetails.ScenarioWithDetails -import pl.touk.nussknacker.ui.BadRequestError -import pl.touk.nussknacker.ui.process.ScenarioWithDetailsConversions.Ops -import pl.touk.nussknacker.ui.process.deployment.ScenarioStateProvider.FragmentStateException -import pl.touk.nussknacker.ui.process.repository.ProcessDBQueryRepository.ProcessNotFoundError -import pl.touk.nussknacker.ui.process.repository._ -import pl.touk.nussknacker.ui.security.api.LoggedUser -import pl.touk.nussknacker.ui.util.FutureUtils._ -import slick.dbio.{DBIO, DBIOAction} - -import scala.concurrent.duration._ -import scala.concurrent.{ExecutionContext, Future} -import scala.language.higherKinds -import scala.util.control.NonFatal - -trait ScenarioStateProvider { - - def enrichDetailsWithProcessState[F[_]: Traverse](processTraverse: F[ScenarioWithDetails])( - implicit user: 
LoggedUser, - freshnessPolicy: DataFreshnessPolicy - ): Future[F[ScenarioWithDetails]] - - def getProcessState( - processDetails: ScenarioWithDetailsEntity[_] - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[ProcessState] - - def getProcessState( - processIdWithName: ProcessIdWithName, - currentlyPresentedVersionId: Option[VersionId], - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[ProcessState] - - def getProcessStateDBIO(processDetails: ScenarioWithDetailsEntity[_], currentlyPresentedVersionId: Option[VersionId])( - implicit user: LoggedUser, - freshnessPolicy: DataFreshnessPolicy - ): DB[ProcessState] - -} - -object ScenarioStateProvider { - - def apply( - dispatcher: DeploymentManagerDispatcher, - processRepository: FetchingProcessRepository[DB], - actionRepository: ScenarioActionRepository, - dbioRunner: DBIOActionRunner, - scenarioStateTimeout: Option[FiniteDuration] - )(implicit system: ActorSystem): ScenarioStateProvider = - new ScenarioStateProviderImpl(dispatcher, processRepository, actionRepository, dbioRunner, scenarioStateTimeout) - - object FragmentStateException extends BadRequestError("Fragment doesn't have state.") - -} - -private class ScenarioStateProviderImpl( - dispatcher: DeploymentManagerDispatcher, - processRepository: FetchingProcessRepository[DB], - actionRepository: ScenarioActionRepository, - dbioRunner: DBIOActionRunner, - scenarioStateTimeout: Option[FiniteDuration] -)(implicit system: ActorSystem) - extends ScenarioStateProvider - with LazyLogging { - - private implicit val ec: ExecutionContext = system.dispatcher - - // TODO: check deployment id to be sure that returned status is for given deployment - def getProcessState( - processIdWithName: ProcessIdWithName, - currentlyPresentedVersionId: Option[VersionId], - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[ProcessState] = { - dbioRunner.run(for { - processDetailsOpt <- 
processRepository.fetchLatestProcessDetailsForProcessId[Unit](processIdWithName.id) - processDetails <- existsOrFail(processDetailsOpt, ProcessNotFoundError(processIdWithName.name)) - result <- getProcessStateDBIO( - processDetails, - currentlyPresentedVersionId - ) - } yield result) - } - - def getProcessStateDBIO( - processDetails: ScenarioWithDetailsEntity[_], - currentlyPresentedVersionId: Option[VersionId] - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): DB[ProcessState] = { - for { - inProgressActionNames <- actionRepository.getInProgressActionNames(processDetails.processId) - result <- getProcessStateFetchingStatusFromManager( - processDetails, - inProgressActionNames, - currentlyPresentedVersionId - ) - } yield result - } - - def getProcessState( - processDetails: ScenarioWithDetailsEntity[_] - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[ProcessState] = { - dbioRunner.run(for { - inProgressActionNames <- actionRepository.getInProgressActionNames(processDetails.processId) - result <- getProcessStateFetchingStatusFromManager(processDetails, inProgressActionNames, None) - } yield result) - } - - def enrichDetailsWithProcessState[F[_]: Traverse](processTraverse: F[ScenarioWithDetails])( - implicit user: LoggedUser, - freshnessPolicy: DataFreshnessPolicy - ): Future[F[ScenarioWithDetails]] = { - val scenarios = processTraverse.toList - dbioRunner.run( - for { - actionsInProgress <- getInProgressActionTypesForScenarios(scenarios) - prefetchedStates <- DBIO.from(getPrefetchedStatesForSupportedManagers(scenarios)) - processesWithState <- processTraverse - .map { - case process if process.isFragment => DBIO.successful(process) - case process => - val prefetchedState = for { - prefetchedStatesForProcessingType <- prefetchedStates.get(process.processingType) - // State is prefetched for all scenarios for the given processing type. 
- // If there is no information available for a specific scenario name, - // then it means that DM is not aware of this scenario, and we should default to List.empty[StatusDetails]. - prefetchedState = prefetchedStatesForProcessingType.getOrElse(process.name, List.empty) - } yield prefetchedState - prefetchedState match { - case Some(prefetchedStatusDetails) => - getProcessStateUsingPrefetchedStatus( - process.toEntity, - actionsInProgress.getOrElse(process.processIdUnsafe, Set.empty), - None, - prefetchedStatusDetails, - ).map(state => process.copy(state = Some(state))) - case None => - getProcessStateFetchingStatusFromManager( - process.toEntity, - actionsInProgress.getOrElse(process.processIdUnsafe, Set.empty), - None, - ).map(state => process.copy(state = Some(state))) - } - } - .sequence[DB, ScenarioWithDetails] - } yield processesWithState - ) - } - - private def getProcessStateFetchingStatusFromManager( - processDetails: ScenarioWithDetailsEntity[_], - inProgressActionNames: Set[ScenarioActionName], - currentlyPresentedVersionId: Option[VersionId], - )(implicit freshnessPolicy: DataFreshnessPolicy, user: LoggedUser): DB[ProcessState] = { - getProcessState( - processDetails, - inProgressActionNames, - currentlyPresentedVersionId, - manager => - getStateFromDeploymentManager( - manager, - processDetails.idWithName, - processDetails.lastStateAction, - processDetails.processVersionId, - processDetails.lastDeployedAction.map(_.processVersionId), - currentlyPresentedVersionId, - ) - ) - } - - // DeploymentManager's may support fetching state of all scenarios at once - // State is prefetched only when: - // - DM has capability StateQueryForAllScenariosSupported - // - the query is about more than one scenario handled by that DM - private def getPrefetchedStatesForSupportedManagers( - scenarios: List[ScenarioWithDetails], - )( - implicit user: LoggedUser, - freshnessPolicy: DataFreshnessPolicy - ): Future[Map[ProcessingType, WithDataFreshnessStatus[Map[ProcessName, 
List[StatusDetails]]]]] = { - val allProcessingTypes = scenarios.map(_.processingType).toSet - val numberOfScenariosByProcessingType = - allProcessingTypes - .map(processingType => (processingType, scenarios.count(_.processingType == processingType))) - .toMap - val processingTypesWithMoreThanOneScenario = numberOfScenariosByProcessingType.filter(_._2 > 1).keys - - Future - .sequence { - processingTypesWithMoreThanOneScenario.map { processingType => - (for { - manager <- dispatcher.deploymentManager(processingType) - managerWithCapability <- manager.stateQueryForAllScenariosSupport match { - case supported: StateQueryForAllScenariosSupported => Some(supported) - case NoStateQueryForAllScenariosSupport => None - } - } yield getAllProcessesStates(processingType, managerWithCapability)) - .getOrElse(Future.successful(None)) - } - } - .map(_.flatten.toMap) - } - - private def getAllProcessesStates(processingType: ProcessingType, manager: StateQueryForAllScenariosSupported)( - implicit freshnessPolicy: DataFreshnessPolicy, - ): Future[Option[(ProcessingType, WithDataFreshnessStatus[Map[ProcessName, List[StatusDetails]]])]] = { - manager - .getAllProcessesStates() - .map(states => Some((processingType, states))) - .recover { case NonFatal(e) => - logger.warn( - s"Failed to get statuses of all scenarios in deployment manager for $processingType: ${e.getMessage}", - e - ) - None - } - } - - // This is optimisation tweak. We want to reduce number of calls for in progress action types. 
So for >1 scenarios - // we do one call for all in progress action types for all scenarios - private def getInProgressActionTypesForScenarios( - scenarios: List[ScenarioWithDetails] - ): DB[Map[ProcessId, Set[ScenarioActionName]]] = { - scenarios match { - case Nil => DBIO.successful(Map.empty) - case head :: Nil => - actionRepository - .getInProgressActionNames(head.processIdUnsafe) - .map(actionNames => Map(head.processIdUnsafe -> actionNames)) - case _ => - // We are getting only Deploy and Cancel InProgress actions as only these two impact ProcessState - actionRepository.getInProgressActionNames(Set(Deploy, Cancel)) - } - } - - private def getProcessStateUsingPrefetchedStatus( - processDetails: ScenarioWithDetailsEntity[_], - inProgressActionNames: Set[ScenarioActionName], - currentlyPresentedVersionId: Option[VersionId], - prefetchedStatusDetails: WithDataFreshnessStatus[List[StatusDetails]], - )(implicit user: LoggedUser): DB[ProcessState] = { - getProcessState( - processDetails, - inProgressActionNames, - currentlyPresentedVersionId, - manager => - manager - .resolve( - processDetails.idWithName, - prefetchedStatusDetails.value, - processDetails.lastStateAction, - processDetails.processVersionId, - processDetails.lastDeployedAction.map(_.processVersionId), - currentlyPresentedVersionId, - ) - .map(prefetchedStatusDetails.withValue) - ) - } - - private def getProcessState( - processDetails: ScenarioWithDetailsEntity[_], - inProgressActionNames: Set[ScenarioActionName], - currentlyPresentedVersionId: Option[VersionId], - fetchState: DeploymentManager => Future[WithDataFreshnessStatus[ProcessState]], - )(implicit user: LoggedUser): DB[ProcessState] = { - val processVersionId = processDetails.processVersionId - val deployedVersionId = processDetails.lastDeployedAction.map(_.processVersionId) - dispatcher - .deploymentManager(processDetails.processingType) - .map { manager => - if (processDetails.isFragment) { - throw FragmentStateException - } else if 
(processDetails.isArchived) { - getArchivedProcessState(processDetails, currentlyPresentedVersionId)(manager) - } else if (inProgressActionNames.contains(ScenarioActionName.Deploy)) { - logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.DuringDeploy}") - DBIOAction.successful( - manager.processStateDefinitionManager.processState( - StatusDetails(SimpleStateStatus.DuringDeploy, None), - processVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - } else if (inProgressActionNames.contains(ScenarioActionName.Cancel)) { - logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.DuringCancel}") - DBIOAction.successful( - manager.processStateDefinitionManager.processState( - StatusDetails(SimpleStateStatus.DuringCancel, None), - processVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - } else { - processDetails.lastStateAction match { - case Some(_) => - DBIOAction - .from(fetchState(manager)) - .map { statusWithFreshness => - logger.debug( - s"Status for: '${processDetails.name}' is: ${statusWithFreshness.value.status}, cached: ${statusWithFreshness.cached}, last status action: ${processDetails.lastStateAction - .map(_.actionName)})" - ) - statusWithFreshness.value - } - case _ => // We assume that the process never deployed should have no state at the engine - logger.debug(s"Status for never deployed: '${processDetails.name}' is: ${SimpleStateStatus.NotDeployed}") - DBIOAction.successful( - manager.processStateDefinitionManager.processState( - StatusDetails(SimpleStateStatus.NotDeployed, None), - processVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - } - } - } - .getOrElse( - DBIOAction.successful(SimpleProcessStateDefinitionManager.errorFailedToGet(processVersionId)) - ) - } - - // We assume that checking the state for archived doesn't make sense, and we compute the state based on the last state action - private def getArchivedProcessState( - processDetails: 
ScenarioWithDetailsEntity[_], - currentlyPresentedVersionId: Option[VersionId], - )(implicit manager: DeploymentManager) = { - val processVersionId = processDetails.processVersionId - val deployedVersionId = processDetails.lastDeployedAction.map(_.processVersionId) - processDetails.lastStateAction.map(a => (a.actionName, a.state)) match { - case Some((Cancel, _)) => - logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.Canceled}") - DBIOAction.successful( - manager.processStateDefinitionManager.processState( - StatusDetails(SimpleStateStatus.Canceled, None), - processVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - case Some((Deploy, ProcessActionState.ExecutionFinished)) => - logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.Finished} ") - DBIOAction.successful( - manager.processStateDefinitionManager.processState( - StatusDetails(SimpleStateStatus.Finished, None), - processVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - case Some(_) => - logger.warn(s"Status for: '${processDetails.name}' is: ${ProblemStateStatus.ArchivedShouldBeCanceled}") - DBIOAction.successful( - manager.processStateDefinitionManager.processState( - StatusDetails(ProblemStateStatus.ArchivedShouldBeCanceled, None), - processVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - case _ => - logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.NotDeployed}") - DBIOAction.successful( - manager.processStateDefinitionManager.processState( - StatusDetails(SimpleStateStatus.NotDeployed, None), - processVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - } - } - - private def getStateFromDeploymentManager( - deploymentManager: DeploymentManager, - processIdWithName: ProcessIdWithName, - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], 
- )( - implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[ProcessState]] = { - - val state = deploymentManager - .getProcessState( - processIdWithName, - lastStateAction, - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - .recover { case NonFatal(e) => - logger.warn(s"Failed to get status of ${processIdWithName.name}: ${e.getMessage}", e) - failedToGetProcessState(latestVersionId) - } - - scenarioStateTimeout - .map { timeout => - state.withTimeout(timeout, timeoutResult = failedToGetProcessState(latestVersionId)).map { - case CompletedNormally(value) => - value - case CompletedByTimeout(value) => - logger - .warn(s"Timeout: $timeout occurred during waiting for response from engine for ${processIdWithName.name}") - value - } - } - .getOrElse(state) - } - - private def failedToGetProcessState(versionId: VersionId) = - WithDataFreshnessStatus.fresh(SimpleProcessStateDefinitionManager.errorFailedToGet(versionId)) - - private def existsOrFail[T](checkThisOpt: Option[T], failWith: => Exception): DB[T] = { - checkThisOpt match { - case Some(checked) => DBIOAction.successful(checked) - case None => DBIOAction.failed(failWith) - } - } - -} diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioTestExecutorService.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioTestExecutorService.scala index 56c609b4bbe..7bef7060de4 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioTestExecutorService.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/ScenarioTestExecutorService.scala @@ -1,13 +1,12 @@ package pl.touk.nussknacker.ui.process.deployment +import io.circe.Json +import pl.touk.nussknacker.engine.api.ProcessVersion import pl.touk.nussknacker.engine.api.deployment.{DMTestScenarioCommand, DeploymentManager} -import 
pl.touk.nussknacker.engine.api.process.ProcessIdWithName import pl.touk.nussknacker.engine.api.test.ScenarioTestData import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.testmode.TestProcess.TestResults import pl.touk.nussknacker.ui.security.api.LoggedUser -import io.circe.Json -import pl.touk.nussknacker.engine.api.ProcessVersion import scala.concurrent.{ExecutionContext, Future} diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/DeploymentManagerReliableStatusesWrapper.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/DeploymentManagerReliableStatusesWrapper.scala new file mode 100644 index 00000000000..a5814ecac84 --- /dev/null +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/DeploymentManagerReliableStatusesWrapper.scala @@ -0,0 +1,65 @@ +package pl.touk.nussknacker.ui.process.deployment.deploymentstatus + +import akka.actor.ActorSystem +import pl.touk.nussknacker.engine.api.deployment.{DataFreshnessPolicy, DeploymentStatusDetails, WithDataFreshnessStatus} +import pl.touk.nussknacker.engine.api.process.{ProcessName, ProcessingType} +import pl.touk.nussknacker.ui.process.deployment.DeploymentManagerDispatcher +import pl.touk.nussknacker.ui.process.repository.ScenarioIdData +import pl.touk.nussknacker.ui.security.api.LoggedUser +import pl.touk.nussknacker.ui.util.FutureUtils.FutureOps + +import scala.concurrent.Future +import scala.concurrent.duration.FiniteDuration +import scala.util.control.NonFatal + +object DeploymentManagerReliableStatusesWrapper { + + implicit class Ops(dmDispatcher: DeploymentManagerDispatcher) { + + def getScenarioDeploymentsStatusesWithErrorWrappingAndTimeoutOpt( + scenarioIdData: ScenarioIdData, + timeoutOpt: Option[FiniteDuration] + )( + implicit user: LoggedUser, + freshnessPolicy: DataFreshnessPolicy, + actorSystem: ActorSystem + ): 
Future[Either[GetDeploymentsStatusesError, WithDataFreshnessStatus[List[DeploymentStatusDetails]]]] = { + import actorSystem._ + val deploymentStatusesOptFuture + : Future[Either[GetDeploymentsStatusesError, WithDataFreshnessStatus[List[DeploymentStatusDetails]]]] = + dmDispatcher + .deploymentManager(scenarioIdData.processingType) + .map( + _.getScenarioDeploymentsStatuses(scenarioIdData.name) + .map(Right(_)) + .recover { case NonFatal(e) => Left(GetDeploymentsStatusesFailure(scenarioIdData.name, e)) } + ) + .getOrElse( + Future.successful(Left(ProcessingTypeIsNotConfigured(scenarioIdData.name, scenarioIdData.processingType))) + ) + + timeoutOpt + .map { timeout => + deploymentStatusesOptFuture + .withTimeout(timeout, timeoutResult = Left(GetDeploymentsStatusTimeout(scenarioIdData.name))) + } + .getOrElse(deploymentStatusesOptFuture) + } + + } + +} + +sealed abstract class GetDeploymentsStatusesError(message: String, cause: Throwable) extends Exception(message, cause) + +case class ProcessingTypeIsNotConfigured(scenarioName: ProcessName, processingType: ProcessingType) + extends GetDeploymentsStatusesError( + s"Cant' get deployments statuses for $scenarioName because processing type: $processingType is not configured", + null + ) + +case class GetDeploymentsStatusesFailure(scenarioName: ProcessName, cause: Throwable) + extends GetDeploymentsStatusesError(s"Failure during getting deployment statuses for scenario $scenarioName", cause) + +case class GetDeploymentsStatusTimeout(scenarioName: ProcessName) + extends GetDeploymentsStatusesError(s"Timeout during getting deployment statuses for scenario $scenarioName", null) diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/EngineSideDeploymentStatusesProvider.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/EngineSideDeploymentStatusesProvider.scala new file mode 100644 index 00000000000..5ba3dbe1d52 --- /dev/null +++ 
b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/deploymentstatus/EngineSideDeploymentStatusesProvider.scala @@ -0,0 +1,130 @@ +package pl.touk.nussknacker.ui.process.deployment.deploymentstatus + +import akka.actor.ActorSystem +import com.typesafe.scalalogging.LazyLogging +import pl.touk.nussknacker.engine.api.deployment._ +import pl.touk.nussknacker.engine.api.process.{ProcessName, ProcessingType} +import pl.touk.nussknacker.engine.util.WithDataFreshnessStatusUtils.WithDataFreshnessStatusMapOps +import pl.touk.nussknacker.ui.process.deployment.DeploymentManagerDispatcher +import pl.touk.nussknacker.ui.process.deployment.deploymentstatus.DeploymentManagerReliableStatusesWrapper.Ops +import pl.touk.nussknacker.ui.process.repository._ +import pl.touk.nussknacker.ui.security.api.LoggedUser + +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal + +// This class returns information about deployments based on information from DeploymentManagers +// To have full information about DeploymentStatuses, this information has to be merged with data from the local store +// Data from the local store are needed in certain situations: +// 1. when scenario deployment is requested but not yet seen on engine side (deploy action is in progress) +// 2. when scenario job was finished and was removed by retention mechanism +// 3. when scenario job has been canceled and was removed by retention mechanism +// Currently, ActionRepository is used as the local store. It is quite problematic for determining the statuses. For example, +// case 3 is hardly possible to handle accurately because the cancel action is not correlated with the deploy action, +// so for two deploys followed by one cancel we won't know which one should be canceled +// TODO: Extract a new service that would provide a merged perspective of deployment statuses.
To do that, +// we need to change (or refactor) the local storage +class EngineSideDeploymentStatusesProvider( + dispatcher: DeploymentManagerDispatcher, + scenarioStateTimeout: Option[FiniteDuration] +)( + implicit system: ActorSystem +) extends LazyLogging { + + private implicit val ec: ExecutionContext = system.dispatcher + + // DeploymentManager's may support fetching state of all scenarios at once + // State is prefetched only when: + // - DM has capability DeploymentsStatusesQueryForAllScenariosSupport + // - the query is about more than one scenario handled by that DM - for one scenario prefetching would be non-optimal + // and this is a common case for this method because it is invoked for Id Traverse - see usages + def getBulkQueriedDeploymentStatusesForSupportedManagers( + processingTypes: Iterable[ProcessingType] + )( + implicit user: LoggedUser, + freshnessPolicy: DataFreshnessPolicy + ): Future[BulkQueriedDeploymentStatuses] = { + Future + .sequence { + processingTypes.map { processingType => + (for { + manager <- dispatcher.deploymentManager(processingType) + managerWithCapability <- manager.deploymentsStatusesQueryForAllScenariosSupport match { + case supported: DeploymentsStatusesQueryForAllScenariosSupported => Some(supported) + case NoDeploymentsStatusesQueryForAllScenariosSupport => None + } + } yield getAllDeploymentStatusesRecoveringFailure(processingType, managerWithCapability).map( + _.map(processingType -> _) + )) + .getOrElse(Future.successful(None)) + } + } + .map(_.flatten.toMap) + .map(new BulkQueriedDeploymentStatuses(_)) + } + + def getDeploymentStatuses( + scenarioIdData: ScenarioIdData, + prefetchedDeploymentStatuses: Option[BulkQueriedDeploymentStatuses] + )( + implicit user: LoggedUser, + freshnessPolicy: DataFreshnessPolicy + ): Future[Either[GetDeploymentsStatusesError, WithDataFreshnessStatus[List[DeploymentStatusDetails]]]] = { + prefetchedDeploymentStatuses + .flatMap(_.getDeploymentStatuses(scenarioIdData)) + .map { 
prefetchedStatusDetails => + Future.successful(Right(prefetchedStatusDetails)) + } + .getOrElse { + dispatcher.getScenarioDeploymentsStatusesWithErrorWrappingAndTimeoutOpt( + scenarioIdData, + scenarioStateTimeout + ) + } + } + + private def getAllDeploymentStatusesRecoveringFailure( + processingType: ProcessingType, + manager: DeploymentsStatusesQueryForAllScenariosSupported + )( + implicit freshnessPolicy: DataFreshnessPolicy, + ): Future[Option[WithDataFreshnessStatus[Map[ProcessName, List[DeploymentStatusDetails]]]]] = { + manager + .getAllScenariosDeploymentsStatuses() + .map(Some(_)) + .recover { case NonFatal(e) => + logger.warn( + s"Failed to get statuses of all scenarios in deployment manager for $processingType: ${e.getMessage}", + e + ) + None + } + } + +} + +class BulkQueriedDeploymentStatuses( + statusesByProcessingType: Map[ProcessingType, WithDataFreshnessStatus[ + Map[ProcessName, List[DeploymentStatusDetails]] + ]] +) { + + def getAllDeploymentStatuses: Iterable[DeploymentStatusDetails] = for { + processingTypeStatusesWithFreshness <- statusesByProcessingType.values + (_, deploymentStatuses) <- processingTypeStatusesWithFreshness.value + deploymentStatus <- deploymentStatuses + } yield deploymentStatus + + def getDeploymentStatuses( + scenarioIdData: ScenarioIdData + ): Option[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = + for { + statusesByScenarioName <- statusesByProcessingType.get(scenarioIdData.processingType) + // Deployment statuses are prefetched for all scenarios for the given processing type. 
+ // If there is no information available for a specific scenario name, + // then it means that DM is not aware of this scenario, and we should default to List.empty[StatusDetails] instead of None + scenarioDeploymentStatuses = statusesByScenarioName.getOrElse(scenarioIdData.name, List.empty) + } yield scenarioDeploymentStatuses + +} diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/FinishedDeploymentsStatusesSynchronizationScheduler.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/FinishedDeploymentsStatusesSynchronizationScheduler.scala new file mode 100644 index 00000000000..371cafe6aa0 --- /dev/null +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/FinishedDeploymentsStatusesSynchronizationScheduler.scala @@ -0,0 +1,62 @@ +package pl.touk.nussknacker.ui.process.deployment.reconciliation + +import akka.actor.{ActorSystem, Cancellable} +import cats.effect.IO +import cats.effect.kernel.Resource +import com.typesafe.config.Config +import com.typesafe.scalalogging.LazyLogging +import net.ceedubs.ficus.Ficus._ +import net.ceedubs.ficus.readers.ArbitraryTypeReader._ + +import scala.concurrent.Await +import scala.concurrent.duration._ +import scala.util.Try + +// TODO: Properly handle HA setup: synchronizeAll() should be invoked only on one instance of designer in a time +object FinishedDeploymentsStatusesSynchronizationScheduler extends LazyLogging { + + def resource( + actorSystem: ActorSystem, + reconciler: ScenarioDeploymentReconciler, + config: FinishedDeploymentsStatusesSynchronizationConfig + ): Resource[IO, Cancellable] = { + import actorSystem.dispatcher + + Resource.make(IO { + actorSystem.scheduler.scheduleAtFixedRate(0 seconds, config.delayBetweenSynchronizations) { () => + Try( + Await.result(reconciler.synchronizeEngineFinishedDeploymentsLocalStatuses(), config.synchronizationTimeout) + ).failed.foreach { ex => + 
logger.error( + s"Error during finished deployments statuses synchronization. Synchronization will be retried in ${config.delayBetweenSynchronizations}", + ex + ) + } + } + }) { scheduledJob => + IO { + scheduledJob.cancel() + } + } + } + +} + +final case class FinishedDeploymentsStatusesSynchronizationConfig( + // This should be lower than time during which, all archived jobs on flink will be retained. + // You can tweak this by configuring Flink's limit of jobs kept in history: web.history (default is 5 jobs limit) + // and historyserver.archive.fs.refresh-interval (default is 10 seconds) + delayBetweenSynchronizations: FiniteDuration = 5 minutes, + synchronizationTimeout: FiniteDuration = 30 seconds +) + +object FinishedDeploymentsStatusesSynchronizationConfig { + + val ConfigPath = "finishedDeploymentStatusesSynchronization" + + def parse(config: Config): FinishedDeploymentsStatusesSynchronizationConfig = + config + .getAs[FinishedDeploymentsStatusesSynchronizationConfig](ConfigPath) + .getOrElse(FinishedDeploymentsStatusesSynchronizationConfig()) + +} diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/ScenarioDeploymentReconciler.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/ScenarioDeploymentReconciler.scala new file mode 100644 index 00000000000..d21a2dd0d27 --- /dev/null +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/reconciliation/ScenarioDeploymentReconciler.scala @@ -0,0 +1,54 @@ +package pl.touk.nussknacker.ui.process.deployment.reconciliation + +import com.typesafe.scalalogging.LazyLogging +import pl.touk.nussknacker.engine.api.deployment.DataFreshnessPolicy +import pl.touk.nussknacker.engine.api.deployment.DataFreshnessPolicy.Fresh +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus +import pl.touk.nussknacker.engine.api.process.ProcessingType +import 
pl.touk.nussknacker.ui.process.deployment.deploymentstatus.EngineSideDeploymentStatusesProvider +import pl.touk.nussknacker.ui.process.repository.{DBIOActionRunner, ScenarioActionRepository} +import pl.touk.nussknacker.ui.security.api.{LoggedUser, NussknackerInternalUser} +import slick.dbio.DBIOAction + +import scala.concurrent.{ExecutionContext, Future} + +class ScenarioDeploymentReconciler( + allProcessingTypes: => Iterable[ProcessingType], + deploymentStatusesProvider: EngineSideDeploymentStatusesProvider, + actionRepository: ScenarioActionRepository, + dbioActionRunner: DBIOActionRunner +)(implicit ec: ExecutionContext) + extends LazyLogging { + + // We have to synchronize these statuses because engines (for example Flink) might have jobs retention mechanism + // and finished jobs will disappear eventually on their side + def synchronizeEngineFinishedDeploymentsLocalStatuses(): Future[Unit] = { + implicit val user: LoggedUser = NussknackerInternalUser.instance + implicit val freshnessPolicy: DataFreshnessPolicy = Fresh + logger.debug("Synchronization of local status of finished deployments...") + for { + // Currently, synchronization is supported only for DeploymentManagers that supports DeploymentsStatusesQueryForAllScenarios + bulkQueriedStatuses <- deploymentStatusesProvider.getBulkQueriedDeploymentStatusesForSupportedManagers( + allProcessingTypes + ) + deploymentStatuses = bulkQueriedStatuses.getAllDeploymentStatuses + // We compare status by instances instead of by names. Thanks to that, PeriodicStateStatus won't be handled. 
+ // It is an expected behaviour because schedules finished status is handled inside PeriodicProcessService + finishedDeploymentIds = deploymentStatuses.filter(_.status == SimpleStateStatus.Finished).flatMap(_.deploymentId) + actionsIds = finishedDeploymentIds.flatMap(_.toActionIdOpt) + actionsWithMarkingExecutionFinishedResult <- dbioActionRunner.run(DBIOAction.sequence(actionsIds.map { actionId => + actionRepository.markFinishedActionAsExecutionFinished(actionId).map(actionId -> _) + })) + } yield { + val actionsMarkedAsExecutionFinished = actionsWithMarkingExecutionFinishedResult.collect { + case (actionId, true) => actionId.toString + }.toList + if (actionsMarkedAsExecutionFinished.isEmpty) { + logger.debug("None action marked as execution finished") + } else { + logger.debug(actionsMarkedAsExecutionFinished.mkString("Actions marked as execution finished: ", ", ", "")) + } + } + } + +} diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetector.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetector.scala new file mode 100644 index 00000000000..27d50f26e65 --- /dev/null +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetector.scala @@ -0,0 +1,126 @@ +package pl.touk.nussknacker.ui.process.deployment.scenariostatus + +import com.typesafe.scalalogging.LazyLogging +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus +import pl.touk.nussknacker.engine.api.deployment._ +import pl.touk.nussknacker.engine.deployment.DeploymentId + +object InconsistentStateDetector extends InconsistentStateDetector + +class InconsistentStateDetector extends LazyLogging { + + def resolveScenarioStatus( + deploymentStatuses: List[DeploymentStatusDetails], + lastStateAction: 
ScenarioStatusActionDetails + ): StateStatus = { + val status = (doExtractAtMostOneStatus(deploymentStatuses), lastStateAction) match { + case (Left(deploymentStatus), _) => deploymentStatus.status + case (Right(None), lastAction) + if lastAction.actionName == ScenarioActionName.Deploy && lastAction.state == ProcessActionState.ExecutionFinished => + // Some engines like Flink have jobs retention. Because of that we restore finished status + SimpleStateStatus.Finished + case (Right(Some(deploymentStatus)), _) if shouldAlwaysReturnStatus(deploymentStatus) => deploymentStatus.status + case (Right(deploymentStatusOpt), lastAction) if lastAction.actionName == ScenarioActionName.Deploy => + handleLastActionDeploy(deploymentStatusOpt, lastAction) + case (Right(Some(deploymentStatus)), _) if isFollowingDeployStatus(deploymentStatus) => + handleFollowingDeployEngineSideStatus(deploymentStatus, lastStateAction) + case (Right(deploymentStatusOpt), lastAction) if lastAction.actionName == ScenarioActionName.Cancel => + handleLastActionCancel(deploymentStatusOpt) + case (Right(Some(deploymentStatus)), _) => deploymentStatus.status + case (Right(None), _) => SimpleStateStatus.NotDeployed + } + logger.debug( + s"Resolved deployment statuses: $deploymentStatuses, lastStateAction: $lastStateAction to scenario status: $status" + ) + status + } + + private[scenariostatus] def extractAtMostOneStatus( + deploymentStatuses: List[DeploymentStatusDetails] + ): Option[DeploymentStatusDetails] = + doExtractAtMostOneStatus(deploymentStatuses).fold(Some(_), identity) + + private def doExtractAtMostOneStatus( + deploymentStatuses: List[DeploymentStatusDetails] + ): Either[DeploymentStatusDetails, Option[DeploymentStatusDetails]] = { + val notFinalStatuses = deploymentStatuses.filterNot(isFinalOrTransitioningToFinalStatus) + (deploymentStatuses, notFinalStatuses) match { + case (Nil, Nil) => Right(None) + case (_, singleNotFinished :: Nil) => Right(Some(singleNotFinished)) + case (_, 
firstNotFinished :: _ :: _) => + Left( + firstNotFinished.copy( + status = ProblemStateStatus.multipleJobsRunning( + notFinalStatuses.map(deploymentStatus => + deploymentStatus.deploymentId.getOrElse(DeploymentId("missing")) -> deploymentStatus.status + ) + ) + ) + ) + case (firstFinished :: _, Nil) => Right(Some(firstFinished)) + } + } + + // This method handles some corner cases for canceled process -> with last action = Canceled + private def handleLastActionCancel(deploymentStatusOpt: Option[DeploymentStatusDetails]): StateStatus = + deploymentStatusOpt + .map(_.status) + .getOrElse(SimpleStateStatus.Canceled) + + // This method handles some corner cases for following deploy status mismatch last action version + private def handleFollowingDeployEngineSideStatus( + deploymentStatus: DeploymentStatusDetails, + lastStateAction: ScenarioStatusActionDetails + ): StateStatus = { + if (lastStateAction.actionName != ScenarioActionName.Deploy) + ProblemStateStatus.shouldNotBeRunning(true) + else + deploymentStatus.status + } + + // This method handles some corner cases for deployed action mismatch version + private def handleLastActionDeploy( + deploymentStatusOpt: Option[DeploymentStatusDetails], + action: ScenarioStatusActionDetails + ): StateStatus = + deploymentStatusOpt match { + case Some(deploymentStatuses) => + deploymentStatuses.version match { + case _ if !isFollowingDeployStatus(deploymentStatuses) && !isFinishedStatus(deploymentStatuses) => + logger.debug( + s"handleLastActionDeploy: is not following deploy status nor finished, but it should be. $deploymentStatuses" + ) + ProblemStateStatus.shouldBeRunning(action.processVersionId, action.user) + case Some(ver) if ver != action.processVersionId => + ProblemStateStatus.mismatchDeployedVersion(ver, action.processVersionId, action.user) + case Some(_) => + deploymentStatuses.status + case None => // TODO: we should remove Option from ProcessVersion? 
+ ProblemStateStatus.missingDeployedVersion(action.processVersionId, action.user) + } + case None => + logger.debug( + s"handleLastActionDeploy for empty deploymentStatus. Action.processVersionId: ${action.processVersionId}" + ) + ProblemStateStatus.shouldBeRunning(action.processVersionId, action.user) + } + + private def shouldAlwaysReturnStatus(deploymentStatus: DeploymentStatusDetails): Boolean = { + ProblemStateStatus.isProblemStatus( + deploymentStatus.status + ) || deploymentStatus.status == SimpleStateStatus.Restarting + } + + private def isFollowingDeployStatus(deploymentStatus: DeploymentStatusDetails): Boolean = { + SimpleStateStatus.DefaultFollowingDeployStatuses.contains(deploymentStatus.status) + } + + private def isFinalOrTransitioningToFinalStatus(deploymentStatus: DeploymentStatusDetails): Boolean = + SimpleStateStatus.isFinalOrTransitioningToFinalStatus(deploymentStatus.status) + + private def isFinishedStatus(deploymentStatus: DeploymentStatusDetails): Boolean = { + deploymentStatus.status == SimpleStateStatus.Finished + } + +} diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/ScenarioStatusProvider.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/ScenarioStatusProvider.scala new file mode 100644 index 00000000000..9ace1349e0e --- /dev/null +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/ScenarioStatusProvider.scala @@ -0,0 +1,244 @@ +package pl.touk.nussknacker.ui.process.deployment.scenariostatus + +import cats.Traverse +import cats.implicits.{toFoldableOps, toTraverseOps} +import cats.syntax.functor._ +import com.typesafe.scalalogging.LazyLogging +import db.util.DBIOActionInstances._ +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext +import pl.touk.nussknacker.engine.api.deployment.ScenarioActionName.{Cancel, Deploy} +import 
pl.touk.nussknacker.engine.api.deployment._ +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus +import pl.touk.nussknacker.engine.api.process._ +import pl.touk.nussknacker.ui.BadRequestError +import pl.touk.nussknacker.ui.process.deployment.DeploymentManagerDispatcher +import pl.touk.nussknacker.ui.process.deployment.deploymentstatus.{ + BulkQueriedDeploymentStatuses, + EngineSideDeploymentStatusesProvider, + GetDeploymentsStatusesError +} +import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicScenarioStatus +import pl.touk.nussknacker.ui.process.repository.ProcessDBQueryRepository.ProcessNotFoundError +import pl.touk.nussknacker.ui.process.repository._ +import pl.touk.nussknacker.ui.security.api.LoggedUser +import slick.dbio.{DBIO, DBIOAction} + +import scala.concurrent.{ExecutionContext, Future} +import scala.language.higherKinds + +class ScenarioStatusProvider( + deploymentStatusesProvider: EngineSideDeploymentStatusesProvider, + dispatcher: DeploymentManagerDispatcher, + processRepository: FetchingProcessRepository[DB], + actionRepository: ScenarioActionRepository, + dbioRunner: DBIOActionRunner, +)(implicit ec: ExecutionContext) + extends LazyLogging { + + def getScenarioStatus( + processIdWithName: ProcessIdWithName + )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[StateStatus] = { + dbioRunner.run(for { + processDetailsOpt <- processRepository.fetchLatestProcessDetailsForProcessId[Unit](processIdWithName.id) + processDetails <- existsOrFail(processDetailsOpt, ProcessNotFoundError(processIdWithName.name)) + inProgressActionNames <- actionRepository.getInProgressActionNames(processDetails.processId) + scenarioStatus <- getScenarioStatusFetchingDeploymentsStatusesFromManager( + processDetails, + inProgressActionNames + ) + } yield scenarioStatus) + } + + def getScenariosStatuses[F[_]: Traverse, 
ScenarioShape]( + processTraverse: F[ScenarioWithDetailsEntity[ScenarioShape]] + )( + implicit user: LoggedUser, + freshnessPolicy: DataFreshnessPolicy + ): Future[F[Option[StateStatus]]] = { + val scenarios = processTraverse.toList + dbioRunner.run( + for { + actionsInProgress <- getInProgressActionTypesForScenarios(scenarios) + // We assume that prefetching gives profits for at least two scenarios + processingTypesWithMoreThanOneScenario = scenarios.groupBy(_.processingType).filter(_._2.size >= 2).keys + prefetchedDeploymentStatuses <- DBIO.from( + deploymentStatusesProvider.getBulkQueriedDeploymentStatusesForSupportedManagers( + processingTypesWithMoreThanOneScenario + ) + ) + finalScenariosStatuses <- processTraverse + .map { + case process if process.isFragment => DBIO.successful(Option.empty[StateStatus]) + case process => + getNonFragmentScenarioStatus(actionsInProgress, prefetchedDeploymentStatuses, process).map(Some(_)) + } + .sequence[DB, Option[StateStatus]] + } yield finalScenariosStatuses + ) + } + + private def getNonFragmentScenarioStatus[ScenarioShape, F[_]: Traverse]( + actionsInProgress: Map[ProcessId, Set[ScenarioActionName]], + prefetchedDeploymentStatuses: BulkQueriedDeploymentStatuses, + process: ScenarioWithDetailsEntity[ScenarioShape] + )( + implicit user: LoggedUser, + freshnessPolicy: DataFreshnessPolicy + ): DB[StateStatus] = { + val inProgressActionNames = actionsInProgress.getOrElse(process.processId, Set.empty) + getScenarioStatus( + process, + inProgressActionNames, + deploymentStatusesProvider.getDeploymentStatuses( + process.idData, + Some(prefetchedDeploymentStatuses) + ) + ) + } + + def getAllowedActionsForScenarioStatus( + processDetails: ScenarioWithDetailsEntity[_] + )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[ScenarioStatusWithAllowedActions] = { + dbioRunner.run(getAllowedActionsForScenarioStatusDBIO(processDetails)) + } + + def getAllowedActionsForScenarioStatusDBIO( + processDetails: 
ScenarioWithDetailsEntity[_] + )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): DB[ScenarioStatusWithAllowedActions] = { + for { + inProgressActionNames <- actionRepository.getInProgressActionNames(processDetails.processId) + statusDetails <- getScenarioStatusFetchingDeploymentsStatusesFromManager( + processDetails, + inProgressActionNames + ) + allowedActions = getAllowedActions(statusDetails, processDetails, None) + } yield ScenarioStatusWithAllowedActions(statusDetails, allowedActions) + } + + private def getAllowedActions( + scenarioStatus: StateStatus, + processDetails: ScenarioWithDetailsEntity[_], + currentlyPresentedVersionId: Option[VersionId] + )(implicit user: LoggedUser): Set[ScenarioActionName] = { + dispatcher + .deploymentManagerUnsafe(processDetails.processingType) + .processStateDefinitionManager + .statusActions( + ScenarioStatusWithScenarioContext( + scenarioStatus = scenarioStatus, + deployedVersionId = processDetails.lastDeployedAction.map(_.processVersionId), + currentlyPresentedVersionId = currentlyPresentedVersionId + ) + ) + } + + private def getScenarioStatusFetchingDeploymentsStatusesFromManager( + processDetails: ScenarioWithDetailsEntity[_], + inProgressActionNames: Set[ScenarioActionName], + )(implicit freshnessPolicy: DataFreshnessPolicy, user: LoggedUser): DB[StateStatus] = { + getScenarioStatus( + processDetails, + inProgressActionNames, + deploymentStatusesProvider.getDeploymentStatuses( + processDetails.idData, + prefetchedDeploymentStatuses = None + ) + ) + } + + // This is optimisation tweak. We want to reduce number of calls for in progress action types. 
So for >1 scenarios + // we do one call for all in progress action types for all scenarios + private def getInProgressActionTypesForScenarios( + scenarios: List[ScenarioWithDetailsEntity[_]] + ): DB[Map[ProcessId, Set[ScenarioActionName]]] = { + scenarios match { + case Nil => DBIO.successful(Map.empty) + case head :: Nil => + actionRepository + .getInProgressActionNames(head.processId) + .map(actionNames => Map(head.processId -> actionNames)) + case _ => + // We are getting only Deploy and Cancel InProgress actions as only these two impact scenario status + actionRepository.getInProgressActionNames(Set(Deploy, Cancel)) + } + } + + private def getScenarioStatus( + processDetails: ScenarioWithDetailsEntity[_], + inProgressActionNames: Set[ScenarioActionName], + fetchDeploymentStatuses: => Future[ + Either[GetDeploymentsStatusesError, WithDataFreshnessStatus[List[DeploymentStatusDetails]]] + ], + ): DB[StateStatus] = { + def logStatusAndReturn(scenarioStatus: StateStatus) = { + logger.debug(s"Status for: '${processDetails.name}' is: $scenarioStatus") + DBIOAction.successful(scenarioStatus) + } + if (processDetails.isFragment) { + throw FragmentStateException + } else if (processDetails.isArchived) { + logStatusAndReturn(getArchivedScenarioStatus(processDetails)) + } else if (inProgressActionNames.contains(ScenarioActionName.Deploy)) { + logStatusAndReturn(SimpleStateStatus.DuringDeploy) + } else if (inProgressActionNames.contains(ScenarioActionName.Cancel)) { + logStatusAndReturn(SimpleStateStatus.DuringCancel) + } else { + processDetails.lastStateAction match { + case Some(lastStateActionValue) => + DBIOAction + .from(fetchDeploymentStatuses) + .map { + case Left(error) => + logger.warn("Failure during getting deployment statuses from deployment manager", error) + ProblemStateStatus.FailedToGet + case Right(statusWithFreshness) => + logger.debug( + s"Deployment statuses for: '${processDetails.name}' are: ${statusWithFreshness.value}, cached: 
${statusWithFreshness.cached}, last status action: ${processDetails.lastStateAction + .map(_.actionName)})" + ) + statusWithFreshness.value match { + // periodic mechanism already returns a scenario status, so we don't need to resolve it + // TODO: PeriodicDeploymentManager shouldn't be a DeploymentManager, we should treat it as a separate + // mechanism for both action commands and scenario status resolving + case DeploymentStatusDetails(periodic: PeriodicScenarioStatus, _, _) :: Nil => periodic + case _ => + InconsistentStateDetector.resolveScenarioStatus(statusWithFreshness.value, lastStateActionValue) + } + } + case None => // We assume that the process never deployed should have no state at the engine + logStatusAndReturn(SimpleStateStatus.NotDeployed) + } + } + } + + // We assume that checking the state for archived doesn't make sense, and we compute the state based on the last state action + private def getArchivedScenarioStatus(processDetails: ScenarioWithDetailsEntity[_]): StateStatus = { + processDetails.lastStateAction.map(a => (a.actionName, a.state, a.id)) match { + case Some((Cancel, _, _)) => + logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.Canceled}") + SimpleStateStatus.Canceled + case Some((Deploy, ProcessActionState.ExecutionFinished, deploymentActionId)) => + logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.Finished} ") + SimpleStateStatus.Finished + case Some(_) => + logger.warn(s"Status for: '${processDetails.name}' is: ${ProblemStateStatus.ArchivedShouldBeCanceled}") + ProblemStateStatus.ArchivedShouldBeCanceled + case None => + logger.debug(s"Status for: '${processDetails.name}' is: ${SimpleStateStatus.NotDeployed}") + SimpleStateStatus.NotDeployed + } + } + + private def existsOrFail[T](checkThisOpt: Option[T], failWith: => Exception): DB[T] = { + checkThisOpt match { + case Some(checked) => DBIOAction.successful(checked) + case None => DBIOAction.failed(failWith) + } + } + +} + +final 
case class ScenarioStatusWithAllowedActions(scenarioStatus: StateStatus, allowedActions: Set[ScenarioActionName]) + +object FragmentStateException extends BadRequestError("Fragment doesn't have state.") diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/exception/ProcessIllegalAction.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/exception/ProcessIllegalAction.scala index ffad195cc2f..623b084c568 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/exception/ProcessIllegalAction.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/exception/ProcessIllegalAction.scala @@ -1,8 +1,9 @@ package pl.touk.nussknacker.ui.process.exception -import pl.touk.nussknacker.engine.api.deployment.{ProcessState, ScenarioActionName, StateStatus} +import pl.touk.nussknacker.engine.api.deployment.{ScenarioActionName, StateStatus} import pl.touk.nussknacker.engine.api.process.ProcessName import pl.touk.nussknacker.ui.IllegalOperationError +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusWithAllowedActions final case class ProcessIllegalAction(message: String) extends IllegalOperationError(message, details = "") @@ -11,18 +12,10 @@ object ProcessIllegalAction { def apply( actionName: ScenarioActionName, processName: ProcessName, - state: ProcessState - ): ProcessIllegalAction = - apply(actionName, processName, state.status.name, state.allowedActions.toSet) - - def apply( - actionName: ScenarioActionName, - processName: ProcessName, - statusName: StateStatus.StatusName, - allowedActions: Set[ScenarioActionName] + ScenarioStatusWithAllowedActions: ScenarioStatusWithAllowedActions ): ProcessIllegalAction = ProcessIllegalAction( - s"Action: $actionName is not allowed in scenario ($processName) state: ${statusName}, allowed actions: ${allowedActions + s"Action: $actionName is not allowed in scenario ($processName) state: ${ScenarioStatusWithAllowedActions.scenarioStatus}, 
allowed actions: ${ScenarioStatusWithAllowedActions.allowedActions .map(_.value) .mkString(",")}." ) diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/newdeployment/synchronize/DeploymentsStatusesSynchronizationScheduler.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/newdeployment/synchronize/DeploymentsStatusesSynchronizationScheduler.scala index a80b1bd965f..60029dc6cd5 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/newdeployment/synchronize/DeploymentsStatusesSynchronizationScheduler.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/newdeployment/synchronize/DeploymentsStatusesSynchronizationScheduler.scala @@ -1,6 +1,8 @@ package pl.touk.nussknacker.ui.process.newdeployment.synchronize import akka.actor.{ActorSystem, Cancellable} +import cats.effect.IO +import cats.effect.kernel.Resource import com.typesafe.config.Config import com.typesafe.scalalogging.LazyLogging import net.ceedubs.ficus.Ficus._ @@ -11,19 +13,17 @@ import scala.concurrent.duration._ import scala.util.Try // TODO: Properly handle HA setup: synchronizeAll() should be invoked only on one instance of designer in a time -class DeploymentsStatusesSynchronizationScheduler( - actorSystem: ActorSystem, - synchronizer: DeploymentsStatusesSynchronizer, - config: DeploymentsStatusesSynchronizationConfig -) extends AutoCloseable - with LazyLogging { +object DeploymentsStatusesSynchronizationScheduler extends LazyLogging { - @volatile private var scheduledJob: Option[Cancellable] = None + def resource( + actorSystem: ActorSystem, + synchronizer: DeploymentsStatusesSynchronizer, + config: DeploymentsStatusesSynchronizationConfig + ): Resource[IO, Cancellable] = { - import actorSystem.dispatcher + import actorSystem.dispatcher - def start(): Unit = { - scheduledJob = Some( + Resource.make(IO { actorSystem.scheduler.scheduleAtFixedRate(0 seconds, config.delayBetweenSynchronizations) { () => 
Try(Await.result(synchronizer.synchronizeAll(), config.synchronizationTimeout)).failed.foreach { ex => logger.error( @@ -32,11 +32,11 @@ class DeploymentsStatusesSynchronizationScheduler( ) } } - ) - } - - override def close(): Unit = { - scheduledJob.map(_.cancel()) + }) { scheduledJob => + IO { + scheduledJob.cancel() + } + } } } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicDeploymentManager.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicDeploymentManager.scala index 5a143896590..6aee044eddb 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicDeploymentManager.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicDeploymentManager.scala @@ -7,10 +7,9 @@ import pl.touk.nussknacker.engine.DeploymentManagerDependencies import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.scheduler.model.{ScheduleProperty => ApiScheduleProperty} import pl.touk.nussknacker.engine.api.deployment.scheduler.services._ -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName} import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId -import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicProcessStatus import pl.touk.nussknacker.ui.process.periodic.Utils._ import pl.touk.nussknacker.ui.process.repository.PeriodicProcessesRepository @@ -191,41 +190,13 @@ class PeriodicDeploymentManager private[periodic] ( } } - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = - service.stateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + 
service.deploymentsStatusesQueryForAllScenariosSupport - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - service.getStatusDetails(name).map(_.map(List(_))) - } - - override def resolve( - idWithName: ProcessIdWithName, - statusDetailsList: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = { - val statusDetails = statusDetailsList match { - case head :: _ => - head - case Nil => - val status = PeriodicProcessStatus(List.empty, List.empty) - status.mergedStatusDetails.copy(status = status) - } - // TODO: add "real" presentation of deployments in GUI - val mergedStatus = processStateDefinitionManager - .processState( - statusDetails.copy(status = - statusDetails.status.asInstanceOf[PeriodicProcessStatus].mergedStatusDetails.status - ), - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - Future.successful(mergedStatus.copy(tooltip = processStateDefinitionManager.statusTooltip(statusDetails.status))) + override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { + service.getMergedStatusDetails(scenarioName).map(_.map(List(_))) } override def processStateDefinitionManager: ProcessStateDefinitionManager = diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessService.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessService.scala index 93512bb1a2f..25b7a213420 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessService.scala +++ 
b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessService.scala @@ -219,7 +219,7 @@ class PeriodicProcessService( toDeploy: PeriodicProcessDeployment ): Future[Option[PeriodicProcessDeployment]] = { delegateDeploymentManager - .getProcessStates(toDeploy.periodicProcess.deploymentData.processName)(DataFreshnessPolicy.Fresh) + .getScenarioDeploymentsStatuses(toDeploy.periodicProcess.deploymentData.processName)(DataFreshnessPolicy.Fresh) .map( _.value .map(_.status) @@ -265,7 +265,9 @@ class PeriodicProcessService( schedules: SchedulesState ): Future[(Set[PeriodicProcessDeploymentId], Set[PeriodicProcessDeploymentId])] = for { - runtimeStatuses <- delegateDeploymentManager.getProcessStates(processName)(DataFreshnessPolicy.Fresh).map(_.value) + runtimeStatuses <- delegateDeploymentManager + .getScenarioDeploymentsStatuses(processName)(DataFreshnessPolicy.Fresh) + .map(_.value) _ = logger.debug(s"Process '$processName' runtime statuses: ${runtimeStatuses.map(_.toString)}") scheduleDeploymentsWithStatus = schedules.schedules.values.toList.flatMap { scheduleData => logger.debug( @@ -302,22 +304,22 @@ class PeriodicProcessService( processName: ProcessName, versionId: VersionId, deployment: ScheduleDeploymentData, - processState: Option[StatusDetails], + statusDetails: Option[DeploymentStatusDetails], ): Future[NeedsReschedule] = { implicit class RichFuture[Unit](a: Future[Unit]) { def needsReschedule(value: Boolean): Future[NeedsReschedule] = a.map(_ => value) } - processState.map(_.status) match { + statusDetails.map(_.status) match { case Some(status) if ProblemStateStatus.isProblemStatus( status ) && deployment.state.status != PeriodicProcessDeploymentStatus.Failed => - markFailedAction(deployment, processState).needsReschedule(executionConfig.rescheduleOnFailure) + markFailedAction(deployment, statusDetails).needsReschedule(executionConfig.rescheduleOnFailure) case Some(status) if EngineStatusesToReschedule.contains( status ) && 
deployment.state.status != PeriodicProcessDeploymentStatus.Finished => - markFinished(processName, versionId, deployment, processState).needsReschedule(value = true) + markFinished(processName, versionId, deployment, statusDetails).needsReschedule(value = true) case None if deployment.state.status == PeriodicProcessDeploymentStatus.Deployed && deployment.deployedAt.exists(_.isBefore(LocalDateTime.now().minusMinutes(5))) => @@ -325,7 +327,7 @@ class PeriodicProcessService( // this can be caused by a race in e.g. FlinkRestManager // (because /jobs/overview used in getProcessStates isn't instantly aware of submitted jobs) // so freshly deployed deployments aren't considered - markFinished(processName, versionId, deployment, processState).needsReschedule(value = true) + markFinished(processName, versionId, deployment, statusDetails).needsReschedule(value = true) case _ => Future.successful(()).needsReschedule(value = false) } @@ -387,7 +389,7 @@ class PeriodicProcessService( processName: ProcessName, versionId: VersionId, deployment: ScheduleDeploymentData, - state: Option[StatusDetails], + state: Option[DeploymentStatusDetails], ): Future[Unit] = { logger.info(s"Marking ${deployment.display} with status: ${deployment.state.status} as finished") for { @@ -404,7 +406,7 @@ class PeriodicProcessService( private def handleFailedDeployment( deployment: PeriodicProcessDeployment, - state: Option[StatusDetails] + state: Option[DeploymentStatusDetails] ): Future[Unit] = { def calculateNextRetryAt = now().plus(deploymentRetryConfig.deployRetryPenalize.toMillis, ChronoUnit.MILLIS) @@ -431,7 +433,7 @@ class PeriodicProcessService( private def markFailedAction( deployment: ScheduleDeploymentData, - state: Option[StatusDetails] + state: Option[DeploymentStatusDetails] ): Future[Unit] = { logger.info(s"Marking ${deployment.display} as failed.") for { @@ -554,10 +556,10 @@ class PeriodicProcessService( private def now(): LocalDateTime = LocalDateTime.now(clock) - def 
getStatusDetails( + def getMergedStatusDetails( name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[StatusDetails]] = { - delegateDeploymentManager.getProcessStates(name).flatMap { statusesWithFreshness => + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[DeploymentStatusDetails]] = { + delegateDeploymentManager.getScenarioDeploymentsStatuses(name).flatMap { statusesWithFreshness => logger.debug(s"Statuses for $name: $statusesWithFreshness") mergeStatusWithDeployments(name, statusesWithFreshness.value).map { statusDetails => statusesWithFreshness.copy(value = statusDetails) @@ -565,16 +567,16 @@ class PeriodicProcessService( } } - def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = - delegateDeploymentManager.stateQueryForAllScenariosSupport match { - case supported: StateQueryForAllScenariosSupported => - new StateQueryForAllScenariosSupported { + def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + delegateDeploymentManager.deploymentsStatusesQueryForAllScenariosSupport match { + case supported: DeploymentsStatusesQueryForAllScenariosSupported => + new DeploymentsStatusesQueryForAllScenariosSupported { - override def getAllProcessesStates()( + override def getAllScenariosDeploymentsStatuses()( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[Map[ProcessName, List[StatusDetails]]]] = { + ): Future[WithDataFreshnessStatus[Map[ProcessName, List[DeploymentStatusDetails]]]] = { for { - allStatusDetailsInDelegate <- supported.getAllProcessesStates() + allStatusDetailsInDelegate <- supported.getAllScenariosDeploymentsStatuses() allStatusDetailsInPeriodic <- mergeStatusWithDeployments(allStatusDetailsInDelegate.value) result = allStatusDetailsInPeriodic.map { case (name, status) => (name, List(status)) } } yield allStatusDetailsInDelegate.map(_ => result) @@ -582,14 +584,14 @@ class 
PeriodicProcessService( } - case NoStateQueryForAllScenariosSupport => - NoStateQueryForAllScenariosSupport + case NoDeploymentsStatusesQueryForAllScenariosSupport => + NoDeploymentsStatusesQueryForAllScenariosSupport } private def mergeStatusWithDeployments( name: ProcessName, - runtimeStatuses: List[StatusDetails] - ): Future[StatusDetails] = { + runtimeStatuses: List[DeploymentStatusDetails] + ): Future[DeploymentStatusDetails] = { def toDeploymentStatuses(schedulesState: SchedulesState) = schedulesState.schedules.toList .flatMap { case (scheduleId, scheduleData) => scheduleData.latestDeployments.map { deployment => @@ -614,14 +616,13 @@ class PeriodicProcessService( MaxDeploymentsStatus ) } yield { - val status = PeriodicProcessStatus(toDeploymentStatuses(activeSchedules), toDeploymentStatuses(inactiveSchedules)) - status.mergedStatusDetails.copy(status = status) + mergedDeploymentStatus(toDeploymentStatuses(activeSchedules), toDeploymentStatuses(inactiveSchedules)) } } private def mergeStatusWithDeployments( - runtimeStatuses: Map[ProcessName, List[StatusDetails]] - ): Future[Map[ProcessName, StatusDetails]] = { + runtimeStatuses: Map[ProcessName, List[DeploymentStatusDetails]] + ): Future[Map[ProcessName, DeploymentStatusDetails]] = { def toDeploymentStatuses(processName: ProcessName, schedulesState: SchedulesState) = schedulesState.schedules.toList .flatMap { case (scheduleId, scheduleData) => @@ -647,12 +648,11 @@ class PeriodicProcessService( allProcessNames.map { processName => val activeSchedulesForProcess = activeSchedules.getOrElse(processName, SchedulesState(Map.empty)) val inactiveSchedulesForProcess = inactiveSchedules.getOrElse(processName, SchedulesState(Map.empty)) - val status = PeriodicProcessStatus( + val status = mergedDeploymentStatus( toDeploymentStatuses(processName, activeSchedulesForProcess), toDeploymentStatuses(processName, inactiveSchedulesForProcess) ) - val mergedStatus = status.mergedStatusDetails.copy(status = status) - 
(processName, mergedStatus) + (processName, status) }.toMap } } @@ -697,10 +697,10 @@ class PeriodicProcessService( ) .run - implicit class RuntimeStatusesExt(runtimeStatuses: List[StatusDetails]) { + implicit class RuntimeStatusesExt(runtimeStatuses: List[DeploymentStatusDetails]) { private val runtimeStatusesMap = runtimeStatuses.flatMap(status => status.deploymentId.map(_ -> status)).toMap - def getStatus(deploymentId: PeriodicProcessDeploymentId): Option[StatusDetails] = + def getStatus(deploymentId: PeriodicProcessDeploymentId): Option[DeploymentStatusDetails] = runtimeStatusesMap.get(DeploymentId(deploymentId.toString)) } @@ -747,10 +747,8 @@ class PeriodicProcessService( object PeriodicProcessService { - private implicit val localDateOrdering: Ordering[LocalDateTime] = Ordering.by(identity[ChronoLocalDateTime[_]]) - // TODO: some configuration? - private val MaxDeploymentsStatus = 5 + private[periodic] val MaxDeploymentsStatus = 5 private val DeploymentStatusesToReschedule = Set(PeriodicProcessDeploymentStatus.Deployed, PeriodicProcessDeploymentStatus.Failed) @@ -762,105 +760,118 @@ object PeriodicProcessService { // for each historical and active deployments. mergedStatusDetails and methods below are for purpose of presentation // of single, merged status similar to this available for streaming job. This merged status should be a straightforward derivative // of these deployments statuses so it will be easy to figure out it by user. 
- case class PeriodicProcessStatus( + + // Currently we don't present deployments - theirs statuses are available only in tooltip - because of that we have to pick + // one "merged" status that will be presented to users + private def mergedDeploymentStatus( activeDeploymentsStatuses: List[PeriodicDeploymentStatus], inactiveDeploymentsStatuses: List[PeriodicDeploymentStatus] - ) extends StateStatus - with LazyLogging { - - def limitedAndSortedDeployments: List[PeriodicDeploymentStatus] = - (activeDeploymentsStatuses ++ inactiveDeploymentsStatuses.take( - MaxDeploymentsStatus - activeDeploymentsStatuses.size - )).sorted(PeriodicDeploymentStatus.ordering.reverse) - - // We present merged name to be possible to filter scenario by status - override def name: StatusName = mergedStatusDetails.status.name - - // Currently we don't present deployments - theirs statuses are available only in tooltip - because of that we have to pick - // one "merged" status that will be presented to users - def mergedStatusDetails: StatusDetails = { - pickMostImportantActiveDeployment - .map { deploymentStatus => - def createStatusDetails(status: StateStatus) = StatusDetails( - status = status, - deploymentId = Some(DeploymentId(deploymentStatus.deploymentId.toString)), - ) - if (deploymentStatus.isWaitingForReschedule) { - deploymentStatus.runtimeStatusOpt - .map(_.copy(status = WaitingForScheduleStatus)) - .getOrElse(createStatusDetails(WaitingForScheduleStatus)) - } else if (deploymentStatus.status == PeriodicProcessDeploymentStatus.Scheduled) { - createStatusDetails(ScheduledStatus(deploymentStatus.runAt)) - } else if (Set(PeriodicProcessDeploymentStatus.Failed, PeriodicProcessDeploymentStatus.FailedOnDeploy) - .contains(deploymentStatus.status)) { - createStatusDetails(ProblemStateStatus.Failed) - } else if (deploymentStatus.status == PeriodicProcessDeploymentStatus.RetryingDeploy) { - createStatusDetails(SimpleStateStatus.DuringDeploy) - } else { - 
deploymentStatus.runtimeStatusOpt.getOrElse { - createStatusDetails(WaitingForScheduleStatus) + ): DeploymentStatusDetails = { + def toPeriodicProcessStatusWithMergedStatus(mergedStatus: StateStatus) = PeriodicScenarioStatus( + activeDeploymentsStatuses, + inactiveDeploymentsStatuses, + mergedStatus + ) + + def createStatusDetails(mergedStatus: StateStatus, periodicDeploymentIdOpt: Option[PeriodicProcessDeploymentId]) = + DeploymentStatusDetails( + status = toPeriodicProcessStatusWithMergedStatus(mergedStatus), + deploymentId = periodicDeploymentIdOpt.map(_.toString).map(DeploymentId(_)), + version = None + ) + + pickMostImportantActiveDeployment(activeDeploymentsStatuses) + .map { deploymentStatus => + if (deploymentStatus.isWaitingForReschedule) { + deploymentStatus.runtimeStatusOpt + .map(_.copy(status = toPeriodicProcessStatusWithMergedStatus(WaitingForScheduleStatus))) + .getOrElse(createStatusDetails(WaitingForScheduleStatus, Some(deploymentStatus.deploymentId))) + } else if (deploymentStatus.status == PeriodicProcessDeploymentStatus.Scheduled) { + createStatusDetails(ScheduledStatus(deploymentStatus.runAt), Some(deploymentStatus.deploymentId)) + } else if (Set(PeriodicProcessDeploymentStatus.Failed, PeriodicProcessDeploymentStatus.FailedOnDeploy) + .contains(deploymentStatus.status)) { + createStatusDetails(ProblemStateStatus.Failed, Some(deploymentStatus.deploymentId)) + } else if (deploymentStatus.status == PeriodicProcessDeploymentStatus.RetryingDeploy) { + createStatusDetails(SimpleStateStatus.DuringDeploy, Some(deploymentStatus.deploymentId)) + } else { + deploymentStatus.runtimeStatusOpt + .map(runtimeDetails => + runtimeDetails.copy(status = toPeriodicProcessStatusWithMergedStatus(runtimeDetails.status)) + ) + .getOrElse { + createStatusDetails(WaitingForScheduleStatus, Some(deploymentStatus.deploymentId)) } - } } - .getOrElse { - if (inactiveDeploymentsStatuses.isEmpty) { - StatusDetails(SimpleStateStatus.NotDeployed, None) - } else { - val 
latestInactiveProcessId = - inactiveDeploymentsStatuses.maxBy(_.scheduleId.processId.value).scheduleId.processId - val latestDeploymentsForEachScheduleOfLatestProcessId = latestDeploymentForEachSchedule( - inactiveDeploymentsStatuses.filter(_.scheduleId.processId == latestInactiveProcessId) - ) + } + .getOrElse { + if (inactiveDeploymentsStatuses.isEmpty) { + createStatusDetails(SimpleStateStatus.NotDeployed, None) + } else { + val latestInactiveProcessId = + inactiveDeploymentsStatuses.maxBy(_.scheduleId.processId.value).scheduleId.processId + val latestDeploymentsForEachScheduleOfLatestProcessId = latestDeploymentForEachSchedule( + inactiveDeploymentsStatuses.filter(_.scheduleId.processId == latestInactiveProcessId) + ) - if (latestDeploymentsForEachScheduleOfLatestProcessId.forall( - _.status == PeriodicProcessDeploymentStatus.Finished - )) { - StatusDetails(SimpleStateStatus.Finished, None) - } else { - StatusDetails(SimpleStateStatus.Canceled, None) - } + if (latestDeploymentsForEachScheduleOfLatestProcessId.forall( + _.status == PeriodicProcessDeploymentStatus.Finished + )) { + createStatusDetails(SimpleStateStatus.Finished, None) + } else { + createStatusDetails(SimpleStateStatus.Canceled, None) } } - } + } + } - /** - * Returns latest deployment. It can be in any status (consult [[PeriodicProcessDeploymentStatus]]). - * For multiple schedules only single schedule is returned in the following order: - *
    - *
  1. If there are any deployed scenarios, then the first one is returned. Please be aware that deployment of previous - * schedule could fail.
  2. - *
  3. If there are any failed scenarios, then the last one is returned. We want to inform user, that some deployments - * failed and the scenario should be rescheduled/retried manually. - *
  4. If there are any scheduled scenarios, then the first one to be run is returned. - *
  5. If there are any finished scenarios, then the last one is returned. It should not happen because the scenario - * should be deactivated earlier. - *
- */ - def pickMostImportantActiveDeployment: Option[PeriodicDeploymentStatus] = { - val lastActiveDeploymentStatusForEachSchedule = - latestDeploymentForEachSchedule(activeDeploymentsStatuses).sorted - - def first(status: PeriodicProcessDeploymentStatus) = - lastActiveDeploymentStatusForEachSchedule.find(_.status == status) - - def last(status: PeriodicProcessDeploymentStatus) = - lastActiveDeploymentStatusForEachSchedule.reverse.find(_.status == status) - - first(PeriodicProcessDeploymentStatus.Deployed) - .orElse(last(PeriodicProcessDeploymentStatus.Failed)) - .orElse(last(PeriodicProcessDeploymentStatus.RetryingDeploy)) - .orElse(last(PeriodicProcessDeploymentStatus.FailedOnDeploy)) - .orElse(first(PeriodicProcessDeploymentStatus.Scheduled)) - .orElse(last(PeriodicProcessDeploymentStatus.Finished)) - } + /** + * Returns latest deployment. It can be in any status (consult [[PeriodicProcessDeploymentStatus]]). + * For multiple schedules only single schedule is returned in the following order: + *
    + *
  1. If there are any deployed scenarios, then the first one is returned. Please be aware that deployment of previous + * schedule could fail.
  2. + *
  3. If there are any failed scenarios, then the last one is returned. We want to inform user, that some deployments + * failed and the scenario should be rescheduled/retried manually. + *
  4. If there are any scheduled scenarios, then the first one to be run is returned. + *
  5. If there are any finished scenarios, then the last one is returned. It should not happen because the scenario + * should be deactivated earlier. + *
+ */ + private[periodic] def pickMostImportantActiveDeployment( + activeDeploymentsStatuses: List[PeriodicDeploymentStatus] + ): Option[PeriodicDeploymentStatus] = { + val lastActiveDeploymentStatusForEachSchedule = + latestDeploymentForEachSchedule(activeDeploymentsStatuses).sorted + + def first(status: PeriodicProcessDeploymentStatus) = + lastActiveDeploymentStatusForEachSchedule.find(_.status == status) + + def last(status: PeriodicProcessDeploymentStatus) = + lastActiveDeploymentStatusForEachSchedule.reverse.find(_.status == status) + + first(PeriodicProcessDeploymentStatus.Deployed) + .orElse(last(PeriodicProcessDeploymentStatus.Failed)) + .orElse(last(PeriodicProcessDeploymentStatus.RetryingDeploy)) + .orElse(last(PeriodicProcessDeploymentStatus.FailedOnDeploy)) + .orElse(first(PeriodicProcessDeploymentStatus.Scheduled)) + .orElse(last(PeriodicProcessDeploymentStatus.Finished)) + } - private def latestDeploymentForEachSchedule(deploymentsStatuses: List[PeriodicDeploymentStatus]) = { - deploymentsStatuses - .groupBy(_.scheduleId) - .values - .toList - .map(_.min(PeriodicDeploymentStatus.ordering.reverse)) - } + private def latestDeploymentForEachSchedule(deploymentsStatuses: List[PeriodicDeploymentStatus]) = { + deploymentsStatuses + .groupBy(_.scheduleId) + .values + .toList + .map(_.min(PeriodicDeploymentStatus.ordering.reverse)) + } + + case class PeriodicScenarioStatus( + activeDeploymentsStatuses: List[PeriodicDeploymentStatus], + inactiveDeploymentsStatuses: List[PeriodicDeploymentStatus], + mergedStatus: StateStatus + ) extends StateStatus { + + override def name: StatusName = mergedStatus.name } @@ -876,7 +887,7 @@ object PeriodicProcessService { status: PeriodicProcessDeploymentStatus, processActive: Boolean, // Some additional information that are available in StatusDetails returned by engine runtime - runtimeStatusOpt: Option[StatusDetails] + runtimeStatusOpt: Option[DeploymentStatusDetails] ) { def scheduleName: ScheduleName = 
scheduleId.scheduleName diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessStateDefinitionManager.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessStateDefinitionManager.scala index 17c3642a8c8..544410cbd87 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessStateDefinitionManager.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessStateDefinitionManager.scala @@ -1,8 +1,22 @@ package pl.touk.nussknacker.ui.process.periodic -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.defaultVisibleActions -import pl.touk.nussknacker.engine.api.deployment.{OverridingProcessStateDefinitionManager, ProcessStateDefinitionManager, ScenarioActionName, StateStatus} -import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.{PeriodicDeploymentStatus, PeriodicProcessStatus} +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.{ + DefaultVisibleActions, + ScenarioStatusWithScenarioContext +} +import pl.touk.nussknacker.engine.api.deployment.{ + OverridingProcessStateDefinitionManager, + ProcessStateDefinitionManager, + ScenarioActionName, + StateStatus +} +import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.{ + MaxDeploymentsStatus, + PeriodicDeploymentStatus, + PeriodicScenarioStatus +} + +import java.net.URI class PeriodicProcessStateDefinitionManager(delegate: ProcessStateDefinitionManager) extends OverridingProcessStateDefinitionManager( @@ -10,15 +24,57 @@ class PeriodicProcessStateDefinitionManager(delegate: ProcessStateDefinitionMana statusTooltipsPF = PeriodicStateStatus.statusTooltipsPF, statusDescriptionsPF = PeriodicStateStatus.statusDescriptionsPF, customStateDefinitions = PeriodicStateStatus.customStateDefinitions, - customVisibleActions = Some(defaultVisibleActions ::: ScenarioActionName.RunOffSchedule 
:: Nil), + customVisibleActions = Some(DefaultVisibleActions ::: ScenarioActionName.RunOffSchedule :: Nil), customActionTooltips = Some(PeriodicStateStatus.customActionTooltips), delegate = delegate ) { - override def statusTooltip(stateStatus: StateStatus): String = { - stateStatus match { - case periodic: PeriodicProcessStatus => PeriodicProcessStateDefinitionManager.statusTooltip(periodic) - case _ => super.statusTooltip(stateStatus) + override def statusActions(input: ScenarioStatusWithScenarioContext): Set[ScenarioActionName] = { + super.statusActions( + extractPeriodicStatus(input.scenarioStatus) + .map(periodic => input.copy(scenarioStatus = periodic.mergedStatus)) + .getOrElse(input) // We have to handle also statuses resolved by core (for example NotDeployed) + ) + } + + override def actionTooltips(input: ScenarioStatusWithScenarioContext): Map[ScenarioActionName, String] = { + super.actionTooltips( + extractPeriodicStatus(input.scenarioStatus) + .map(periodic => input.copy(scenarioStatus = periodic.mergedStatus)) + .getOrElse(input) // We have to handle also statuses resolved by core (for example NotDeployed) + ) + } + + override def statusIcon(input: ScenarioStatusWithScenarioContext): URI = { + super.statusIcon( + extractPeriodicStatus(input.scenarioStatus) + .map(periodic => input.copy(scenarioStatus = periodic.mergedStatus)) + .getOrElse(input) // We have to handle also statuses resolved by core (for example NotDeployed) + ) + } + + override def statusDescription(input: ScenarioStatusWithScenarioContext): String = { + super.statusDescription( + extractPeriodicStatus(input.scenarioStatus) + .map(periodic => input.copy(scenarioStatus = periodic.mergedStatus)) + .getOrElse(input) // We have to handle also statuses resolved by core (for example NotDeployed) + ) + } + + override def statusTooltip(input: ScenarioStatusWithScenarioContext): String = { + extractPeriodicStatus(input.scenarioStatus) + .map { periodicStatus => + 
PeriodicProcessStateDefinitionManager.statusTooltip( + activeDeploymentsStatuses = periodicStatus.activeDeploymentsStatuses, + inactiveDeploymentsStatuses = periodicStatus.inactiveDeploymentsStatuses + ) + } + .getOrElse(super.statusTooltip(input)) + } + + private def extractPeriodicStatus(stateStatus: StateStatus) = { + Option(stateStatus) collect { case periodic: PeriodicScenarioStatus => + periodic } } @@ -26,8 +82,15 @@ class PeriodicProcessStateDefinitionManager(delegate: ProcessStateDefinitionMana object PeriodicProcessStateDefinitionManager { - def statusTooltip(processStatus: PeriodicProcessStatus): String = { - processStatus.limitedAndSortedDeployments + def statusTooltip( + activeDeploymentsStatuses: List[PeriodicDeploymentStatus], + inactiveDeploymentsStatuses: List[PeriodicDeploymentStatus] + ): String = { + val limitedAndSortedDeployments: List[PeriodicDeploymentStatus] = + (activeDeploymentsStatuses ++ inactiveDeploymentsStatuses.take( + MaxDeploymentsStatus - activeDeploymentsStatuses.size + )).sorted(PeriodicDeploymentStatus.ordering.reverse) + limitedAndSortedDeployments .map { case d @ PeriodicDeploymentStatus(_, scheduleId, _, runAt, status, _, _) => val refinedStatus = { if (d.isCanceled) { diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicStateStatus.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicStateStatus.scala index ce47c08485d..9897ce887ad 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicStateStatus.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicStateStatus.scala @@ -1,6 +1,6 @@ package pl.touk.nussknacker.ui.process.periodic -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import 
pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus @@ -26,33 +26,33 @@ object PeriodicStateStatus { case object ScheduledStatus { val name = "SCHEDULED" - - def isScheduledStatus(status: StateStatus): Boolean = { - status.name == name - } - } val WaitingForScheduleStatus: StateStatus = StateStatus("WAITING_FOR_SCHEDULE") - val statusActionsPF: PartialFunction[ProcessStatus, List[ScenarioActionName]] = { - case ProcessStatus(SimpleStateStatus.Running, _, _, _) => - // periodic processes cannot be redeployed from GUI - List(ScenarioActionName.Cancel) - case ProcessStatus(_: ScheduledStatus, _, deployedVersionId, Some(currentlyPresentedVersionId)) - if deployedVersionId.contains(currentlyPresentedVersionId) => - List(ScenarioActionName.Cancel, ScenarioActionName.Deploy, ScenarioActionName.RunOffSchedule) - case ProcessStatus(_: ScheduledStatus, _, _, None) => - // At the moment of deployment or validation, we may not have the information about the currently displayed version - // In that case we assume, that it was validated before the deployment was initiated. - List(ScenarioActionName.Cancel, ScenarioActionName.Deploy, ScenarioActionName.RunOffSchedule) - case ProcessStatus(_: ScheduledStatus, _, _, _) => - List(ScenarioActionName.Cancel, ScenarioActionName.Deploy) - case ProcessStatus(WaitingForScheduleStatus, _, _, _) => - List(ScenarioActionName.Cancel) // or maybe should it be empty?? 
- case ProcessStatus(_: ProblemStateStatus, _, _, _) => - List(ScenarioActionName.Cancel) // redeploy is not allowed - } + val statusActionsPF: PartialFunction[ScenarioStatusWithScenarioContext, Set[ScenarioActionName]] = + Function.unlift((input: ScenarioStatusWithScenarioContext) => + (input.scenarioStatus, input.deployedVersionId, input.currentlyPresentedVersionId) match { + case (SimpleStateStatus.Running, _, _) => + // periodic processes cannot be redeployed from GUI + Some(Set(ScenarioActionName.Cancel)) + case (_: ScheduledStatus, deployedVersionId, Some(currentlyPresentedVersionId)) + if deployedVersionId.contains(currentlyPresentedVersionId) => + Some(Set(ScenarioActionName.Cancel, ScenarioActionName.Deploy, ScenarioActionName.RunOffSchedule)) + case (_: ScheduledStatus, _, None) => + // At the moment of deployment or validation, we may not have the information about the currently displayed version + // In that case we assume, that it was validated before the deployment was initiated. + Some(Set(ScenarioActionName.Cancel, ScenarioActionName.Deploy, ScenarioActionName.RunOffSchedule)) + case (_: ScheduledStatus, _, _) => + Some(Set(ScenarioActionName.Cancel, ScenarioActionName.Deploy)) + case (WaitingForScheduleStatus, _, _) => + Some(Set(ScenarioActionName.Cancel)) // or maybe should it be empty?? 
+ case (_: ProblemStateStatus, _, _) => + Some(Set(ScenarioActionName.Cancel)) // redeploy is not allowed + case _ => + None + } + ) val statusTooltipsPF: PartialFunction[StateStatus, String] = { case ScheduledStatus(nextRunAt) => s"Scheduled at ${nextRunAt.pretty}" @@ -77,20 +77,19 @@ object PeriodicStateStatus { ), ) - def customActionTooltips(processStatus: ProcessStatus): Map[ScenarioActionName, String] = { - processStatus match { - case ProcessStatus(_: ScheduledStatus, _, deployedVersionId, currentlyPresentedVersionId) - if currentlyPresentedVersionId == deployedVersionId => + def customActionTooltips(input: ScenarioStatusWithScenarioContext): Map[ScenarioActionName, String] = { + input.scenarioStatus match { + case _: ScheduledStatus if input.currentlyPresentedVersionId == input.deployedVersionId => Map.empty - case ProcessStatus(_: ScheduledStatus, _, deployedVersionIdOpt, currentlyPresentedVersionId) => + case _: ScheduledStatus => def print(versionIdOpt: Option[VersionId]) = versionIdOpt match { case Some(versionId) => s"${versionId.value}" case None => "[unknown]" } Map( - ScenarioActionName.RunOffSchedule -> s"Version ${print(deployedVersionIdOpt)} is deployed, but different version ${print(currentlyPresentedVersionId)} is displayed" + ScenarioActionName.RunOffSchedule -> s"Version ${print(input.deployedVersionId)} is deployed, but different version ${print(input.currentlyPresentedVersionId)} is displayed" ) - case ProcessStatus(other, _, _, _) => + case other => Map(ScenarioActionName.RunOffSchedule -> s"Disabled for ${other.name} status.") } } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/processingtype/InvalidDeploymentManagerStub.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/processingtype/InvalidDeploymentManagerStub.scala index ee66bfe9b5b..c657132b054 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/processingtype/InvalidDeploymentManagerStub.scala +++ 
b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/processingtype/InvalidDeploymentManagerStub.scala @@ -3,7 +3,7 @@ package pl.touk.nussknacker.ui.process.processingtype import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleProcessStateDefinitionManager import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.process.ProcessName import pl.touk.nussknacker.ui.process.exception.ProcessIllegalAction import scala.concurrent.Future @@ -13,35 +13,18 @@ object InvalidDeploymentManagerStub extends DeploymentManager { private val stubbedActionResponse = Future.failed(new ProcessIllegalAction("Can't perform action because of an error in deployment configuration")) - private val stubbedStatus = StatusDetails( - ProblemStateStatus("Error in deployment configuration", allowedActions = List.empty), - deploymentId = None + private val stubbedStatus = DeploymentStatusDetails( + ProblemStateStatus("Error in deployment configuration", allowedActions = Set.empty), + deploymentId = None, + version = None ) - override def getProcessStates(name: ProcessName)( + override def getScenarioDeploymentsStatuses(scenarioName: ProcessName)( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { + ): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { Future.successful(WithDataFreshnessStatus.fresh(List(stubbedStatus))) } - override def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = { - Future.successful( - processStateDefinitionManager.processState( - stubbedStatus, - 
latestVersionId, - deployedVersionId, - currentlyPresentedVersionId - ) - ) - } - override def processStateDefinitionManager: ProcessStateDefinitionManager = SimpleProcessStateDefinitionManager override def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] = command match { @@ -52,7 +35,8 @@ object InvalidDeploymentManagerStub extends DeploymentManager { override def deploymentSynchronisationSupport: DeploymentSynchronisationSupport = NoDeploymentSynchronisationSupport - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = NoStateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + NoDeploymentsStatusesQueryForAllScenariosSupport override def schedulingSupport: SchedulingSupport = NoSchedulingSupport diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/DBFetchingProcessRepository.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/DBFetchingProcessRepository.scala index 7ff3cb06441..6903a09d9ae 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/DBFetchingProcessRepository.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/DBFetchingProcessRepository.scala @@ -121,19 +121,13 @@ abstract class DBFetchingProcessRepository[F[_]: Monad]( ) lastStateActionPerProcess <- fetchActionsOrEmpty( actionRepository - .getLastActionPerProcess(ProcessActionState.FinishedStates, Some(ScenarioActionName.StateActions)) + .getLastActionPerProcess(ProcessActionState.FinishedStates, Some(ScenarioActionName.ScenarioStatusActions)) ) // For last deploy action we are interested in Deploys that are Finished (not ExecutionFinished) and that are not Cancelled // so that the presence of such an action means that the process is currently deployed - lastDeployedActionPerProcess <- fetchActionsOrEmpty( - actionRepository - 
.getLastActionPerProcess( - ProcessActionState.FinishedStates, - Some(Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel)) - ) - ).map(_.filter { case (_, action) => + lastDeployedActionPerProcess = lastStateActionPerProcess.filter { case (_, action) => action.actionName == ScenarioActionName.Deploy && action.state == ProcessActionState.Finished - }) + } latestProcesses <- fetchLatestProcessesQuery(query, lastDeployedActionPerProcess.keySet, isDeployed).result labels <- scenarioLabelsRepository.getLabels @@ -248,11 +242,11 @@ abstract class DBFetchingProcessRepository[F[_]: Monad]( process = process, processVersion = processVersion, lastActionData = actions.headOption, - lastStateActionData = actions.find(a => ScenarioActionName.StateActions.contains(a.actionName)), + lastStateActionData = actions.find(a => ScenarioActionName.ScenarioStatusActions.contains(a.actionName)), // For last deploy action we are interested in Deploys that are Finished (not ExecutionFinished) and that are not Cancelled // so that the presence of such an action means that the process is currently deployed lastDeployedActionData = actions - .find(action => Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel).contains(action.actionName)) + .find(action => ScenarioActionName.ScenarioStatusActions.contains(action.actionName)) .filter(action => action.actionName == ScenarioActionName.Deploy && action.state == ProcessActionState.Finished ), diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/ScenarioWithDetailsEntity.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/ScenarioWithDetailsEntity.scala index 56bd104ca55..48edf7cc7f6 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/ScenarioWithDetailsEntity.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/ScenarioWithDetailsEntity.scala @@ -1,6 +1,7 @@ package pl.touk.nussknacker.ui.process.repository 
-import pl.touk.nussknacker.engine.api.deployment.ProcessAction +import pl.touk.nussknacker.engine.api.deployment.ProcessActionState.ProcessActionState +import pl.touk.nussknacker.engine.api.deployment.{ProcessAction, ProcessActionId, ScenarioActionName} import pl.touk.nussknacker.engine.api.graph.ScenarioGraph import pl.touk.nussknacker.engine.api.process.{ ProcessId, @@ -47,6 +48,8 @@ final case class ScenarioWithDetailsEntity[ScenarioShape]( ) extends ListenerScenarioWithDetails { lazy val idWithName: ProcessIdWithName = ProcessIdWithName(processId, name) + lazy val idData: ScenarioIdData = ScenarioIdData(processId, name, processingType) + def mapScenario[NewShape](action: ScenarioShape => NewShape): ScenarioWithDetailsEntity[NewShape] = copy(json = action(json)) @@ -68,3 +71,7 @@ final case class ScenarioWithDetailsEntity[ScenarioShape]( } } + +// It is a set of id-like data that allow to identify scenario both in local storage and on engine side +// On engine side it is needed to have processingType (to navigate to correct DeploymentManager) and scenario name +final case class ScenarioIdData(id: ProcessId, name: ProcessName, processingType: ProcessingType) diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/DbScenarioActivityRepository.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/DbScenarioActivityRepository.scala index d30991fe394..ee3f0d7e626 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/DbScenarioActivityRepository.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/DbScenarioActivityRepository.scala @@ -7,6 +7,7 @@ import pl.touk.nussknacker.engine.api.Comment import pl.touk.nussknacker.engine.api.component.ProcessingMode import pl.touk.nussknacker.engine.api.deployment.ScenarioAttachment.{AttachmentFilename, AttachmentId} import pl.touk.nussknacker.engine.api.deployment._ 
+import pl.touk.nussknacker.engine.api.modelinfo.ModelInfo import pl.touk.nussknacker.engine.api.process.ProcessId import pl.touk.nussknacker.ui.api.description.scenarioActivity.Dtos.Legacy import pl.touk.nussknacker.ui.db.entity.{ @@ -21,7 +22,6 @@ import pl.touk.nussknacker.ui.process.repository.DbioRepository import pl.touk.nussknacker.ui.process.repository.activities.ScenarioActivityRepository.{ CommentModificationMetadata, DeleteAttachmentError, - ModifyActivityError, ModifyCommentError } import pl.touk.nussknacker.ui.security.api.LoggedUser @@ -349,10 +349,6 @@ class DbScenarioActivityRepository private (override protected val dbRef: DbRef, private lazy val attachmentInsertQuery = attachmentsTable returning attachmentsTable.map(_.id) into ((item, id) => item.copy(id = id)) - private def validateActivityExistsForScenario(entity: ScenarioActivityEntityData) = { - fromEntity(entity).left.map(_ => ModifyActivityError.CouldNotModifyActivity).map(_._2) - } - private def modifyActivityByActivityId[ERROR]( activityId: ScenarioActivityId, activityDoesNotExistError: ERROR, diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/ScenarioActivityRepository.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/ScenarioActivityRepository.scala index 97a84b7a0e4..b7fc58ddd5e 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/ScenarioActivityRepository.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/activities/ScenarioActivityRepository.scala @@ -113,13 +113,6 @@ object ScenarioActivityRepository { case object CouldNotDeleteAttachment extends DeleteAttachmentError } - sealed trait ModifyActivityError - - object ModifyActivityError { - case object ActivityDoesNotExist extends ModifyActivityError - case object CouldNotModifyActivity extends ModifyActivityError - } - final case class 
CommentModificationMetadata(commentForScenarioDeployed: Boolean) } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/server/AkkaHttpBasedRouteProvider.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/server/AkkaHttpBasedRouteProvider.scala index d8bad0a17b2..e22a04f2e25 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/server/AkkaHttpBasedRouteProvider.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/server/AkkaHttpBasedRouteProvider.scala @@ -47,6 +47,13 @@ import pl.touk.nussknacker.ui.metrics.RepositoryGauges import pl.touk.nussknacker.ui.migrations.{MigrationApiAdapterService, MigrationService} import pl.touk.nussknacker.ui.notifications.{Notification, NotificationConfig, NotificationServiceImpl} import pl.touk.nussknacker.ui.process._ +import pl.touk.nussknacker.ui.process.deployment.deploymentstatus.EngineSideDeploymentStatusesProvider +import pl.touk.nussknacker.ui.process.deployment.reconciliation.{ + FinishedDeploymentsStatusesSynchronizationConfig, + FinishedDeploymentsStatusesSynchronizationScheduler, + ScenarioDeploymentReconciler +} +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider import pl.touk.nussknacker.ui.process.deployment.{ ActionInfoService, ActionService, @@ -56,7 +63,6 @@ import pl.touk.nussknacker.ui.process.deployment.{ DeploymentService => LegacyDeploymentService, RepositoryBasedScenarioActivityManager, ScenarioResolver, - ScenarioStateProvider, ScenarioTestExecutorServiceImpl } import pl.touk.nussknacker.ui.process.fragment.{DefaultFragmentRepository, FragmentResolver} @@ -79,7 +85,7 @@ import pl.touk.nussknacker.ui.process.scenarioactivity.FetchScenarioActivityServ import pl.touk.nussknacker.ui.process.test.{PreliminaryScenarioTestDataSerDe, ScenarioTestService} import pl.touk.nussknacker.ui.process.version.{ScenarioGraphVersionRepository, ScenarioGraphVersionService} import pl.touk.nussknacker.ui.processreport.ProcessCounter -import 
pl.touk.nussknacker.ui.security.api.{AuthManager, AuthenticationResources} +import pl.touk.nussknacker.ui.security.api.{AuthManager, AuthenticationResources, NussknackerInternalUser} import pl.touk.nussknacker.ui.services.NuDesignerExposedApiHttpService import pl.touk.nussknacker.ui.statistics.repository.FingerprintRepositoryImpl import pl.touk.nussknacker.ui.statistics.{ @@ -156,37 +162,110 @@ class AkkaHttpBasedRouteProvider( ), dbioRunner ) - _ <- Resource.fromAutoCloseable( - IO { - val scheduler = new DeploymentsStatusesSynchronizationScheduler( - system, - deploymentsStatusesSynchronizer, - DeploymentsStatusesSynchronizationConfig.parse(resolvedDesignerConfig) - ) - scheduler.start() - scheduler - } + _ <- DeploymentsStatusesSynchronizationScheduler.resource( + system, + deploymentsStatusesSynchronizer, + DeploymentsStatusesSynchronizationConfig.parse(resolvedDesignerConfig) ) statisticsPublicKey <- Resource.fromAutoCloseable( IO { Source.fromURL(getClass.getResource("/encryption.key")) } ) - } yield { - val migrations = processingTypeDataProvider.mapValues(_.designerModelData.modelData.migrations) - val modelInfos = processingTypeDataProvider.mapValues(_.designerModelData.modelData.info) - - implicit val implicitDbioRunner: DBIOActionRunner = dbioRunner - val scenarioActivityRepository = DbScenarioActivityRepository.create(dbRef, designerClock) - val actionRepository = DbScenarioActionRepository.create(dbRef) - val stickyNotesRepository = DbStickyNotesRepository.create(dbRef, designerClock) - val scenarioLabelsRepository = new ScenarioLabelsRepository(dbRef) - val processRepository = DBFetchingProcessRepository.create(dbRef, actionRepository, scenarioLabelsRepository) + migrations = processingTypeDataProvider.mapValues(_.designerModelData.modelData.migrations) + modelInfos = processingTypeDataProvider.mapValues(_.designerModelData.modelData.info) + scenarioActivityRepository = DbScenarioActivityRepository.create(dbRef, designerClock) + actionRepository = 
DbScenarioActionRepository.create(dbRef) + stickyNotesRepository = DbStickyNotesRepository.create(dbRef, designerClock) + scenarioLabelsRepository = new ScenarioLabelsRepository(dbRef) + processRepository = DBFetchingProcessRepository.create(dbRef, actionRepository, scenarioLabelsRepository) // TODO: get rid of Future based repositories - it is easier to use everywhere one implementation - DBIOAction based which allows transactions handling - val futureProcessRepository = + futureProcessRepository = DBFetchingProcessRepository.createFutureRepository(dbRef, actionRepository, scenarioLabelsRepository) - val writeProcessRepository = + writeProcessRepository = ProcessRepository.create(dbRef, designerClock, scenarioActivityRepository, scenarioLabelsRepository, migrations) + processChangeListener = ProcessChangeListenerLoader.loadListeners( + getClass.getClassLoader, + resolvedDesignerConfig, + NussknackerServices(new PullProcessRepository(futureProcessRepository)) + ) + dmDispatcher = + new DeploymentManagerDispatcher( + processingTypeDataProvider.mapValues(_.deploymentData.validDeploymentManagerOrStub), + futureProcessRepository + ) + deploymentsStatusesProvider = + new EngineSideDeploymentStatusesProvider(dmDispatcher, featureTogglesConfig.scenarioStateTimeout) + scenarioStatusProvider = new ScenarioStatusProvider( + deploymentsStatusesProvider, + dmDispatcher, + processRepository, + actionRepository, + dbioRunner, + ) + actionService = new ActionService( + processRepository, + actionRepository, + dbioRunner, + processChangeListener, + scenarioStatusProvider, + featureTogglesConfig.deploymentCommentSettings, + designerClock + ) + reconciler = new ScenarioDeploymentReconciler( + processingTypeDataProvider.all(NussknackerInternalUser.instance).keys, + deploymentsStatusesProvider, + actionRepository, + dbioRunner + ) + _ <- FinishedDeploymentsStatusesSynchronizationScheduler.resource( + system, + reconciler, + 
FinishedDeploymentsStatusesSynchronizationConfig.parse(resolvedDesignerConfig) + ) + } yield { + implicit val implicitDbioRunner: DBIOActionRunner = dbioRunner + actionService.invalidateInProgressActions() + + actionServiceSupplier.set(actionService) + + val additionalComponentConfigs = processingTypeDataProvider.mapValues { processingTypeData => + processingTypeData.designerModelData.modelData.additionalConfigsFromProvider + } + + // we need to reload processing type data after deployment service creation to make sure that it will be done using + // correct classloader and that won't cause further delays during handling requests + processingTypeDataProvider.reloadAll().unsafeRunSync() + + val authenticationResources = + AuthenticationResources(resolvedDesignerConfig, getClass.getClassLoader, sttpBackend) + val authManager = new AuthManager(authenticationResources) + + Initialization.init( + migrations, + dbRef, + designerClock, + processRepository, + scenarioActivityRepository, + scenarioLabelsRepository, + environment + ) + + val newProcessPreparer = processingTypeDataProvider.mapValues { processingTypeData => + new NewProcessPreparer( + processingTypeData.deploymentData.metaDataInitializer, + processingTypeData.deploymentData.scenarioPropertiesConfig, + new ScenarioPropertiesConfigFinalizer(additionalUIConfigProvider, processingTypeData.name), + ) + } + + val stateDefinitionService = new ProcessStateDefinitionService( + processingTypeDataProvider + .mapValues(_.category) + .mapCombined(_.statusNameToStateDefinitionsMapping) + ) + + val scenarioStatusPresenter = new ScenarioStatusPresenter(dmDispatcher) val fragmentRepository = new DefaultFragmentRepository(futureProcessRepository) val fragmentResolver = new FragmentResolver(fragmentRepository) @@ -239,78 +318,10 @@ class AkkaHttpBasedRouteProvider( val scenarioResolver = scenarioTestServiceDeps.mapValues(_._3) val notificationsConfig = resolvedDesignerConfig.as[NotificationConfig]("notifications") - val 
processChangeListener = ProcessChangeListenerLoader.loadListeners( - getClass.getClassLoader, - resolvedDesignerConfig, - NussknackerServices(new PullProcessRepository(futureProcessRepository)) - ) - - val dmDispatcher = - new DeploymentManagerDispatcher( - processingTypeDataProvider.mapValues(_.deploymentData.validDeploymentManagerOrStub), - futureProcessRepository - ) - - val additionalComponentConfigs = processingTypeDataProvider.mapValues { processingTypeData => - processingTypeData.designerModelData.modelData.additionalConfigsFromProvider - } - - val scenarioStateProvider = - ScenarioStateProvider( - dmDispatcher, - processRepository, - actionRepository, - dbioRunner, - featureTogglesConfig.scenarioStateTimeout - ) - val actionService = new ActionService( - dmDispatcher, - processRepository, - actionRepository, - dbioRunner, - processChangeListener, - scenarioStateProvider, - featureTogglesConfig.deploymentCommentSettings, - designerClock - ) - actionService.invalidateInProgressActions() - - actionServiceSupplier.set(actionService) - - // we need to reload processing type data after deployment service creation to make sure that it will be done using - // correct classloader and that won't cause further delays during handling requests - processingTypeDataProvider.reloadAll().unsafeRunSync() - - val authenticationResources = - AuthenticationResources(resolvedDesignerConfig, getClass.getClassLoader, sttpBackend) - val authManager = new AuthManager(authenticationResources) - - Initialization.init( - migrations, - dbRef, - designerClock, - processRepository, - scenarioActivityRepository, - scenarioLabelsRepository, - environment - ) - - val newProcessPreparer = processingTypeDataProvider.mapValues { processingTypeData => - new NewProcessPreparer( - processingTypeData.deploymentData.metaDataInitializer, - processingTypeData.deploymentData.scenarioPropertiesConfig, - new ScenarioPropertiesConfigFinalizer(additionalUIConfigProvider, processingTypeData.name), - ) - } - 
- val stateDefinitionService = new ProcessStateDefinitionService( - processingTypeDataProvider - .mapValues(_.category) - .mapCombined(_.statusNameToStateDefinitionsMapping) - ) val processService = new DBProcessService( - scenarioStateProvider, + scenarioStatusProvider, + scenarioStatusPresenter, newProcessPreparer, processingTypeDataProvider.mapCombined(_.parametersService), processResolver, @@ -518,7 +529,8 @@ class AkkaHttpBasedRouteProvider( val routes = List( new ProcessesResources( processService = processService, - scenarioStateProvider = scenarioStateProvider, + scenarioStatusProvider = scenarioStatusProvider, + scenarioStatusPresenter = scenarioStatusPresenter, processToolbarService = configProcessToolbarService, processAuthorizer = processAuthorizer, processChangeListener = processChangeListener @@ -537,7 +549,6 @@ class AkkaHttpBasedRouteProvider( dmDispatcher, metricsRegistry, scenarioTestService, - processingTypeDataProvider.mapValues(_.designerModelData.modelData) ), new ValidationResources(processService, processResolver), new DefinitionResources( diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatistics.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatistics.scala index dca770e7242..b8b3d36190d 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatistics.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatistics.scala @@ -225,7 +225,7 @@ object ScenarioStatistics { LiteK8sDMCount -> (inputData.deploymentManagerType == liteK8sDeploymentManagerType), LiteEmbeddedDMCount -> (inputData.deploymentManagerType == liteEmbeddedDeploymentManagerType), UnknownDMCount -> !knownDeploymentManagerTypes.contains(inputData.deploymentManagerType), - ActiveScenarioCount -> inputData.status.contains(SimpleStateStatus.Running), + ActiveScenarioCount -> inputData.status.contains(SimpleStateStatus.Running.name), ).map { case 
(k, v) => (k.toString, if (v) 1 else 0) } } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsService.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsService.scala index ee675e28a63..d73af284664 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsService.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsService.scala @@ -3,7 +3,7 @@ package pl.touk.nussknacker.ui.statistics import cats.data.EitherT import com.typesafe.scalalogging.LazyLogging import pl.touk.nussknacker.engine.api.component.{DesignerWideComponentId, ProcessingMode} -import pl.touk.nussknacker.engine.api.deployment.{ProcessAction, StateStatus} +import pl.touk.nussknacker.engine.api.deployment.ProcessAction import pl.touk.nussknacker.engine.api.graph.ScenarioGraph import pl.touk.nussknacker.engine.api.process.{ProcessId, VersionId} import pl.touk.nussknacker.engine.definition.component.ComponentDefinitionWithImplementation @@ -57,7 +57,7 @@ object UsageStatisticsReportsSettingsService extends LazyLogging { isFragment = scenario.isFragment, processingMode = scenario.processingMode, deploymentManagerType = deploymentManagerTypeByProcessingType(scenario.processingType), - status = scenario.state.map(_.status), + status = scenario.state.map(_.status.name), nodesCount = scenario.scenarioGraph.map(_.nodes.length).getOrElse(0), scenarioCategory = scenario.processCategory, scenarioVersion = scenario.processVersionId, @@ -177,7 +177,7 @@ private[statistics] case class ScenarioStatisticsInputData( processingMode: ProcessingMode, deploymentManagerType: DeploymentManagerType, // For fragments status is empty - status: Option[StateStatus], + status: Option[String], nodesCount: Int, scenarioCategory: String, scenarioVersion: VersionId, diff --git 
a/designer/server/src/main/scala/pl/touk/nussknacker/ui/util/FutureUtils.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/util/FutureUtils.scala index 7aa83ea4ab2..e09204dc09e 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/util/FutureUtils.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/util/FutureUtils.scala @@ -13,24 +13,16 @@ object FutureUtils { // This solution is based on: https://stackoverflow.com/a/42468372/1370301 def withTimeout(duration: FiniteDuration, timeoutResult: => T)( implicit actorSystem: ActorSystem - ): Future[LimitedByTimeoutResult[T]] = { + ): Future[T] = { import actorSystem._ Future.firstCompletedOf( Seq( - akka.pattern.after(duration)(Future.successful(CompletedByTimeout(timeoutResult))), - future.map(CompletedNormally(_)) + akka.pattern.after(duration)(Future.successful(timeoutResult)), + future ) ) } } - sealed trait LimitedByTimeoutResult[T] { - def value: T - } - - final case class CompletedNormally[T](value: T) extends LimitedByTimeoutResult[T] - - final case class CompletedByTimeout[T](value: T) extends LimitedByTimeoutResult[T] - } diff --git a/designer/server/src/main/scala/pl/touk/nussknacker/ui/validation/NodeValidator.scala b/designer/server/src/main/scala/pl/touk/nussknacker/ui/validation/NodeValidator.scala index ed42bdb2296..e24ad84bee3 100644 --- a/designer/server/src/main/scala/pl/touk/nussknacker/ui/validation/NodeValidator.scala +++ b/designer/server/src/main/scala/pl/touk/nussknacker/ui/validation/NodeValidator.scala @@ -1,11 +1,10 @@ package pl.touk.nussknacker.ui.validation import pl.touk.nussknacker.engine.ModelData -import pl.touk.nussknacker.engine.api.{JobData, MetaData, ProcessVersion} import pl.touk.nussknacker.engine.api.context.ProcessCompilationError.MissingParameters import pl.touk.nussknacker.engine.api.context.{ProcessCompilationError, ValidationContext} -import pl.touk.nussknacker.engine.api.process.ProcessName import 
pl.touk.nussknacker.engine.api.typed.typing.TypingResult +import pl.touk.nussknacker.engine.api.{JobData, ProcessVersion} import pl.touk.nussknacker.engine.compile.FragmentResolver import pl.touk.nussknacker.engine.compile.nodecompilation.NodeDataValidator.OutgoingEdge import pl.touk.nussknacker.engine.compile.nodecompilation.{ diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/test/base/it/NuResourcesTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/test/base/it/NuResourcesTest.scala index 46d08382e61..6a71076d176 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/test/base/it/NuResourcesTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/test/base/it/NuResourcesTest.scala @@ -45,11 +45,12 @@ import pl.touk.nussknacker.test.utils.domain.{ProcessTestData, TestFactory} import pl.touk.nussknacker.test.utils.scalas.AkkaHttpExtensions.toRequestEntity import pl.touk.nussknacker.ui.api._ import pl.touk.nussknacker.ui.config.scenariotoolbar.CategoriesScenarioToolbarsConfigParser -import pl.touk.nussknacker.ui.config.FeatureTogglesConfig -import pl.touk.nussknacker.ui.config.DesignerConfig +import pl.touk.nussknacker.ui.config.{DesignerConfig, FeatureTogglesConfig} import pl.touk.nussknacker.ui.process.ProcessService.{CreateScenarioCommand, UpdateScenarioCommand} import pl.touk.nussknacker.ui.process._ import pl.touk.nussknacker.ui.process.deployment._ +import pl.touk.nussknacker.ui.process.deployment.deploymentstatus.EngineSideDeploymentStatusesProvider +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider import pl.touk.nussknacker.ui.process.fragment.DefaultFragmentRepository import pl.touk.nussknacker.ui.process.marshall.CanonicalProcessConverter import pl.touk.nussknacker.ui.process.processingtype.ProcessingTypeData.SchedulingForProcessingType @@ -116,21 +117,25 @@ trait NuResourcesTest futureFetchingScenarioRepository ) - protected val scenarioStateProvider: 
ScenarioStateProvider = ScenarioStateProvider( + protected val deploymentsStatusesProvider = + new EngineSideDeploymentStatusesProvider(dmDispatcher, None) + + protected val scenarioStatusProvider: ScenarioStatusProvider = new ScenarioStatusProvider( + deploymentsStatusesProvider, dmDispatcher, fetchingProcessRepository, actionRepository, dbioRunner, - scenarioStateTimeout = None ) + protected val scenarioStatusPresenter = new ScenarioStatusPresenter(dmDispatcher) + protected val actionService: ActionService = new ActionService( - dmDispatcher, fetchingProcessRepository, actionRepository, dbioRunner, processChangeListener, - scenarioStateProvider, + scenarioStatusProvider, deploymentCommentSettings, Clock.systemUTC() ) @@ -195,7 +200,7 @@ trait NuResourcesTest ) } - protected val processService: DBProcessService = createDBProcessService(scenarioStateProvider) + protected val processService: DBProcessService = createDBProcessService(scenarioStatusProvider) protected val scenarioTestServiceByProcessingType: ProcessingTypeDataProvider[ScenarioTestService, _] = mapProcessingTypeDataProvider( @@ -207,7 +212,8 @@ trait NuResourcesTest protected val processesRoute = new ProcessesResources( processService = processService, - scenarioStateProvider = scenarioStateProvider, + scenarioStatusProvider = scenarioStatusProvider, + scenarioStatusPresenter = scenarioStatusPresenter, processToolbarService = configProcessToolbarService, processAuthorizer = processAuthorizer, processChangeListener = processChangeListener @@ -226,9 +232,10 @@ trait NuResourcesTest RealLoggedUser(id, name, Map(Category1.stringify -> permissions.toSet)) } - protected def createDBProcessService(processStateProvider: ScenarioStateProvider): DBProcessService = + protected def createDBProcessService(processStateProvider: ScenarioStatusProvider): DBProcessService = new DBProcessService( processStateProvider, + scenarioStatusPresenter, newProcessPreparerByProcessingType, 
typeToConfig.mapCombined(_.parametersService), processResolverByProcessingType, @@ -264,7 +271,6 @@ trait NuResourcesTest dispatcher = dmDispatcher, metricRegistry = new MetricRegistry, scenarioTestServices = scenarioTestServiceByProcessingType, - typeToConfig = typeToConfig.mapValues(_.designerModelData.modelData) ) override def beforeEach(): Unit = { diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/test/mock/MockDeploymentManager.scala b/designer/server/src/test/scala/pl/touk/nussknacker/test/mock/MockDeploymentManager.scala index 610ef66f15c..3945e208760 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/test/mock/MockDeploymentManager.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/test/mock/MockDeploymentManager.scala @@ -5,24 +5,26 @@ import cats.data.Validated.valid import cats.data.ValidatedNel import cats.effect.unsafe.IORuntime import com.typesafe.config.Config +import org.apache.flink.api.common.{JobID, JobStatus} import org.apache.flink.configuration.Configuration import sttp.client3.testing.SttpBackendStub import pl.touk.nussknacker.engine._ import pl.touk.nussknacker.engine.api.ProcessVersion import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.engine.api.process.ProcessName +import pl.touk.nussknacker.engine.api.process.{ProcessName, VersionId} import pl.touk.nussknacker.engine.deployment._ import pl.touk.nussknacker.engine.flink.minicluster.FlinkMiniClusterFactory import pl.touk.nussknacker.engine.flink.minicluster.scenariotesting.ScenarioStateVerificationConfig import pl.touk.nussknacker.engine.management.jobrunner.FlinkScenarioJobRunner +import pl.touk.nussknacker.engine.management.rest.flinkRestModel.{JobOverview, JobTasksOverview} import pl.touk.nussknacker.engine.management.{FlinkConfig, FlinkDeploymentManager, FlinkDeploymentManagerProvider} import 
pl.touk.nussknacker.engine.util.loader.{DeploymentManagersClassLoader, ModelClassLoader} import pl.touk.nussknacker.test.config.ConfigWithScalaVersion import pl.touk.nussknacker.test.mock.MockDeploymentManager.{ sampleCustomActionActivity, sampleDeploymentId, - sampleStatusDetails + sampleDeploymentStatusDetails } import pl.touk.nussknacker.test.utils.domain.TestFactory import pl.touk.nussknacker.ui.process.periodic.flink.FlinkClientStub @@ -32,13 +34,14 @@ import java.util.UUID import java.util.concurrent.{ConcurrentHashMap, ConcurrentLinkedQueue} import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.jdk.CollectionConverters._ import scala.util.Try // DEPRECATED!!! Use `WithMockableDeploymentManager` trait and `MockableDeploymentManager` instead class MockDeploymentManager private ( modelData: ModelData, deploymentManagerDependencies: DeploymentManagerDependencies, - defaultProcessStateStatus: StateStatus, + defaultDeploymentStatus: StateStatus, scenarioActivityManager: ScenarioActivityManager, customProcessStateDefinitionManager: Option[ProcessStateDefinitionManager], closeCreatedDeps: () => Unit, @@ -48,7 +51,7 @@ class MockDeploymentManager private ( FlinkConfig(None, scenarioStateVerification = ScenarioStateVerificationConfig(enabled = false)), Some( FlinkMiniClusterFactory - .createMiniClusterWithServices(modelData.modelClassLoader, new Configuration, new Configuration) + .createMiniClusterWithServices(modelData.modelClassLoader, new Configuration) ), FlinkClientStub, FlinkScenarioJobRunnerStub @@ -61,7 +64,7 @@ class MockDeploymentManager private ( @volatile var cancelResult: Future[Unit] = Future.successful(()) - val managerProcessStates = new ConcurrentHashMap[ProcessName, List[StatusDetails]] + val managerProcessStates = new ConcurrentHashMap[ProcessName, List[DeploymentStatusDetails]] @volatile var delayBeforeStateReturn: FiniteDuration = 0 seconds @@ -75,16 +78,33 @@ class MockDeploymentManager 
private ( case None => super.processStateDefinitionManager } - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { + override protected def getScenarioDeploymentsStatusesWithJobOverview( + scenarioName: ProcessName + )( + implicit freshnessPolicy: DataFreshnessPolicy + ): Future[WithDataFreshnessStatus[List[(DeploymentStatusDetails, JobOverview)]]] = { Future { Thread.sleep(delayBeforeStateReturn.toMillis) WithDataFreshnessStatus.fresh( - managerProcessStates.getOrDefault( - name, - List(sampleStatusDetails(defaultProcessStateStatus, sampleDeploymentId)) - ) + managerProcessStates + .getOrDefault( + scenarioName, + List(sampleDeploymentStatusDetails(defaultDeploymentStatus, sampleDeploymentId)) + ) + .map { deploymentStatus => + val tasksOverview = JobTasksOverview(1, 0, 0, 0, 1, 0, 0, 0, 0, 0, None) + val deploymentIdUuid = + deploymentStatus.deploymentId.map(id => UUID.fromString(id.value)).getOrElse(UUID.randomUUID()) + val jobOverview = JobOverview( + new JobID(deploymentIdUuid.getLeastSignificantBits, deploymentIdUuid.getLeastSignificantBits), + "not-important", + -1, + -1, + JobStatus.RUNNING.name(), + tasksOverview + ) + (deploymentStatus, jobOverview) + } ) } } @@ -102,9 +122,18 @@ class MockDeploymentManager private ( override protected def cancelScenario(command: DMCancelScenarioCommand): Future[Unit] = cancelResult - // We override this field, because currently, this mock returns fallback for not defined scenarios states. - // To make stateQueryForAllScenariosSupport consistent with this approach, we should remove this fallback. 
- override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = NoStateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + new DeploymentsStatusesQueryForAllScenariosSupported { + + override def getAllScenariosDeploymentsStatuses()( + implicit freshnessPolicy: DataFreshnessPolicy + ): Future[WithDataFreshnessStatus[Map[ProcessName, List[DeploymentStatusDetails]]]] = { + Future { + WithDataFreshnessStatus.fresh(managerProcessStates.asScala.toMap) + } + } + + } override def close(): Unit = { super.close() @@ -119,7 +148,7 @@ object FlinkScenarioJobRunnerStub extends FlinkScenarioJobRunner { override def runScenarioJob( command: DMRunDeploymentCommand, savepointPathOpt: Option[String] - ): Future[Option[ExternalDeploymentId]] = + ): Future[Option[JobID]] = Future.failed(new IllegalAccessException("This implementation shouldn't be used")) } @@ -169,11 +198,12 @@ object MockDeploymentManager { ) } - private[mock] def sampleStatusDetails( + private[mock] def sampleDeploymentStatusDetails( status: StateStatus, deploymentId: DeploymentId, - version: Option[ProcessVersion] = Some(ProcessVersion.empty) - ): StatusDetails = StatusDetails(status, Some(deploymentId), Some(ExternalDeploymentId("1")), version) + version: Option[VersionId] = Some(VersionId.initialVersionId) + ): DeploymentStatusDetails = + DeploymentStatusDetails(status, Some(deploymentId), version) // Pass correct deploymentId private[mock] def sampleDeploymentId: DeploymentId = DeploymentId(UUID.randomUUID().toString) @@ -242,7 +272,7 @@ object MockDeploymentManagerSyntaxSugar { } } - def withProcessStates[T](processName: ProcessName, statuses: List[StatusDetails])(action: => T): T = { + def withProcessStates[T](processName: ProcessName, statuses: List[DeploymentStatusDetails])(action: => T): T = { try { deploymentManager.managerProcessStates.put(processName, statuses) action @@ -266,13 +296,13 @@ object 
MockDeploymentManagerSyntaxSugar { status: StateStatus, deploymentId: DeploymentId = sampleDeploymentId )(action: => T): T = { - withProcessStates(processName, List(sampleStatusDetails(status, deploymentId)))(action) + withProcessStates(processName, List(sampleDeploymentStatusDetails(status, deploymentId)))(action) } - def withProcessStateVersion[T](processName: ProcessName, status: StateStatus, version: Option[ProcessVersion])( + def withProcessStateVersion[T](processName: ProcessName, status: StateStatus, version: Option[VersionId])( action: => T ): T = { - withProcessStates(processName, List(sampleStatusDetails(status, sampleDeploymentId, version)))(action) + withProcessStates(processName, List(sampleDeploymentStatusDetails(status, sampleDeploymentId, version)))(action) } def withEmptyProcessState[T](processName: ProcessName)(action: => T): T = { diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/test/mock/StubScenarioStateProvider.scala b/designer/server/src/test/scala/pl/touk/nussknacker/test/mock/StubScenarioStateProvider.scala deleted file mode 100644 index ab6264838d3..00000000000 --- a/designer/server/src/test/scala/pl/touk/nussknacker/test/mock/StubScenarioStateProvider.scala +++ /dev/null @@ -1,40 +0,0 @@ -package pl.touk.nussknacker.test.mock - -import cats.Traverse -import db.util.DBIOActionInstances.DB -import pl.touk.nussknacker.engine.api.deployment._ -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} -import pl.touk.nussknacker.restmodel.scenariodetails.ScenarioWithDetails -import pl.touk.nussknacker.ui.process.deployment.ScenarioStateProvider -import pl.touk.nussknacker.ui.process.repository.ScenarioWithDetailsEntity -import pl.touk.nussknacker.ui.security.api.LoggedUser -import slick.dbio.DBIO - -import scala.concurrent.Future -import scala.language.higherKinds - -class StubScenarioStateProvider(states: Map[ProcessName, ProcessState]) extends ScenarioStateProvider { - - override def 
getProcessState( - processDetails: ScenarioWithDetailsEntity[_] - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[ProcessState] = - Future.successful(states(processDetails.name)) - - override def getProcessState( - processIdWithName: ProcessIdWithName, - currentlyPresentedVersionId: Option[VersionId], - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): Future[ProcessState] = - Future.successful(states(processIdWithName.name)) - - override def enrichDetailsWithProcessState[F[_]: Traverse](processTraverse: F[ScenarioWithDetails])( - implicit user: LoggedUser, - freshnessPolicy: DataFreshnessPolicy - ): Future[F[ScenarioWithDetails]] = Future.successful(processTraverse) - - override def getProcessStateDBIO( - processDetails: ScenarioWithDetailsEntity[_], - currentlyPresentedVersionId: Option[VersionId] - )(implicit user: LoggedUser, freshnessPolicy: DataFreshnessPolicy): DB[ProcessState] = - DBIO.successful(states(processDetails.name)) - -} diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestFactory.scala b/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestFactory.scala index 950bf3a106b..915ed0c1a54 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestFactory.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestFactory.scala @@ -23,7 +23,7 @@ import pl.touk.nussknacker.restmodel.scenariodetails.ScenarioParameters import pl.touk.nussknacker.security.Permission import pl.touk.nussknacker.test.config.WithSimplifiedDesignerConfig.TestProcessingType.Streaming import pl.touk.nussknacker.test.config.WithSimplifiedDesignerConfig.{TestCategory, TestProcessingType} -import pl.touk.nussknacker.test.mock.{StubFragmentRepository, StubScenarioStateProvider, TestAdditionalUIConfigProvider} +import pl.touk.nussknacker.test.mock.{StubFragmentRepository, TestAdditionalUIConfigProvider} import 
pl.touk.nussknacker.ui.api.{RouteWithUser, RouteWithoutUser} import pl.touk.nussknacker.ui.db.DbRef import pl.touk.nussknacker.ui.definition.ScenarioPropertiesConfigFinalizer @@ -143,8 +143,6 @@ object TestFactory { ) } - def processStateProvider() = new StubScenarioStateProvider(Map.empty) - def newDBIOActionRunner(dbRef: DbRef): DBIOActionRunner = DBIOActionRunner(dbRef) diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestProcessUtil.scala b/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestProcessUtil.scala index 1aa56b7b189..f437fb93cda 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestProcessUtil.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/test/utils/domain/TestProcessUtil.scala @@ -123,7 +123,7 @@ object TestProcessUtil { scenarioLabels = scenarioLabels, lastAction = lastAction.map(createProcessAction), lastStateAction = lastAction.collect { - case action if ScenarioActionName.StateActions.contains(action) => createProcessAction(action) + case action if ScenarioActionName.ScenarioStatusActions.contains(action) => createProcessAction(action) }, lastDeployedAction = lastAction.collect { case Deploy => createProcessAction(Deploy) diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceBusinessSpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceBusinessSpec.scala index d9f0245c76e..49c2e396516 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceBusinessSpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceBusinessSpec.scala @@ -8,6 +8,7 @@ import io.restassured.module.scala.RestAssuredSupport.AddThenToResponse import org.hamcrest.Matchers._ import org.scalatest.freespec.AnyFreeSpecLike import org.scalatest.matchers.must.Matchers.be +import pl.touk.nussknacker.development.manager.BasicStatusDetails import 
pl.touk.nussknacker.development.manager.MockableDeploymentManagerProvider.MockableDeploymentManager import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus @@ -42,7 +43,7 @@ class AppApiHttpServiceBusinessSpec createDeployedExampleScenario(ProcessName("id1")) MockableDeploymentManager.configureScenarioStatuses( - Map("id1" -> SimpleStateStatus.Running) + Map("id1" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) } .when() @@ -70,9 +71,8 @@ class AppApiHttpServiceBusinessSpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -94,10 +94,6 @@ class AppApiHttpServiceBusinessSpec .applicationState { createDeployedCanceledExampleScenario(ProcessName("id1")) createDeployedExampleScenario(ProcessName("id2")) - - MockableDeploymentManager.configureScenarioStatuses( - Map("id2" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin")) - ) } .when() .basicAuthAllPermUser() @@ -120,8 +116,8 @@ class AppApiHttpServiceBusinessSpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> SimpleStateStatus.Running, - "id2" -> SimpleStateStatus.Running, + "id1" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -144,7 +140,7 @@ class AppApiHttpServiceBusinessSpec createDeployedExampleScenario(ProcessName("id1")) MockableDeploymentManager.configureScenarioStatuses( - Map("id1" -> SimpleStateStatus.Running) + Map("id1" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) } .when() @@ -169,7 +165,7 @@ class 
AppApiHttpServiceBusinessSpec createDeployedExampleScenario(ProcessName("id1")) MockableDeploymentManager.configureScenarioStatuses( - Map("id1" -> SimpleStateStatus.Running) + Map("id1" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) } .basicAuthAllPermUser() @@ -259,9 +255,8 @@ class AppApiHttpServiceBusinessSpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceSecuritySpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceSecuritySpec.scala index d59d0388878..c2a22681d0f 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceSecuritySpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/AppApiHttpServiceSecuritySpec.scala @@ -6,6 +6,7 @@ import io.restassured.RestAssured._ import io.restassured.module.scala.RestAssuredSupport.AddThenToResponse import org.hamcrest.Matchers._ import org.scalatest.freespec.AnyFreeSpecLike +import pl.touk.nussknacker.development.manager.BasicStatusDetails import pl.touk.nussknacker.development.manager.MockableDeploymentManagerProvider.MockableDeploymentManager import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus @@ -39,8 +40,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> SimpleStateStatus.Running, - "id2" -> SimpleStateStatus.Running + "id1" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), + "id2" -> 
BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))) ) ) } @@ -70,9 +71,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -100,9 +100,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -124,9 +123,9 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> SimpleStateStatus.ProblemStateStatus.Failed, - "id2" -> SimpleStateStatus.ProblemStateStatus.Failed, - "id3" -> SimpleStateStatus.ProblemStateStatus.Failed + "id1" -> BasicStatusDetails(SimpleStateStatus.ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.ProblemStateStatus.Failed, None), + "id3" -> BasicStatusDetails(SimpleStateStatus.ProblemStateStatus.Failed, None) ) ) } @@ -156,8 +155,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> SimpleStateStatus.NotDeployed, - "id2" -> SimpleStateStatus.NotDeployed + "id1" -> BasicStatusDetails(SimpleStateStatus.NotDeployed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.NotDeployed, None) ) ) } @@ -185,9 +184,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> 
ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -205,13 +203,6 @@ class AppApiHttpServiceSecuritySpec .applicationState { createDeployedExampleScenario(ProcessName("id1"), category = Category1) createDeployedExampleScenario(ProcessName("id2"), category = Category2) - - MockableDeploymentManager.configureScenarioStatuses( - Map( - "id1" -> SimpleStateStatus.NotDeployed, - "id2" -> SimpleStateStatus.NotDeployed - ) - ) } .when() .noAuth() @@ -319,9 +310,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -347,9 +337,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -371,9 +360,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.Failed, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -402,9 +390,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> 
ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.FailedToGet, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -423,9 +410,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.FailedToGet, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -447,9 +433,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.FailedToGet, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } @@ -471,9 +456,8 @@ class AppApiHttpServiceSecuritySpec MockableDeploymentManager.configureScenarioStatuses( Map( - "id1" -> ProblemStateStatus.FailedToGet, - "id2" -> SimpleStateStatus.Running, - "id3" -> ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin"), + "id1" -> BasicStatusDetails(ProblemStateStatus.FailedToGet, None), + "id2" -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))), ) ) } diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ManagementResourcesSpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ManagementResourcesSpec.scala index 6ee8371026d..debdb17785f 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ManagementResourcesSpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ManagementResourcesSpec.scala @@ -313,7 +313,7 @@ class 
ManagementResourcesSpec status shouldBe StatusCodes.Conflict } getProcess(invalidScenario.name) ~> check { - decodeDetails.state.value.status shouldEqual SimpleStateStatus.NotDeployed + decodeDetails.state.value.status.name shouldEqual SimpleStateStatus.NotDeployed.name } } } diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ProcessesResourcesSpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ProcessesResourcesSpec.scala index e6d628ee990..15c25c40347 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ProcessesResourcesSpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/api/ProcessesResourcesSpec.scala @@ -12,6 +12,7 @@ import org.scalatest.LoneElement._ import org.scalatest._ import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers +import pl.touk.nussknacker.development.manager.BasicStatusDetails import pl.touk.nussknacker.development.manager.MockableDeploymentManagerProvider.MockableDeploymentManager import pl.touk.nussknacker.engine.api.ProcessAdditionalFields import pl.touk.nussknacker.engine.api.component.ProcessingMode @@ -179,7 +180,7 @@ class ProcessesResourcesSpec test("return single process") { createDeployedExampleScenario(processName, category = Category1) MockableDeploymentManager.configureScenarioStatuses( - Map(processName.value -> SimpleStateStatus.Running) + Map(processName.value -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) forScenarioReturned(processName) { process => @@ -282,7 +283,7 @@ class ProcessesResourcesSpec test("not allow to archive still running process") { createDeployedExampleScenario(processName, category = Category1) MockableDeploymentManager.configureScenarioStatuses( - Map(processName.value -> SimpleStateStatus.Running) + Map(processName.value -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) archiveProcess(processName) { status => @@ -343,7 +344,7 @@ class 
ProcessesResourcesSpec test("should not allow to rename deployed process") { createDeployedExampleScenario(processName, category = Category1) MockableDeploymentManager.configureScenarioStatuses( - Map(processName.value -> SimpleStateStatus.Running) + Map(processName.value -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) val newName = ProcessName("ProcessChangedName") @@ -369,7 +370,7 @@ class ProcessesResourcesSpec ignore("should not allow to rename process with running state") { createEmptyScenario(processName, category = Category1) MockableDeploymentManager.configureScenarioStatuses( - Map(processName.value -> SimpleStateStatus.Running) + Map(processName.value -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) val newName = ProcessName("ProcessChangedName") @@ -602,8 +603,8 @@ class ProcessesResourcesSpec MockableDeploymentManager.configureScenarioStatuses( Map( - secondProcessor.value -> SimpleStateStatus.Canceled, - thirdProcessor.value -> SimpleStateStatus.Running + secondProcessor.value -> BasicStatusDetails(SimpleStateStatus.Canceled, Some(VersionId(1))), + thirdProcessor.value -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1))) ) ) @@ -1265,7 +1266,7 @@ class ProcessesResourcesSpec test("should return status for single deployed process") { createDeployedExampleScenario(processName, category = Category1) MockableDeploymentManager.configureScenarioStatuses( - Map(processName.value -> SimpleStateStatus.Running) + Map(processName.value -> BasicStatusDetails(SimpleStateStatus.Running, Some(VersionId(1)))) ) forScenarioStatus(processName) { (code, state) => @@ -1346,7 +1347,7 @@ class ProcessesResourcesSpec private def verifyProcessWithStateOnList(expectedName: ProcessName, expectedStatus: Option[StateStatus]): Unit = { MockableDeploymentManager.configureScenarioStatuses( - Map(processName.value -> SimpleStateStatus.Running) + Map(processName.value -> BasicStatusDetails(SimpleStateStatus.Running, 
Some(VersionId(1)))) ) forScenariosReturned(ScenarioQuery.empty) { processes => diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/definition/component/DefaultComponentServiceSpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/definition/component/DefaultComponentServiceSpec.scala index 43e0f0ef173..de5fa6fe4c1 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/definition/component/DefaultComponentServiceSpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/definition/component/DefaultComponentServiceSpec.scala @@ -6,6 +6,7 @@ import org.scalatest.OptionValues import org.scalatest.exceptions.TestFailedException import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers +import org.scalatestplus.mockito.MockitoSugar.mock import pl.touk.nussknacker.engine.ModelData import pl.touk.nussknacker.engine.api.component.Component.AllowedProcessingModes import pl.touk.nussknacker.engine.api.component.ComponentType._ @@ -27,6 +28,7 @@ import pl.touk.nussknacker.test.mock.{MockFetchingProcessRepository, MockManager import pl.touk.nussknacker.test.utils.domain.TestFactory import pl.touk.nussknacker.test.utils.domain.TestProcessUtil.createFragmentEntity import pl.touk.nussknacker.test.{EitherValuesDetailedMessage, PatientScalaFutures, ValidatedValuesDetailedMessage} +import pl.touk.nussknacker.ui.api.ScenarioStatusPresenter import pl.touk.nussknacker.ui.config.ComponentLinkConfig._ import pl.touk.nussknacker.ui.config.{ComponentLinkConfig, ComponentLinksConfigExtractor} import pl.touk.nussknacker.ui.definition.AlignedComponentsDefinitionProvider @@ -40,6 +42,7 @@ import pl.touk.nussknacker.ui.definition.component.ComponentModelData._ import pl.touk.nussknacker.ui.definition.component.ComponentTestProcessData._ import pl.touk.nussknacker.ui.definition.component.DynamicComponentProvider._ import pl.touk.nussknacker.ui.process.DBProcessService +import 
pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider import pl.touk.nussknacker.ui.process.fragment.DefaultFragmentRepository import pl.touk.nussknacker.ui.process.processingtype.ProcessingTypeData.SchedulingForProcessingType import pl.touk.nussknacker.ui.process.processingtype.loader.ProcessingTypeDataLoader @@ -880,7 +883,8 @@ class DefaultComponentServiceSpec scenarioParametersServiceProvider: ProcessingTypeDataProvider[_, ScenarioParametersService], ): DBProcessService = new DBProcessService( - processStateProvider = TestFactory.processStateProvider(), + scenarioStatusProvider = mock[ScenarioStatusProvider], + scenarioStatusPresenter = mock[ScenarioStatusPresenter], newProcessPreparers = TestFactory.newProcessPreparerByProcessingType, scenarioParametersServiceProvider = scenarioParametersServiceProvider, processResolverByProcessingType = TestFactory.processResolverByProcessingType, diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/notifications/NotificationServiceTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/notifications/NotificationServiceTest.scala index ee83df1cccc..206d0fc0b3b 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/notifications/NotificationServiceTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/notifications/NotificationServiceTest.scala @@ -11,8 +11,8 @@ import org.scalatestplus.mockito.MockitoSugar import pl.touk.nussknacker.engine.api.component.NodesDeploymentData import pl.touk.nussknacker.engine.api.deployment.DeploymentUpdateStrategy.StateRestoringStrategy import pl.touk.nussknacker.engine.api.deployment._ -import pl.touk.nussknacker.engine.api.deployment.simple.{SimpleProcessStateDefinitionManager, SimpleStateStatus} -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleProcessStateDefinitionManager +import 
pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName} import pl.touk.nussknacker.engine.build.ScenarioBuilder import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.deployment.{ @@ -32,6 +32,8 @@ import pl.touk.nussknacker.ui.listener.ProcessChangeListener import pl.touk.nussknacker.ui.notifications.NotificationService.NotificationsScope import pl.touk.nussknacker.ui.process.deployment.LoggedUserConversions._ import pl.touk.nussknacker.ui.process.deployment._ +import pl.touk.nussknacker.ui.process.deployment.deploymentstatus.EngineSideDeploymentStatusesProvider +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider import pl.touk.nussknacker.ui.process.processingtype.provider.ProcessingTypeDataProvider import pl.touk.nussknacker.ui.process.repository.ProcessRepository.CreateProcessAction import pl.touk.nussknacker.ui.process.repository.activities.DbScenarioActivityRepository @@ -101,8 +103,8 @@ class NotificationServiceTest val id = saveSampleProcess(processName) val processIdWithName = ProcessIdWithName(id, processName) - val deploymentManager = mock[DeploymentManager] - val (deploymentService, actionService, notificationService) = createServices(deploymentManager) + val deploymentManager = mock[DeploymentManager] + val (deploymentService, _, notificationService) = createServices(deploymentManager) def notificationsFor(user: LoggedUser): List[Notification] = notificationService @@ -278,25 +280,9 @@ class NotificationServiceTest notificationAfterExecutionFinished.head.id should not equal deployNotificationId } - private val notDeployed = - SimpleProcessStateDefinitionManager.processState( - StatusDetails(SimpleStateStatus.NotDeployed, None), - VersionId(1), - None, - Some(VersionId(1)), - ) - private def createServices(deploymentManager: DeploymentManager) = { - when( - deploymentManager.getProcessState( - any[ProcessIdWithName], - any[Option[ProcessAction]], - 
any[VersionId], - any[Option[VersionId]], - any[Option[VersionId]], - )(any[DataFreshnessPolicy]) - ) - .thenReturn(Future.successful(WithDataFreshnessStatus.fresh(notDeployed))) + when(deploymentManager.getScenarioDeploymentsStatuses(any[ProcessName])(any[DataFreshnessPolicy])) + .thenReturn(Future.successful(WithDataFreshnessStatus.fresh(List.empty[DeploymentStatusDetails]))) val managerDispatcher = mock[DeploymentManagerDispatcher] when(managerDispatcher.deploymentManager(any[String])(any[LoggedUser])).thenReturn(Some(deploymentManager)) when(managerDispatcher.deploymentManagerUnsafe(any[String])(any[LoggedUser])).thenReturn(deploymentManager) @@ -310,20 +296,21 @@ class NotificationServiceTest config, clock ) - val scenarioStateProvider = ScenarioStateProvider( + val deploymentsStatusesProvider = + new EngineSideDeploymentStatusesProvider(dmDispatcher, scenarioStateTimeout = None) + val scenarioStatusProvider = new ScenarioStatusProvider( + deploymentsStatusesProvider, managerDispatcher, dbProcessRepository, actionRepository, - dbioRunner, - scenarioStateTimeout = None + dbioRunner ) val actionService = new ActionService( - managerDispatcher, dbProcessRepository, actionRepository, dbioRunner, mock[ProcessChangeListener], - scenarioStateProvider, + scenarioStatusProvider, None, clock ) diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/DBProcessServiceSpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/DBProcessServiceSpec.scala index f032ca8bb05..4cc82459014 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/DBProcessServiceSpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/DBProcessServiceSpec.scala @@ -4,6 +4,7 @@ import org.scalatest.OptionValues import org.scalatest.exceptions.TestFailedException import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers +import org.scalatestplus.mockito.MockitoSugar.mock import 
pl.touk.nussknacker.engine.api.deployment.ScenarioActionName.Deploy import pl.touk.nussknacker.engine.api.graph.ScenarioGraph import pl.touk.nussknacker.engine.api.process.ProcessIdWithName @@ -20,6 +21,8 @@ import pl.touk.nussknacker.test.utils.domain.{ProcessTestData, TestFactory} import pl.touk.nussknacker.ui.NuDesignerError import pl.touk.nussknacker.ui.NuDesignerError.XError import pl.touk.nussknacker.ui.api.ProcessesResources.ProcessUnmarshallingError +import pl.touk.nussknacker.ui.api.ScenarioStatusPresenter +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider import pl.touk.nussknacker.ui.process.exception.ProcessIllegalAction import pl.touk.nussknacker.ui.process.marshall.CanonicalProcessConverter import pl.touk.nussknacker.ui.process.repository.ScenarioWithDetailsEntity @@ -202,7 +205,8 @@ class DBProcessServiceSpec extends AnyFlatSpec with Matchers with PatientScalaFu processes: List[ScenarioWithDetailsEntity[ScenarioGraph]] = Nil ): DBProcessService = new DBProcessService( - processStateProvider = TestFactory.processStateProvider(), + scenarioStatusProvider = mock[ScenarioStatusProvider], + scenarioStatusPresenter = mock[ScenarioStatusPresenter], newProcessPreparers = TestFactory.newProcessPreparerByProcessingType, scenarioParametersServiceProvider = TestFactory.scenarioParametersServiceProvider, processResolverByProcessingType = TestFactory.processResolverByProcessingType, diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/ProcessStateDefinitionServiceSpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/ProcessStateDefinitionServiceSpec.scala index 93bd8c5353a..cf2c5678503 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/ProcessStateDefinitionServiceSpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/ProcessStateDefinitionServiceSpec.scala @@ -4,7 +4,7 @@ import com.typesafe.config.ConfigFactory import 
org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers import pl.touk.nussknacker.engine.api.component.ComponentDefinition -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.StateDefinitionDetails.UnknownIcon import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName import pl.touk.nussknacker.engine.api.deployment._ @@ -158,8 +158,8 @@ class ProcessStateDefinitionServiceSpec extends AnyFunSuite with Matchers { } private val emptyStateDefinitionManager = new ProcessStateDefinitionManager { - override def stateDefinitions: Map[StatusName, StateDefinitionDetails] = Map.empty - override def statusActions(processStatus: ProcessStatus): List[ScenarioActionName] = Nil + override def stateDefinitions: Map[StatusName, StateDefinitionDetails] = Map.empty + override def statusActions(input: ScenarioStatusWithScenarioContext): Set[ScenarioActionName] = Set.empty } private def createProcessingTypeDataMap( diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentServiceSpec.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentServiceSpec.scala index 2c40aabf662..9849610161e 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentServiceSpec.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/DeploymentServiceSpec.scala @@ -1,6 +1,5 @@ package pl.touk.nussknacker.ui.process.deployment -import akka.actor.ActorSystem import cats.implicits.toTraverseOps import cats.instances.list._ import db.util.DBIOActionInstances.DB @@ -8,39 +7,36 @@ import org.scalatest.LoneElement._ import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers import 
org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, OptionValues} +import pl.touk.nussknacker.engine.api.Comment import pl.touk.nussknacker.engine.api.component.NodesDeploymentData import pl.touk.nussknacker.engine.api.deployment.DeploymentUpdateStrategy.StateRestoringStrategy +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.ScenarioActionName.{Cancel, Deploy} import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus import pl.touk.nussknacker.engine.api.process._ -import pl.touk.nussknacker.engine.api.{Comment, ProcessVersion} import pl.touk.nussknacker.engine.build.ScenarioBuilder -import pl.touk.nussknacker.engine.deployment.{DeploymentId, ExternalDeploymentId} +import pl.touk.nussknacker.engine.deployment.DeploymentId import pl.touk.nussknacker.test.base.db.WithHsqlDbTesting import pl.touk.nussknacker.test.base.it.WithClock +import pl.touk.nussknacker.test.mock.MockDeploymentManager import pl.touk.nussknacker.test.mock.MockDeploymentManagerSyntaxSugar.Ops -import pl.touk.nussknacker.test.mock.{MockDeploymentManager, TestProcessChangeListener} import pl.touk.nussknacker.test.utils.domain.TestFactory._ import pl.touk.nussknacker.test.utils.domain.{ProcessTestData, TestFactory} import pl.touk.nussknacker.test.utils.scalas.DBIOActionValues import pl.touk.nussknacker.test.{EitherValuesDetailedMessage, NuScalaTestAssertions, PatientScalaFutures} import pl.touk.nussknacker.ui.api.DeploymentCommentSettings import pl.touk.nussknacker.ui.listener.ProcessChangeEvent.{OnActionExecutionFinished, OnActionSuccess} -import pl.touk.nussknacker.ui.process.deployment.ScenarioStateProvider.FragmentStateException +import pl.touk.nussknacker.ui.process.ScenarioQuery +import 
pl.touk.nussknacker.ui.process.deployment.scenariostatus.FragmentStateException import pl.touk.nussknacker.ui.process.periodic.flink.FlinkClientStub -import pl.touk.nussknacker.ui.process.processingtype.ValueWithRestriction -import pl.touk.nussknacker.ui.process.processingtype.provider.ProcessingTypeDataProvider.noCombinedDataFun -import pl.touk.nussknacker.ui.process.processingtype.provider.{ProcessingTypeDataProvider, ProcessingTypeDataState} import pl.touk.nussknacker.ui.process.repository.ProcessRepository.CreateProcessAction import pl.touk.nussknacker.ui.process.repository.{CommentValidationError, DBIOActionRunner} -import pl.touk.nussknacker.ui.process.{ScenarioQuery, ScenarioWithDetailsConversions} import pl.touk.nussknacker.ui.security.api.LoggedUser import slick.dbio.DBIOAction import java.util.UUID -import scala.concurrent.ExecutionContextExecutor import scala.concurrent.duration._ class DeploymentServiceSpec @@ -56,91 +52,33 @@ class DeploymentServiceSpec with WithClock with EitherValuesDetailedMessage { - private implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - - private implicit val system: ActorSystem = ActorSystem() - private implicit val user: LoggedUser = TestFactory.adminUser("user") - private implicit val ds: ExecutionContextExecutor = system.dispatcher - - private var deploymentManager: MockDeploymentManager = _ - override protected val dbioRunner: DBIOActionRunner = newDBIOActionRunner(testDbRef) - private val fetchingProcessRepository = newFetchingProcessRepository(testDbRef) - private val futureFetchingProcessRepository = newFutureFetchingScenarioRepository(testDbRef) - private val writeProcessRepository = newWriteProcessRepository(testDbRef, clock) - private val actionRepository = newActionProcessRepository(testDbRef) - private val activityRepository = newScenarioActivityRepository(testDbRef, clock) - - private val processingTypeDataProvider: ProcessingTypeDataProvider[DeploymentManager, Nothing] = - new 
ProcessingTypeDataProvider[DeploymentManager, Nothing] { - - override val state: ProcessingTypeDataState[DeploymentManager, Nothing] = - new ProcessingTypeDataState[DeploymentManager, Nothing] { - - override def all: Map[ProcessingType, ValueWithRestriction[DeploymentManager]] = Map( - "streaming" -> ValueWithRestriction.anyUser(deploymentManager) - ) - - override def getCombined: () => Nothing = noCombinedDataFun - override def stateIdentity: Any = deploymentManager - } + override protected val dbioRunner: DBIOActionRunner = newDBIOActionRunner(testDbRef) - } - - private val dmDispatcher = - new DeploymentManagerDispatcher(processingTypeDataProvider, futureFetchingProcessRepository) - - private val listener = new TestProcessChangeListener + private implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - private val scenarioStateProvider = createScenarioStateProvider(scenarioStateTimeout = None) + private implicit val user: LoggedUser = TestFactory.adminUser("user") - private val actionService = createActionService(deploymentCommentSettings = None) + private val writeProcessRepository = newWriteProcessRepository(testDbRef, clock) - private val deploymentService = createDeploymentService() + private val deploymentServiceFactory = new TestDeploymentServiceFactory(testDbRef) - private val initialVersionId = ProcessVersion.empty.versionId + import TestDeploymentServiceFactory._ + import deploymentServiceFactory._ - deploymentManager = MockDeploymentManager.create( + private val deploymentManager: MockDeploymentManager = MockDeploymentManager.create( defaultProcessStateStatus = SimpleStateStatus.Running, - deployedScenariosProvider = DefaultProcessingTypeDeployedScenariosProvider(testDbRef, "streaming"), - actionService = new DefaultProcessingTypeActionService("streaming", actionService), - scenarioActivityManager = new RepositoryBasedScenarioActivityManager(activityRepository, dbioRunner), + scenarioActivityManager = 
deploymentServiceFactory.deploymentManagerDependencies.scenarioActivityManager, ) - private def createDeploymentService( - deploymentCommentSettings: Option[DeploymentCommentSettings] = None, - ) = { - val actionService = createActionService(deploymentCommentSettings) - new DeploymentService( - dmDispatcher, - processValidatorByProcessingType, - TestFactory.scenarioResolverByProcessingType, - actionService, - additionalComponentConfigsByProcessingType, - ) - } - - private def createActionService(deploymentCommentSettings: Option[DeploymentCommentSettings]) = { - new ActionService( - dmDispatcher, - fetchingProcessRepository, - actionRepository, - dbioRunner, - listener, - scenarioStateProvider, - deploymentCommentSettings, - clock - ) - } - - private def createScenarioStateProvider(scenarioStateTimeout: Option[FiniteDuration]) = - ScenarioStateProvider(dmDispatcher, fetchingProcessRepository, actionRepository, dbioRunner, scenarioStateTimeout) + val TestDeploymentServiceServices(scenarioStatusProvider, actionService, deploymentService, reconciler) = + deploymentServiceFactory.create(deploymentManager) // TODO: temporary step - we would like to extract the validation and the comment validation tests to external validators private def createDeploymentServiceWithCommentSettings = { val commentSettings = DeploymentCommentSettings.unsafe(".+", Option("sampleComment")) - val deploymentServiceWithCommentSettings = - createDeploymentService(deploymentCommentSettings = Some(commentSettings)) - deploymentServiceWithCommentSettings + deploymentServiceFactory + .create(deploymentManager, deploymentCommentSettings = Some(commentSettings)) + .deploymentService } test("should return error when trying to deploy without comment when comment is required") { @@ -165,7 +103,8 @@ class DeploymentServiceSpec result.getMessage.trim shouldBe "Comment is required." 
eventually { - val inProgressActions = actionRepository.getInProgressActionNames(processIdWithName.id).dbioActionValues + val inProgressActions = + actionRepository.getInProgressActionNames(processIdWithName.id).dbioActionValues inProgressActions should have size 0 } } @@ -185,10 +124,9 @@ class DeploymentServiceSpec ) eventually { - val status = scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) + val status = scenarioStatusProvider + .getScenarioStatus(processIdWithName) .futureValue - .status status should not be SimpleStateStatus.Running @@ -216,10 +154,7 @@ class DeploymentServiceSpec ) eventually { - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.Running + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe SimpleStateStatus.Running } } @@ -236,10 +171,7 @@ class DeploymentServiceSpec .futureValue eventually { - val status = scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status + val status = scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue status should not be SimpleStateStatus.Canceled @@ -267,17 +199,11 @@ class DeploymentServiceSpec ) ) .futureValue - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.DuringDeploy + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe SimpleStateStatus.DuringDeploy } eventually { - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.Running + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe SimpleStateStatus.Running } } @@ -288,10 +214,7 @@ class DeploymentServiceSpec deploymentManager.withWaitForCancelFinish { 
deploymentService.processCommand(CancelScenarioCommand(CommonCommandData(processId, None, user))) eventually { - scenarioStateProvider - .getProcessState(processId, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.DuringCancel + scenarioStatusProvider.getScenarioStatus(processId).futureValue shouldBe SimpleStateStatus.DuringCancel } } } @@ -315,7 +238,7 @@ class DeploymentServiceSpec val (processId, deployActionId) = prepareDeployedProcess(processName).dbioActionValues checkIsFollowingDeploy( - scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue, + scenarioStatusProvider.getScenarioStatus(processId).futureValue, expected = true ) fetchingProcessRepository @@ -325,20 +248,7 @@ class DeploymentServiceSpec .lastStateAction should not be None deploymentManager.withProcessFinished(processName, DeploymentId.fromActionId(deployActionId)) { - // we simulate what happens when retrieveStatus is called multiple times to check only one comment is added - (1 to 5).foreach { _ => - checkIsFollowingDeploy( - scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue, - expected = false - ) - } - val finishedStatus = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - finishedStatus.status shouldBe SimpleStateStatus.Finished - finishedStatus.allowedActions shouldBe List( - ScenarioActionName.Deploy, - ScenarioActionName.Archive, - ScenarioActionName.Rename - ) + reconciler.synchronizeEngineFinishedDeploymentsLocalStatuses().futureValue } val processDetails = @@ -352,15 +262,12 @@ class DeploymentServiceSpec deploymentManager.withEmptyProcessState(processName) { val stateAfterJobRetention = - scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - stateAfterJobRetention.status shouldBe SimpleStateStatus.Finished + scenarioStatusProvider.getScenarioStatus(processId).futureValue + stateAfterJobRetention shouldBe 
SimpleStateStatus.Finished } archiveProcess(processId).dbioActionValues - scenarioStateProvider - .getProcessState(processId, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.Finished + scenarioStatusProvider.getScenarioStatus(processId).futureValue shouldBe SimpleStateStatus.Finished } test("Should finish deployment only after DeploymentManager finishes") { @@ -373,10 +280,7 @@ class DeploymentServiceSpec .dbioActionValues .flatMap(_.lastStateAction) .map(_.actionName) shouldBe expectedAction - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status shouldBe expectedStatus + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe expectedStatus } deploymentManager.withEmptyProcessState(processName) { @@ -458,10 +362,7 @@ class DeploymentServiceSpec listener.events shouldBe Symbol("empty") // during short period of time, status will be during deploy - because parallelism validation are done in the same critical section as deployment eventually { - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.NotDeployed + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe SimpleStateStatus.NotDeployed } } } @@ -471,10 +372,7 @@ class DeploymentServiceSpec val (processId, _) = prepareCanceledProcess(processName).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Canceled) { - scenarioStateProvider - .getProcessState(processId, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.Canceled + scenarioStatusProvider.getScenarioStatus(processId).futureValue shouldBe SimpleStateStatus.Canceled } } @@ -489,10 +387,7 @@ class DeploymentServiceSpec .lastStateAction should not be None deploymentManager.withEmptyProcessState(processName) { - scenarioStateProvider - .getProcessState(processId, 
Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.Canceled + scenarioStatusProvider.getScenarioStatus(processId).futureValue shouldBe SimpleStateStatus.Canceled } val processDetails = @@ -511,10 +406,7 @@ class DeploymentServiceSpec .lastStateAction should not be None deploymentManager.withEmptyProcessState(processName) { - scenarioStateProvider - .getProcessState(processId, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.Canceled + scenarioStatusProvider.getScenarioStatus(processId).futureValue shouldBe SimpleStateStatus.Canceled } val processDetails = @@ -527,13 +419,11 @@ class DeploymentServiceSpec val (processId, _) = prepareCanceledProcess(processName).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Running) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue val expectedStatus = ProblemStateStatus.shouldNotBeRunning(true) - state.status shouldBe expectedStatus - state.icon shouldBe ProblemStateStatus.icon - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - state.description shouldBe expectedStatus.description + state shouldBe expectedStatus + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } @@ -542,8 +432,8 @@ class DeploymentServiceSpec val processId = prepareProcess(processName).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Running) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.NotDeployed + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.NotDeployed } } @@ -552,9 +442,9 @@ class DeploymentServiceSpec val (processId, _) = 
prepareCanceledProcess(processName).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.DuringCancel) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue - state.status shouldBe SimpleStateStatus.DuringCancel + state shouldBe SimpleStateStatus.DuringCancel } } @@ -563,14 +453,17 @@ class DeploymentServiceSpec val (processId, _) = prepareDeployedProcess(processName).dbioActionValues val state = - StatusDetails(SimpleStateStatus.Restarting, None, Some(ExternalDeploymentId("12")), Some(ProcessVersion.empty)) + DeploymentStatusDetails( + status = SimpleStateStatus.Restarting, + deploymentId = None, + version = Some(VersionId.initialVersionId) + ) deploymentManager.withProcessStates(processName, List(state)) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue - state.status shouldBe SimpleStateStatus.Restarting - state.allowedActions shouldBe List(ScenarioActionName.Cancel) - state.description shouldBe "Scenario is restarting..." 
+ state shouldBe SimpleStateStatus.Restarting + getAllowedActions(state) shouldBe Set(ScenarioActionName.Cancel) } } @@ -579,13 +472,11 @@ class DeploymentServiceSpec val (processId, _) = prepareDeployedProcess(processName).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Canceled) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue val expectedStatus = ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin") - state.status shouldBe expectedStatus - state.icon shouldBe ProblemStateStatus.icon - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - state.description shouldBe expectedStatus.description + state shouldBe expectedStatus + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } @@ -594,61 +485,39 @@ class DeploymentServiceSpec val (processId, _) = prepareDeployedProcess(processName).dbioActionValues deploymentManager.withEmptyProcessState(processName) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue val expectedStatus = ProblemStateStatus.shouldBeRunning(VersionId(1L), "admin") - state.status shouldBe expectedStatus - state.icon shouldBe ProblemStateStatus.icon - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - state.description shouldBe expectedStatus.description + state shouldBe expectedStatus + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } test("Should return error state when state is running and process is deployed with mismatch versions") { val processName: ProcessName = generateProcessName val (processId, _) = prepareDeployedProcess(processName).dbioActionValues - val version = Some( - 
ProcessVersion( - versionId = VersionId(2), - processId = ProcessId(1), - processName = ProcessName(""), - labels = List.empty, - user = "other", - modelVersion = None - ) - ) + val version = Some(VersionId(2)) deploymentManager.withProcessStateVersion(processName, SimpleStateStatus.Running, version) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue val expectedStatus = ProblemStateStatus.mismatchDeployedVersion(VersionId(2L), VersionId(1L), "admin") - state.status shouldBe expectedStatus - state.icon shouldBe ProblemStateStatus.icon - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - state.description shouldBe expectedStatus.description + state shouldBe expectedStatus + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } test("Should always return process manager failure, even if some other verifications return invalid") { val processName: ProcessName = generateProcessName val (processId, _) = prepareDeployedProcess(processName).dbioActionValues - val version = Some( - ProcessVersion( - versionId = VersionId(2), - processId = ProcessId(1), - processName = ProcessName(""), - labels = List.empty, - user = "", - modelVersion = None - ) - ) + val version = Some(VersionId(2)) // FIXME: doesnt check recover from failed verifications ??? 
deploymentManager.withProcessStateVersion(processName, ProblemStateStatus.Failed, version) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue - state.status shouldBe ProblemStateStatus.Failed - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) + state shouldBe ProblemStateStatus.Failed + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } @@ -657,13 +526,11 @@ class DeploymentServiceSpec val (processId, _) = prepareDeployedProcess(processName).dbioActionValues deploymentManager.withProcessStateVersion(processName, SimpleStateStatus.Running, Option.empty) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue val expectedStatus = ProblemStateStatus.missingDeployedVersion(VersionId(1L), "admin") - state.status shouldBe expectedStatus - state.icon shouldBe ProblemStateStatus.icon - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - state.description shouldBe expectedStatus.description + state shouldBe expectedStatus + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } @@ -673,13 +540,11 @@ class DeploymentServiceSpec // FIXME: doesnt check recover from failed future of findJobStatus ??? 
deploymentManager.withProcessStateVersion(processName, ProblemStateStatus.FailedToGet, Option.empty) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue val expectedStatus = ProblemStateStatus.FailedToGet - state.status shouldBe expectedStatus - state.icon shouldBe ProblemStateStatus.icon - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - state.description shouldBe expectedStatus.description + state shouldBe expectedStatus + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } @@ -693,10 +558,9 @@ class DeploymentServiceSpec .lastStateAction shouldBe None deploymentManager.withEmptyProcessState(processName) { - scenarioStateProvider - .getProcessState(ProcessIdWithName(processId.id, processName), Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.NotDeployed + scenarioStatusProvider + .getScenarioStatus(ProcessIdWithName(processId.id, processName)) + .futureValue shouldBe SimpleStateStatus.NotDeployed } val processDetails = @@ -715,10 +579,7 @@ class DeploymentServiceSpec .lastStateAction shouldBe None deploymentManager.withEmptyProcessState(processName) { - scenarioStateProvider - .getProcessState(processId, Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.NotDeployed + scenarioStatusProvider.getScenarioStatus(processId).futureValue shouldBe SimpleStateStatus.NotDeployed } val processDetails = @@ -737,10 +598,9 @@ class DeploymentServiceSpec .lastStateAction shouldBe None deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Running) { - scenarioStateProvider - .getProcessState(ProcessIdWithName(processId.id, processName), Some(initialVersionId)) - .futureValue - .status shouldBe SimpleStateStatus.NotDeployed + scenarioStatusProvider + .getScenarioStatus(ProcessIdWithName(processId.id, 
processName)) + .futureValue shouldBe SimpleStateStatus.NotDeployed } val processDetails = @@ -753,8 +613,8 @@ class DeploymentServiceSpec val processName: ProcessName = generateProcessName val (processId, _) = prepareArchivedProcess(processName, None).dbioActionValues - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.NotDeployed + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.NotDeployed } test( @@ -764,8 +624,8 @@ class DeploymentServiceSpec val (processId, _) = prepareArchivedProcess(processName, None).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Running) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.NotDeployed + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.NotDeployed } } @@ -773,8 +633,8 @@ class DeploymentServiceSpec val processName: ProcessName = generateProcessName val (processId, _) = prepareArchivedProcess(processName, Some(Cancel)).dbioActionValues - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.Canceled + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.Canceled } test("Should return canceled status for archived canceled process with running state (it should never happen)") { @@ -782,8 +642,8 @@ class DeploymentServiceSpec val (processId, _) = prepareArchivedProcess(processName, Some(Cancel)).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Running) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.Canceled + val 
state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.Canceled } } @@ -791,8 +651,8 @@ class DeploymentServiceSpec val processName: ProcessName = generateProcessName val (processId, _) = preparedUnArchivedProcess(processName, None).dbioActionValues - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.NotDeployed + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.NotDeployed } test("Should return during deploy for process in deploy in progress") { @@ -802,43 +662,43 @@ class DeploymentServiceSpec .addInProgressAction(processId.id, ScenarioActionName.Deploy, Some(VersionId(1))) .dbioActionValues - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.DuringDeploy + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.DuringDeploy } - test("Should enrich BaseProcessDetails") { - prepareProcessesInProgress + test("Should getScenariosStatuses bulk with the same result as for single scenario") { + val (_, _, runningScenarioId) = prepareScenariosInVariousStates val processesDetails = fetchingProcessRepository .fetchLatestProcessesDetails[Unit](ScenarioQuery.empty) .dbioActionValues - val processesDetailsWithState = scenarioStateProvider - .enrichDetailsWithProcessState( - processesDetails - .map( - ScenarioWithDetailsConversions - .fromEntityIgnoringGraphAndValidationResult(_, ProcessTestData.sampleScenarioParameters) - ) - ) - .futureValue - - val statesBasedOnCachedInProgressActionTypes = processesDetailsWithState.map(_.state) + deploymentManager.withProcessRunning(runningScenarioId.name) { + val statesBasedOnCachedInProgressActionTypes = scenarioStatusProvider + .getScenariosStatuses(processesDetails) + .futureValue + 
.map(_.map(_.name)) - statesBasedOnCachedInProgressActionTypes.map(_.map(_.status.name)) shouldBe List( - Some("DURING_DEPLOY"), - Some("DURING_CANCEL"), - Some("RUNNING"), - None - ) + statesBasedOnCachedInProgressActionTypes shouldBe List( + Some("DURING_DEPLOY"), + Some("DURING_CANCEL"), + Some("RUNNING"), + None + ) - val statesBasedOnNotCachedInProgressActionTypes = - processesDetails - .map(pd => Option(pd).filterNot(_.isFragment).map(scenarioStateProvider.getProcessState).sequence) - .sequence - .futureValue + val statesBasedOnNotCachedInProgressActionTypes = + processesDetails + .map(pd => + Option(pd) + .filterNot(_.isFragment) + .map(scenarioStatusProvider.getAllowedActionsForScenarioStatus(_).map(_.scenarioStatus.name)) + .sequence + ) + .sequence + .futureValue - statesBasedOnCachedInProgressActionTypes shouldEqual statesBasedOnNotCachedInProgressActionTypes + statesBasedOnCachedInProgressActionTypes shouldEqual statesBasedOnNotCachedInProgressActionTypes + } } test( @@ -848,8 +708,8 @@ class DeploymentServiceSpec val (processId, _) = prepareArchivedProcess(processName, None).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Running) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.NotDeployed + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.NotDeployed } } @@ -857,8 +717,8 @@ class DeploymentServiceSpec val processName: ProcessName = generateProcessName val (processId, _) = prepareArchivedProcess(processName, Some(Deploy)).dbioActionValues - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe ProblemStateStatus.ArchivedShouldBeCanceled + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe ProblemStateStatus.ArchivedShouldBeCanceled } test("Should 
return canceled status for unarchived process") { @@ -866,8 +726,8 @@ class DeploymentServiceSpec val (processId, _) = prepareArchivedProcess(processName, Some(Cancel)).dbioActionValues deploymentManager.withEmptyProcessState(processName) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue - state.status shouldBe SimpleStateStatus.Canceled + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue + state shouldBe SimpleStateStatus.Canceled } } @@ -876,12 +736,10 @@ class DeploymentServiceSpec val (processId, _) = preparedUnArchivedProcess(processName, Some(Cancel)).dbioActionValues deploymentManager.withProcessStateStatus(processName, SimpleStateStatus.Running) { - val state = scenarioStateProvider.getProcessState(processId, Some(initialVersionId)).futureValue + val state = scenarioStatusProvider.getScenarioStatus(processId).futureValue val expectedStatus = ProblemStateStatus.shouldNotBeRunning(true) - state.status shouldBe expectedStatus - state.icon shouldBe ProblemStateStatus.icon - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Cancel) - state.description shouldBe expectedStatus.description + state shouldBe expectedStatus + getAllowedActions(state) shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Cancel) } } @@ -891,10 +749,7 @@ class DeploymentServiceSpec deploymentManager.withEmptyProcessState(processName) { val initialStatus = SimpleStateStatus.NotDeployed - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status shouldBe initialStatus + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe initialStatus deploymentManager.withWaitForDeployFinish(processName) { deploymentService .processCommand( @@ -905,16 +760,10 @@ class DeploymentServiceSpec ) ) .futureValue - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status 
shouldBe SimpleStateStatus.DuringDeploy + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe SimpleStateStatus.DuringDeploy actionService.invalidateInProgressActions() - scenarioStateProvider - .getProcessState(processIdWithName, Some(initialVersionId)) - .futureValue - .status shouldBe initialStatus + scenarioStatusProvider.getScenarioStatus(processIdWithName).futureValue shouldBe initialStatus } } } @@ -923,15 +772,15 @@ class DeploymentServiceSpec val processName: ProcessName = generateProcessName val (processId, _) = prepareDeployedProcess(processName).dbioActionValues - val timeout = 1.second - val serviceWithTimeout = createScenarioStateProvider(Some(timeout)) + val timeout = 1.second + val serviceWithTimeout = + deploymentServiceFactory.create(deploymentManager, scenarioStateTimeout = Some(timeout)).scenarioStatusProvider val durationLongerThanTimeout = timeout.plus(patienceConfig.timeout) deploymentManager.withDelayBeforeStateReturn(durationLongerThanTimeout) { val status = serviceWithTimeout - .getProcessState(processId, Some(initialVersionId)) + .getScenarioStatus(processId) .futureValueEnsuringInnerException(durationLongerThanTimeout) - .status status shouldBe ProblemStateStatus.FailedToGet } } @@ -941,7 +790,7 @@ class DeploymentServiceSpec val id = prepareFragment(processName).dbioActionValues assertThrowsWithParent[FragmentStateException.type] { - scenarioStateProvider.getProcessState(id, Some(initialVersionId)).futureValue + scenarioStatusProvider.getScenarioStatus(id).futureValue } } @@ -951,9 +800,9 @@ class DeploymentServiceSpec deploymentManager.deploys.clear() } - private def checkIsFollowingDeploy(state: ProcessState, expected: Boolean) = { - withClue(state) { - SimpleStateStatus.DefaultFollowingDeployStatuses.contains(state.status) shouldBe expected + private def checkIsFollowingDeploy(status: StateStatus, expected: Boolean) = { + withClue(status) { + 
SimpleStateStatus.DefaultFollowingDeployStatuses.contains(status) shouldBe expected } } @@ -976,7 +825,12 @@ class DeploymentServiceSpec for { (processId, actionIdOpt) <- prepareArchivedProcess(processName, actionNameOpt) _ <- writeProcessRepository.archive(processId = processId, isArchived = false) - _ <- actionRepository.addInstantAction(processId.id, initialVersionId, ScenarioActionName.UnArchive, None) + _ <- actionRepository.addInstantAction( + processId.id, + VersionId.initialVersionId, + ScenarioActionName.UnArchive, + None + ) } yield (processId, actionIdOpt) private def prepareArchivedProcess( @@ -992,14 +846,16 @@ class DeploymentServiceSpec private def archiveProcess(processId: ProcessIdWithName): DB[_] = { writeProcessRepository .archive(processId = processId, isArchived = true) - .flatMap(_ => actionRepository.addInstantAction(processId.id, initialVersionId, ScenarioActionName.Archive, None)) + .flatMap(_ => + actionRepository.addInstantAction(processId.id, VersionId.initialVersionId, ScenarioActionName.Archive, None) + ) } - private def prepareProcessesInProgress = { + private def prepareScenariosInVariousStates = { val duringDeployProcessName :: duringCancelProcessName :: otherProcess :: fragmentName :: Nil = (1 to 4).map(_ => generateProcessName).toList - val processIdsInProgress = for { + (for { (duringDeployProcessId, _) <- preparedUnArchivedProcess(duringDeployProcessName, None) (duringCancelProcessId, _) <- prepareDeployedProcess(duringCancelProcessName) _ <- actionRepository @@ -1014,12 +870,9 @@ class DeploymentServiceSpec ScenarioActionName.Cancel, Some(VersionId.initialVersionId), ) - _ <- prepareDeployedProcess(otherProcess) - _ <- prepareFragment(fragmentName) - } yield (duringDeployProcessId, duringCancelProcessId) - - val (duringDeployProcessId, duringCancelProcessId) = processIdsInProgress.dbioActionValues - (duringDeployProcessId, duringCancelProcessId) + runningScenario <- prepareDeployedProcess(otherProcess) + _ <- 
prepareFragment(fragmentName) + } yield (duringDeployProcessId, duringCancelProcessId, runningScenario._1)).dbioActionValues } private def prepareProcessWithAction( @@ -1034,7 +887,7 @@ class DeploymentServiceSpec private def prepareAction(processId: ProcessId, actionName: ScenarioActionName) = { val comment = Comment.from(actionName.toString.capitalize) - actionRepository.addInstantAction(processId, initialVersionId, actionName, comment).map(_.id) + actionRepository.addInstantAction(processId, VersionId.initialVersionId, actionName, comment).map(_.id) } private def prepareProcess(processName: ProcessName, parallelism: Option[Int] = None): DB[ProcessIdWithName] = { @@ -1077,4 +930,12 @@ class DeploymentServiceSpec ProcessName("proces_" + UUID.randomUUID()) } + private def getAllowedActions(status: StateStatus) = deploymentManager.processStateDefinitionManager.statusActions( + ScenarioStatusWithScenarioContext( + scenarioStatus = status, + deployedVersionId = None, + currentlyPresentedVersionId = None + ) + ) + } diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/TestDeploymentServiceFactory.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/TestDeploymentServiceFactory.scala new file mode 100644 index 00000000000..e771ed1cfad --- /dev/null +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/TestDeploymentServiceFactory.scala @@ -0,0 +1,130 @@ +package pl.touk.nussknacker.ui.process.deployment + +import akka.actor.ActorSystem +import cats.effect.unsafe.IORuntime +import db.util.DBIOActionInstances.DB +import pl.touk.nussknacker.engine.api.deployment.{ + DeploymentManager, + ProcessingTypeActionServiceStub, + ProcessingTypeDeployedScenariosProviderStub +} +import pl.touk.nussknacker.engine.compile.ProcessValidator +import pl.touk.nussknacker.engine.{DeploymentManagerDependencies, ModelData} +import pl.touk.nussknacker.test.config.WithSimplifiedDesignerConfig.TestProcessingType 
+import pl.touk.nussknacker.test.config.WithSimplifiedDesignerConfig.TestProcessingType.Streaming +import pl.touk.nussknacker.test.mock.{StubModelDataWithModelDefinition, TestProcessChangeListener} +import pl.touk.nussknacker.test.utils.domain.ProcessTestData.modelDefinition +import pl.touk.nussknacker.test.utils.domain.TestFactory._ +import pl.touk.nussknacker.test.utils.domain.{ProcessTestData, TestFactory} +import pl.touk.nussknacker.ui.api.DeploymentCommentSettings +import pl.touk.nussknacker.ui.db.DbRef +import pl.touk.nussknacker.ui.process.deployment.TestDeploymentServiceFactory.{actorSystem, clock, ec, processingType} +import pl.touk.nussknacker.ui.process.deployment.deploymentstatus.EngineSideDeploymentStatusesProvider +import pl.touk.nussknacker.ui.process.deployment.reconciliation.ScenarioDeploymentReconciler +import pl.touk.nussknacker.ui.process.deployment.scenariostatus.ScenarioStatusProvider +import pl.touk.nussknacker.ui.process.processingtype.ValueWithRestriction +import pl.touk.nussknacker.ui.process.processingtype.provider.ProcessingTypeDataProvider +import pl.touk.nussknacker.ui.process.repository.activities.ScenarioActivityRepository +import pl.touk.nussknacker.ui.process.repository.{DBFetchingProcessRepository, ScenarioActionRepository} +import sttp.client3.testing.SttpBackendStub + +import java.time.Clock +import scala.concurrent.ExecutionContext +import scala.concurrent.duration.FiniteDuration + +class TestDeploymentServiceFactory(dbRef: DbRef) { + + private val dbioRunner = newDBIOActionRunner(dbRef) + val fetchingProcessRepository: DBFetchingProcessRepository[DB] = newFetchingProcessRepository(dbRef) + val activityRepository: ScenarioActivityRepository = newScenarioActivityRepository(dbRef, clock) + val actionRepository: ScenarioActionRepository = newActionProcessRepository(dbRef) + val listener = new TestProcessChangeListener + + val deploymentManagerDependencies: DeploymentManagerDependencies = DeploymentManagerDependencies( + new 
ProcessingTypeDeployedScenariosProviderStub(List.empty), + new ProcessingTypeActionServiceStub, + new RepositoryBasedScenarioActivityManager(activityRepository, dbioRunner), + ec, + IORuntime.global, + actorSystem, + SttpBackendStub.asynchronousFuture + ) + + def create( + deploymentManager: DeploymentManager, + modelData: ModelData = new StubModelDataWithModelDefinition(modelDefinition()), + scenarioStateTimeout: Option[FiniteDuration] = None, + deploymentCommentSettings: Option[DeploymentCommentSettings] = None + ): TestDeploymentServiceServices = { + val processingTypeDataProvider = ProcessingTypeDataProvider.withEmptyCombinedData( + Map(processingType.stringify -> ValueWithRestriction.anyUser(deploymentManager)) + ) + + val dmDispatcher = { + val futureFetchingProcessRepository = newFutureFetchingScenarioRepository(dbRef) + new DeploymentManagerDispatcher(processingTypeDataProvider, futureFetchingProcessRepository) + } + + val deploymentsStatusesProvider = + new EngineSideDeploymentStatusesProvider(dmDispatcher, scenarioStateTimeout) + + val scenarioStatusProvider = { + new ScenarioStatusProvider( + deploymentsStatusesProvider, + dmDispatcher, + fetchingProcessRepository, + actionRepository, + dbioRunner + ) + } + + val actionService = { + new ActionService( + fetchingProcessRepository, + actionRepository, + dbioRunner, + listener, + scenarioStatusProvider, + deploymentCommentSettings, + clock + ) + } + + val deploymentsReconciler = + new ScenarioDeploymentReconciler( + Set(processingType.stringify), + deploymentsStatusesProvider, + actionRepository, + dbioRunner + ) + + val deploymentService = new DeploymentService( + dmDispatcher, + TestFactory.mapProcessingTypeDataProvider( + Streaming.stringify -> ProcessTestData.testProcessValidator(validator = ProcessValidator.default(modelData)) + ), + TestFactory.scenarioResolverByProcessingType, + actionService, + additionalComponentConfigsByProcessingType, + ) + TestDeploymentServiceServices(scenarioStatusProvider, 
actionService, deploymentService, deploymentsReconciler) + } + +} + +case class TestDeploymentServiceServices( + scenarioStatusProvider: ScenarioStatusProvider, + actionService: ActionService, + deploymentService: DeploymentService, + scenarioDeploymentReconciler: ScenarioDeploymentReconciler +) + +object TestDeploymentServiceFactory { + + implicit val actorSystem: ActorSystem = ActorSystem("TestDeploymentServiceFactory") + implicit val ec: ExecutionContext = actorSystem.dispatcher + val clock: Clock = Clock.systemUTC() + + val processingType: TestProcessingType = TestProcessingType.Streaming + +} diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetectorTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetectorTest.scala new file mode 100644 index 00000000000..20abb1fc181 --- /dev/null +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/deployment/scenariostatus/InconsistentStateDetectorTest.scala @@ -0,0 +1,98 @@ +package pl.touk.nussknacker.ui.process.deployment.scenariostatus + +import org.scalatest.funsuite.AnyFunSuiteLike +import org.scalatest.matchers.should.Matchers +import pl.touk.nussknacker.engine.api.deployment.{DeploymentStatusDetails, ScenarioActionName} +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus +import pl.touk.nussknacker.engine.deployment.DeploymentId + +import java.util.UUID + +// TODO: more unit tests, tests on higher level than (resolveScenarioStatus) not extractAtMostOneStatus +class InconsistentStateDetectorTest extends AnyFunSuiteLike with Matchers { + + test("return failed status if two deployments running") { + val firstDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Running, Some(DeploymentId(UUID.randomUUID().toString)), None) + val 
secondDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Running, Some(DeploymentId(UUID.randomUUID().toString)), None) + + InconsistentStateDetector.extractAtMostOneStatus(List(firstDeploymentStatus, secondDeploymentStatus)) shouldBe Some( + DeploymentStatusDetails( + ProblemStateStatus( + description = "More than one deployment is running.", + allowedActions = Set(ScenarioActionName.Cancel), + tooltip = Some( + s"Expected one job, instead: ${firstDeploymentStatus.deploymentIdUnsafe} - RUNNING, ${secondDeploymentStatus.deploymentIdUnsafe} - RUNNING" + ), + ), + firstDeploymentStatus.deploymentId, + None + ) + ) + } + + test("return failed status if two in non-terminal state") { + val firstDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Running, Some(DeploymentId(UUID.randomUUID().toString)), None) + val secondDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Restarting, Some(DeploymentId(UUID.randomUUID().toString)), None) + + InconsistentStateDetector.extractAtMostOneStatus(List(firstDeploymentStatus, secondDeploymentStatus)) shouldBe Some( + DeploymentStatusDetails( + ProblemStateStatus( + description = "More than one deployment is running.", + allowedActions = Set(ScenarioActionName.Cancel), + tooltip = Some( + s"Expected one job, instead: ${firstDeploymentStatus.deploymentIdUnsafe} - RUNNING, ${secondDeploymentStatus.deploymentIdUnsafe} - RESTARTING" + ), + ), + firstDeploymentStatus.deploymentId, + None + ) + ) + } + + test("return running status if cancelled job has last-modification date later than running job") { + val runningDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Running, Some(DeploymentId(UUID.randomUUID().toString)), None) + val canceledDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Canceled, Some(DeploymentId(UUID.randomUUID().toString)), None) + val duringCancelDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.DuringCancel,
Some(DeploymentId(UUID.randomUUID().toString)), None) + + InconsistentStateDetector.extractAtMostOneStatus( + List(runningDeploymentStatus, canceledDeploymentStatus, duringCancelDeploymentStatus) + ) shouldBe Some( + DeploymentStatusDetails( + SimpleStateStatus.Running, + runningDeploymentStatus.deploymentId, + None, + ) + ) + } + + test("return last terminal state if not running") { + val firstFinishedDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Finished, Some(DeploymentId(UUID.randomUUID().toString)), None) + val secondFinishedDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Finished, Some(DeploymentId(UUID.randomUUID().toString)), None) + + InconsistentStateDetector.extractAtMostOneStatus( + List(firstFinishedDeploymentStatus, secondFinishedDeploymentStatus) + ) shouldBe Some(firstFinishedDeploymentStatus) + } + + test("return non-terminal state if not running") { + val finishedDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Finished, Some(DeploymentId(UUID.randomUUID().toString)), None) + val nonTerminalDeploymentStatus = + DeploymentStatusDetails(SimpleStateStatus.Restarting, Some(DeploymentId(UUID.randomUUID().toString)), None) + + InconsistentStateDetector.extractAtMostOneStatus( + List(finishedDeploymentStatus, nonTerminalDeploymentStatus) + ) shouldBe Some(nonTerminalDeploymentStatus) + } + +} diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessServiceIntegrationTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessServiceIntegrationTest.scala index 3fb99e944f4..3d27ba84d8c 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessServiceIntegrationTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessServiceIntegrationTest.scala @@ -30,7 +30,7 @@ import pl.touk.nussknacker.test.base.it.WithClock import 
pl.touk.nussknacker.test.utils.domain.TestFactory import pl.touk.nussknacker.test.utils.domain.TestFactory.newWriteProcessRepository import pl.touk.nussknacker.test.utils.scalas.DBIOActionValues -import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicProcessStatus +import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicScenarioStatus import pl.touk.nussknacker.ui.process.periodic.flink.{DeploymentManagerStub, ScheduledExecutionPerformerStub} import pl.touk.nussknacker.ui.process.periodic.legacy.db.{LegacyDbInitializer, SlickLegacyPeriodicProcessesRepository} import pl.touk.nussknacker.ui.process.periodic.model._ @@ -299,7 +299,7 @@ class PeriodicProcessServiceIntegrationTest ) afterDeployDeployment.runAt shouldBe localTime(expectedScheduleTime) - f.delegateDeploymentManagerStub.setStateStatus( + f.delegateDeploymentManagerStub.setDeploymentStatus( processName, SimpleStateStatus.Finished, Some(afterDeployDeployment.id) @@ -392,7 +392,7 @@ class PeriodicProcessServiceIntegrationTest // finish all stateAfterDeploy.values.foreach(schedulesState => { val deployment = schedulesState.firstScheduleData.latestDeployments.head - f.delegateDeploymentManagerStub.setStateStatus( + f.delegateDeploymentManagerStub.setDeploymentStatus( processName, SimpleStateStatus.Finished, Some(deployment.id) @@ -565,12 +565,12 @@ class PeriodicProcessServiceIntegrationTest val deployment = toDeploy.find(_.scheduleName.value.contains(firstSchedule)).value service.deploy(deployment).futureValue - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Running, Some(deployment.id)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Running, Some(deployment.id)) val toDeployAfterDeploy = service.findToBeDeployed.futureValue toDeployAfterDeploy should have length 0 - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deployment.id)) + 
f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deployment.id)) service.handleFinished.futureValue val toDeployAfterFinish = service.findToBeDeployed.futureValue @@ -669,14 +669,19 @@ class PeriodicProcessServiceIntegrationTest val timeToTriggerSchedule1 = startTime.plus(1, ChronoUnit.HOURS) val timeToTriggerSchedule2 = startTime.plus(2, ChronoUnit.HOURS) - def mostImportantActiveDeployment = service - .getStatusDetails(processName) - .futureValue - .value - .status - .asInstanceOf[PeriodicProcessStatus] - .pickMostImportantActiveDeployment - .value + def mostImportantActiveDeployment = { + PeriodicProcessService + .pickMostImportantActiveDeployment( + service + .getMergedStatusDetails(processName) + .futureValue + .value + .status + .asInstanceOf[PeriodicScenarioStatus] + .activeDeploymentsStatuses + ) + .value + } val schedule1 = "schedule1" val schedule2 = "schedule2" @@ -797,7 +802,7 @@ class PeriodicProcessServiceIntegrationTest toDeploy should have length 1 val deployment = toDeploy.head service.deploy(deployment).futureValue - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deployment.id)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deployment.id)) tryWithFailedListener { () => service.deactivate(processName) @@ -829,7 +834,7 @@ class PeriodicProcessServiceIntegrationTest val deployment = toDeploy.head service.deploy(deployment).futureValue - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, Some(deployment.id)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deployment.id)) // this one is cyclically called by RescheduleActor service.handleFinished.futureValue diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessesFetchingTest.scala 
b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessesFetchingTest.scala index b296f1271ef..f8312cac91c 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessesFetchingTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/PeriodicProcessesFetchingTest.scala @@ -88,7 +88,7 @@ class PeriodicProcessesFetchingTest implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.CanBeCached for (i <- 1 to n) { - f.periodicProcessService.getStatusDetails(processName(i)).futureValue + f.periodicProcessService.getMergedStatusDetails(processName(i)).futureValue } getLatestDeploymentQueryCount.get() shouldEqual 2 * n @@ -112,9 +112,9 @@ class PeriodicProcessesFetchingTest implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - val statuses = f.periodicProcessService.stateQueryForAllScenariosSupport - .asInstanceOf[StateQueryForAllScenariosSupported] - .getAllProcessesStates() + val statuses = f.periodicProcessService.deploymentsStatusesQueryForAllScenariosSupport + .asInstanceOf[DeploymentsStatusesQueryForAllScenariosSupported] + .getAllScenariosDeploymentsStatuses() .futureValue .value diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/DeploymentManagerStub.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/DeploymentManagerStub.scala index 3aa6e2c0e8b..492a2948832 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/DeploymentManagerStub.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/DeploymentManagerStub.scala @@ -1,20 +1,18 @@ package pl.touk.nussknacker.ui.process.periodic.flink import pl.touk.nussknacker.engine.api.deployment._ -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, 
ProcessName, VersionId} -import pl.touk.nussknacker.engine.deployment.{DeploymentId, ExternalDeploymentId} -import pl.touk.nussknacker.engine.testing.StubbingCommands +import pl.touk.nussknacker.engine.api.process.ProcessName +import pl.touk.nussknacker.engine.deployment.DeploymentId import pl.touk.nussknacker.ui.process.periodic.model.PeriodicProcessDeploymentId import scala.collection.concurrent.TrieMap import scala.concurrent.Future -class DeploymentManagerStub extends BaseDeploymentManager with StubbingCommands { +class DeploymentManagerStub extends BaseDeploymentManager { - val jobStatus: TrieMap[ProcessName, List[StatusDetails]] = TrieMap.empty + val jobStatus: TrieMap[ProcessName, List[DeploymentStatusDetails]] = TrieMap.empty - def getJobStatus(processName: ProcessName): Option[List[StatusDetails]] = { + def getJobStatus(processName: ProcessName): Option[List[DeploymentStatusDetails]] = { jobStatus.get(processName) } @@ -30,20 +28,16 @@ class DeploymentManagerStub extends BaseDeploymentManager with StubbingCommands jobStatus.put( processName, List( - StatusDetails( + DeploymentStatusDetails( deploymentId = deploymentIdOpt.map(pdid => DeploymentId(pdid.toString)), - externalDeploymentId = Some(ExternalDeploymentId("1")), status = status, version = None, - startTime = None, - attributes = None, - errors = Nil ) ) ) } - def setStateStatus( + def setDeploymentStatus( processName: ProcessName, status: StateStatus, deploymentIdOpt: Option[PeriodicProcessDeploymentId] @@ -51,54 +45,43 @@ class DeploymentManagerStub extends BaseDeploymentManager with StubbingCommands jobStatus.put( processName, List( - StatusDetails( + DeploymentStatusDetails( deploymentId = deploymentIdOpt.map(pdid => DeploymentId(pdid.toString)), - externalDeploymentId = Some(ExternalDeploymentId("1")), status = status, version = None, - startTime = None, - attributes = None, - errors = Nil ) ) ) } - override def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - 
lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = - Future.successful( - processStateDefinitionManager.processState( - statusDetails.headOption.getOrElse(StatusDetails(SimpleStateStatus.NotDeployed, None)), - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - ) - - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - Future.successful(WithDataFreshnessStatus.fresh(getJobStatus(name).toList.flatten)) + override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { + Future.successful(WithDataFreshnessStatus.fresh(getJobStatus(scenarioName).toList.flatten)) } override def deploymentSynchronisationSupport: DeploymentSynchronisationSupport = NoDeploymentSynchronisationSupport override def schedulingSupport: SchedulingSupport = NoSchedulingSupport - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = - new StateQueryForAllScenariosSupported { + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + new DeploymentsStatusesQueryForAllScenariosSupported { - override def getAllProcessesStates()( + override def getAllScenariosDeploymentsStatuses()( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[Map[ProcessName, List[StatusDetails]]]] = + ): Future[WithDataFreshnessStatus[Map[ProcessName, List[DeploymentStatusDetails]]]] = Future.successful(WithDataFreshnessStatus.fresh(jobStatus.toMap)) } + override def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] = command match { + case _: DMValidateScenarioCommand => Future.successful(()) + case 
_: DMRunDeploymentCommand => Future.successful(None) + case _: DMCancelScenarioCommand => Future.successful(()) + case _: DMCancelDeploymentCommand => Future.successful(()) + case _: DMStopScenarioCommand | _: DMStopDeploymentCommand | _: DMMakeScenarioSavepointCommand | + _: DMRunOffScheduleCommand | _: DMTestScenarioCommand => + notImplemented + } + } diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/FlinkClientStub.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/FlinkClientStub.scala index 56c72dcc2eb..5b5ae61654c 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/FlinkClientStub.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/FlinkClientStub.scala @@ -1,8 +1,14 @@ package pl.touk.nussknacker.ui.process.periodic.flink +import org.apache.flink.api.common.JobID import org.apache.flink.configuration.Configuration import pl.touk.nussknacker.engine.api.deployment.{DataFreshnessPolicy, SavepointResult, WithDataFreshnessStatus} -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId +import pl.touk.nussknacker.engine.management.rest.flinkRestModel.{ + ClusterOverview, + ExecutionConfig, + JobDetails, + JobOverview +} import pl.touk.nussknacker.engine.management.rest.{FlinkClient, flinkRestModel} import java.io.File @@ -16,22 +22,22 @@ object FlinkClientStub extends FlinkClient { override def getJobsOverviews()( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[List[flinkRestModel.JobOverview]]] = + ): Future[WithDataFreshnessStatus[List[JobOverview]]] = Future.successful(WithDataFreshnessStatus.fresh(List.empty)) - override def getJobDetails(jobId: String): Future[Option[flinkRestModel.JobDetails]] = Future.successful(None) + override def getJobDetails(jobId: JobID): Future[Option[JobDetails]] = Future.successful(None) - override def getJobConfig(jobId: String): 
Future[flinkRestModel.ExecutionConfig] = - Future.successful(flinkRestModel.ExecutionConfig(1, Map.empty)) + override def getJobConfig(jobId: JobID): Future[ExecutionConfig] = + Future.successful(ExecutionConfig(1, Map.empty)) - override def cancel(deploymentId: ExternalDeploymentId): Future[Unit] = Future.successful(()) + override def cancel(jobId: JobID): Future[Unit] = Future.successful(()) override def makeSavepoint( - deploymentId: ExternalDeploymentId, + jobId: JobID, savepointDir: Option[String] ): Future[SavepointResult] = Future.successful(SavepointResult(savepointPath)) - override def stop(deploymentId: ExternalDeploymentId, savepointDir: Option[String]): Future[SavepointResult] = + override def stop(jobId: JobID, savepointDir: Option[String]): Future[SavepointResult] = Future.successful(SavepointResult(stopSavepointPath)) override def runProgram( @@ -39,14 +45,14 @@ object FlinkClientStub extends FlinkClient { mainClass: String, args: List[String], savepointPath: Option[String], - jobId: Option[String] - ): Future[Option[ExternalDeploymentId]] = Future.successful(None) + jobId: Option[JobID] + ): Future[Option[JobID]] = Future.successful(None) override def deleteJarIfExists(jarFileName: String): Future[Unit] = Future.successful(()) - override def getClusterOverview: Future[flinkRestModel.ClusterOverview] = + override def getClusterOverview: Future[ClusterOverview] = Future.successful( - flinkRestModel.ClusterOverview(`slots-total` = maxParallelism, `slots-available` = maxParallelism) + ClusterOverview(`slots-total` = maxParallelism, `slots-available` = maxParallelism) ) override def getJobManagerConfig: Future[Configuration] = Future.successful(new Configuration) diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicDeploymentManagerTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicDeploymentManagerTest.scala index 1ab3c591fec..93204a18dab 100644 --- 
a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicDeploymentManagerTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicDeploymentManagerTest.scala @@ -1,12 +1,17 @@ package pl.touk.nussknacker.ui.process.periodic.flink import org.scalatest.concurrent.ScalaFutures +import org.scalatest.exceptions.TestFailedException import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers import org.scalatest.prop.TableDrivenPropertyChecks import org.scalatest.{Inside, OptionValues} +import pl.touk.nussknacker.engine.api.component.NodesDeploymentData import pl.touk.nussknacker.engine.api.deployment.DeploymentUpdateStrategy.StateRestoringStrategy +import pl.touk.nussknacker.engine.api.deployment.DeploymentUpdateStrategy.StateRestoringStrategy.RestoreStateFromReplacedJobSavepoint +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment._ +import pl.touk.nussknacker.engine.api.deployment.scheduler.services.{EmptyListener, ProcessConfigEnricher} import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessIdWithName, ProcessName, VersionId} @@ -14,13 +19,20 @@ import pl.touk.nussknacker.engine.api.{MetaData, ProcessVersion, StreamMetaData} import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.deployment.{DeploymentData, User} import pl.touk.nussknacker.test.PatientScalaFutures -import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicProcessStatus +import pl.touk.nussknacker.test.base.db.WithHsqlDbTesting +import pl.touk.nussknacker.test.utils.domain.TestFactory +import 
pl.touk.nussknacker.test.utils.domain.TestFactory.newWriteProcessRepository +import pl.touk.nussknacker.test.utils.scalas.DBIOActionValues +import pl.touk.nussknacker.ui.process.deployment.{CommonCommandData, RunDeploymentCommand, TestDeploymentServiceFactory} +import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicScenarioStatus import pl.touk.nussknacker.ui.process.periodic.PeriodicStateStatus.{ScheduledStatus, WaitingForScheduleStatus} import pl.touk.nussknacker.ui.process.periodic._ -import pl.touk.nussknacker.ui.process.periodic.flink.db.InMemPeriodicProcessesRepository -import pl.touk.nussknacker.ui.process.periodic.model.PeriodicProcessDeploymentStatus -import pl.touk.nussknacker.engine.api.deployment.scheduler.services.{EmptyListener, ProcessConfigEnricher} import pl.touk.nussknacker.ui.process.periodic.cron.CronSchedulePropertyExtractor +import pl.touk.nussknacker.ui.process.periodic.flink.db.InMemPeriodicProcessesRepository +import pl.touk.nussknacker.ui.process.periodic.model.{PeriodicProcessDeploymentId, PeriodicProcessDeploymentStatus} +import pl.touk.nussknacker.ui.process.repository.DBIOActionRunner +import pl.touk.nussknacker.ui.process.repository.ProcessRepository.{CreateProcessAction, ProcessCreated} +import pl.touk.nussknacker.ui.security.api.LoggedUser import java.time.{Clock, LocalDateTime, ZoneOffset} import java.util.UUID @@ -32,36 +44,33 @@ class PeriodicDeploymentManagerTest with OptionValues with Inside with TableDrivenPropertyChecks - with PatientScalaFutures { + with PatientScalaFutures + with DBIOActionValues + with WithHsqlDbTesting { protected implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh + private implicit val user: LoggedUser = TestFactory.adminUser("user") + + import TestDeploymentServiceFactory._ import org.scalatest.LoneElement._ - import scala.concurrent.ExecutionContext.Implicits.global + override protected def dbioRunner: DBIOActionRunner = DBIOActionRunner(testDbRef) - 
private val processName = ProcessName("test1") - private val processId = ProcessId(1) - private val idWithName = ProcessIdWithName(processId, processName) + private val writeProcessRepository = newWriteProcessRepository(testDbRef, clock) + + private val processName = ProcessName("test") private val updateStrategy = DeploymentUpdateStrategy.ReplaceDeploymentWithSameScenarioName( StateRestoringStrategy.RestoreStateFromReplacedJobSavepoint ) - private val processVersion = ProcessVersion( - versionId = VersionId(42L), - processName = processName, - processId = processId, - labels = List.empty, - user = "test user", - modelVersion = None - ) - class Fixture(executionConfig: PeriodicExecutionConfig = PeriodicExecutionConfig()) { - val repository = new InMemPeriodicProcessesRepository(processingType = "testProcessingType") - val delegateDeploymentManagerStub = new DeploymentManagerStub - val scheduledExecutionPerformerStub = new ScheduledExecutionPerformerStub - val preparedDeploymentData = DeploymentData.withDeploymentId(UUID.randomUUID().toString) + val repository = + new InMemPeriodicProcessesRepository(processingType = TestDeploymentServiceFactory.processingType.stringify) + val delegateDeploymentManagerStub = new DeploymentManagerStub + val scheduledExecutionPerformerStub = new ScheduledExecutionPerformerStub + val preparedDeploymentData: DeploymentData = DeploymentData.withDeploymentId(UUID.randomUUID().toString) val periodicProcessService = new PeriodicProcessService( delegateDeploymentManager = delegateDeploymentManagerStub, @@ -78,6 +87,35 @@ class PeriodicDeploymentManagerTest Map.empty, ) + def saveScenario(cronProperty: String = "0 0 * * * ?"): ProcessVersion = { + val scenario = PeriodicProcessGen.buildCanonicalProcess(cronProperty) + saveScenario(scenario) + } + + def saveScenario(scenario: CanonicalProcess): ProcessVersion = { + val action = CreateProcessAction( + processName = processName, + category = "Category1", + canonicalProcess = scenario, + 
processingType = TestDeploymentServiceFactory.processingType.stringify, + isFragment = false, + forwardedUserName = None + ) + val ProcessCreated(processId, versionId) = + writeProcessRepository.saveNewProcess(action).dbioActionValues.value + + ProcessVersion( + versionId = versionId, + processName = processName, + processId = processId, + labels = List.empty, + user = "test user", + modelVersion = None + ) + } + + // TODO: Instead of using PeriodicDeploymentManager directly, we should use DeploymentService/ScenarioStateProvider + // Thanks to that, we will see the process from user perspective - real scenario statuses etc. val periodicDeploymentManager = new PeriodicDeploymentManager( delegate = delegateDeploymentManagerStub, service = periodicProcessService, @@ -86,133 +124,172 @@ class PeriodicDeploymentManagerTest toClose = () => (), ) + private val deploymentServiceFactory = new TestDeploymentServiceFactory(testDbRef) + private val services = deploymentServiceFactory.create(periodicDeploymentManager) + + def schedule(id: ProcessId): Unit = services.deploymentService + .processCommand( + RunDeploymentCommand( + CommonCommandData(ProcessIdWithName(id, processName), None, user), + RestoreStateFromReplacedJobSavepoint, + NodesDeploymentData.empty + ) + ) + .futureValue + .futureValue + + def getSingleActiveDeploymentId(id: ProcessId): PeriodicProcessDeploymentId = inside(getScenarioStatus(id)) { + case periodic: PeriodicScenarioStatus => + periodic.activeDeploymentsStatuses.loneElement.deploymentId + } + def getAllowedActions( - statusDetails: StatusDetails, - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): List[ScenarioActionName] = { + scenarioStatus: StateStatus + ): Set[ScenarioActionName] = { periodicDeploymentManager.processStateDefinitionManager - .processState(statusDetails, latestVersionId, deployedVersionId, currentlyPresentedVersionId) - .allowedActions + .statusActions(
ScenarioStatusWithScenarioContext( + scenarioStatus = scenarioStatus, + deployedVersionId = None, + currentlyPresentedVersionId = Some(VersionId.initialVersionId) + ) + ) } - def getMergedStatusDetails: StatusDetails = - periodicProcessService - .getStatusDetails(processName) - .futureValue - .value - .status - .asInstanceOf[PeriodicProcessStatus] - .mergedStatusDetails + def getScenarioStatus(id: ProcessId): StateStatus = + services.scenarioStatusProvider.getScenarioStatus(ProcessIdWithName(id, processName)).futureValue } - test("getProcessState - should return not deployed for no job") { - val f = new Fixture + implicit class ScenarioStatusOps(scenarioStatus: StateStatus) { - val state = f.getMergedStatusDetails.status + // We simulate logic that is available in PeriodicProcessStateDefinitionManager where we extract PeriodicProcessStatusWithMergedStatus.mergedStatus + // or fallback to no extraction to handle both statuses returned by DM and by core (ScenarioStatusProvider) + // to get a scenario status domain representation that is a SimpleStateStatus or ScheduledStatus + def mergedStatus: StateStatus = + inside(scenarioStatus) { + case periodic: PeriodicScenarioStatus => + periodic.mergedStatus + case other: StateStatus => + other + } + + } + + test("getScenarioStatus - should return not deployed for no job") { + val f = new Fixture + val version = f.saveScenario() + + val state = f.getScenarioStatus(version.processId).mergedStatus state shouldEqual SimpleStateStatus.NotDeployed } - test("getProcessState - should return not deployed for scenario with different processing type") { - val f = new Fixture + test("getScenarioStatus - should return not deployed for scenario with different processing type") { + val f = new Fixture + val version = f.saveScenario() f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Scheduled, processingType = "other") - val state = f.getMergedStatusDetails.status + val state = 
f.getScenarioStatus(version.processId).mergedStatus state shouldEqual SimpleStateStatus.NotDeployed } - test("getProcessState - should be scheduled when scenario scheduled and no job on Flink") { + test("getScenarioStatus - should be scheduled when scenario scheduled and no job on Flink") { val f = new Fixture - f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Scheduled) - val statusDetails = f.getMergedStatusDetails - statusDetails.status shouldBe a[ScheduledStatus] - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + val version = f.saveScenario() + f.schedule(version.processId) + + val scenarioStatus = f.getScenarioStatus(version.processId) + scenarioStatus.mergedStatus shouldBe a[ScheduledStatus] + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel, - ScenarioActionName.Deploy + ScenarioActionName.Deploy, ) - f.periodicDeploymentManager - .getProcessState( - idWithName, - None, - processVersion.versionId, - Some(processVersion.versionId), - Some(processVersion.versionId) - ) - .futureValue - .value - .status shouldBe a[ScheduledStatus] + f.getScenarioStatus(version.processId).mergedStatus shouldBe a[ScheduledStatus] } - test("getProcessState - should be scheduled when scenario scheduled and job finished on Flink") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Scheduled) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) + test("getScenarioStatus - should be scheduled when scenario scheduled and job finished on Flink") { + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) - val statusDetails =
f.getMergedStatusDetails - statusDetails.status shouldBe a[ScheduledStatus] - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + val scenarioStatus = f.getScenarioStatus(version.processId) + scenarioStatus.mergedStatus shouldBe a[ScheduledStatus] + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel, ScenarioActionName.Deploy ) } - test("getProcessState - should be finished when scenario finished and job finished on Flink") { - val f = new Fixture + test("getScenarioStatus - should be finished when scenario finished and job finished on Flink") { + val f = new Fixture + // We use repository/periodicDeploymentManager directly because run deployment won't accept date in past val periodicProcessId = f.repository.addOnlyProcess(processName, CronScheduleProperty("0 0 0 1 1 ? 1970")) val deploymentId = f.repository.addOnlyDeployment( periodicProcessId, PeriodicProcessDeploymentStatus.Finished, LocalDateTime.ofEpochSecond(0, 0, ZoneOffset.UTC) ) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) f.periodicProcessService.deactivate(processName).futureValue - val state = + val scenarioStatus = f.periodicDeploymentManager - .getProcessState(idWithName, None, processVersion.versionId, None, Some(processVersion.versionId)) + .getScenarioDeploymentsStatuses(processName) .futureValue .value + .loneElement + .status - state.status shouldBe SimpleStateStatus.Finished - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) + scenarioStatus.mergedStatus shouldBe SimpleStateStatus.Finished + val allowedActions = f.getAllowedActions(scenarioStatus) + allowedActions shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) } - 
test("getProcessState - should be running when scenario deployed and job running on Flink") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Running, Some(deploymentId)) - - val statusDetails = f.getMergedStatusDetails - statusDetails.status shouldBe SimpleStateStatus.Running - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + test("getScenarioStatus - should be running when scenario deployed and job running on Flink") { + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Running, Some(deploymentId)) + + val scenarioStatus = f.getScenarioStatus(version.processId) + scenarioStatus.mergedStatus shouldBe SimpleStateStatus.Running + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel ) } - test("getProcessState - should be waiting for reschedule if job finished on Flink but scenario is still deployed") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) - - val statusDetails = f.getMergedStatusDetails - statusDetails.status shouldBe WaitingForScheduleStatus - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + test("getScenarioStatus - should be waiting for reschedule if job finished on Flink but scenario is still deployed") { + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = 
f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) + + val scenarioStatus = f.getScenarioStatus(version.processId) + scenarioStatus.mergedStatus shouldBe WaitingForScheduleStatus + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel ) } - test("getProcessState - should be failed after unsuccessful deployment") { - val f = new Fixture - f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Failed) + test("getScenarioStatus - should be failed after unsuccessful deployment") { + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markFailed(deploymentId) - val statusDetails = f.getMergedStatusDetails - statusDetails.status shouldBe ProblemStateStatus.Failed - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + val scenarioStatus = f.getScenarioStatus(version.processId) + scenarioStatus.mergedStatus shouldBe ProblemStateStatus.Failed + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel ) } @@ -221,46 +298,39 @@ class PeriodicDeploymentManagerTest val f = new Fixture val emptyScenario = CanonicalProcess(MetaData("fooId", StreamMetaData()), List.empty) + val version = f.saveScenario(emptyScenario) val validateResult = f.periodicDeploymentManager - .processCommand(DMValidateScenarioCommand(processVersion, DeploymentData.empty, emptyScenario, updateStrategy)) + .processCommand(DMValidateScenarioCommand(version, DeploymentData.empty, emptyScenario, updateStrategy)) .failed .futureValue validateResult shouldBe a[PeriodicProcessException] val deploymentResult = f.periodicDeploymentManager - .processCommand(DMRunDeploymentCommand(processVersion, DeploymentData.empty, 
emptyScenario, updateStrategy)) + .processCommand(DMRunDeploymentCommand(version, DeploymentData.empty, emptyScenario, updateStrategy)) .failed .futureValue deploymentResult shouldBe a[PeriodicProcessException] } test("deploy - should schedule periodic scenario") { - val f = new Fixture - - f.periodicDeploymentManager - .processCommand( - DMRunDeploymentCommand( - processVersion, - f.preparedDeploymentData, - PeriodicProcessGen.buildCanonicalProcess(), - updateStrategy - ) - ) - .futureValue + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) f.repository.processEntities.loneElement.active shouldBe true f.repository.deploymentEntities.loneElement.status shouldBe PeriodicProcessDeploymentStatus.Scheduled } test("deploy - should not cancel current schedule after trying to deploy with past date") { - val f = new Fixture + val f = new Fixture + val version = f.saveScenario() f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Scheduled) f.periodicDeploymentManager .processCommand( DMRunDeploymentCommand( - processVersion, + version, DeploymentData.empty, PeriodicProcessGen.buildCanonicalProcess("0 0 0 ? 
* * 2000"), updateStrategy @@ -274,55 +344,48 @@ class PeriodicDeploymentManagerTest } test("deploy - should cancel existing scenario if already scheduled") { - val f = new Fixture - f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Scheduled) + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) - f.periodicDeploymentManager - .processCommand( - DMRunDeploymentCommand( - processVersion, - f.preparedDeploymentData, - PeriodicProcessGen.buildCanonicalProcess(), - updateStrategy - ) - ) - .futureValue + f.schedule(version.processId) f.repository.processEntities should have size 2 f.repository.processEntities.map(_.active) shouldBe List(false, true) } test("should get status of failed job") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) - - val statusDetails = f.getMergedStatusDetails - statusDetails.status shouldBe ProblemStateStatus.Failed - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) + + val scenarioStatus = f.getScenarioStatus(version.processId) + scenarioStatus.mergedStatus shouldBe ProblemStateStatus.Failed + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel ) } test("should redeploy failed scenario") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, 
Some(deploymentId)) - val statusDetailsBeforeRedeploy = f.getMergedStatusDetails - statusDetailsBeforeRedeploy.status shouldBe ProblemStateStatus.Failed - f.getAllowedActions( - statusDetailsBeforeRedeploy, - processVersion.versionId, - None, - Some(processVersion.versionId) - ) shouldBe List( + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) + val statusBeforeRedeploy = f.getScenarioStatus(version.processId) + statusBeforeRedeploy.mergedStatus shouldBe ProblemStateStatus.Failed + f.getAllowedActions(statusBeforeRedeploy) shouldBe Set( ScenarioActionName.Cancel ) // redeploy is blocked in GUI but API allows it f.periodicDeploymentManager .processCommand( DMRunDeploymentCommand( - processVersion, + version, f.preparedDeploymentData, PeriodicProcessGen.buildCanonicalProcess(), updateStrategy @@ -335,28 +398,24 @@ class PeriodicDeploymentManagerTest PeriodicProcessDeploymentStatus.Failed, PeriodicProcessDeploymentStatus.Scheduled ) - val statusDetailsAfterRedeploy = f.getMergedStatusDetails + val scenarioStatusAfterRedeploy = f.getScenarioStatus(version.processId) // Previous job is still visible as Failed. 
- statusDetailsAfterRedeploy.status shouldBe a[ScheduledStatus] - f.getAllowedActions( - statusDetailsAfterRedeploy, - processVersion.versionId, - None, - Some(processVersion.versionId) - ) shouldBe List( + scenarioStatusAfterRedeploy.mergedStatus shouldBe a[ScheduledStatus] + f.getAllowedActions(scenarioStatusAfterRedeploy) shouldBe Set( ScenarioActionName.Cancel, ScenarioActionName.Deploy ) } test("should redeploy scheduled scenario") { - val f = new Fixture - f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Scheduled) + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) f.periodicDeploymentManager .processCommand( DMRunDeploymentCommand( - processVersion, + version, f.preparedDeploymentData, PeriodicProcessGen.buildCanonicalProcess(), updateStrategy @@ -372,18 +431,21 @@ class PeriodicDeploymentManagerTest } test("should redeploy running scenario") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Running, Some(deploymentId)) - val statusDetails = f.getMergedStatusDetails - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Running, Some(deploymentId)) + val scenarioStatus = f.getScenarioStatus(version.processId) + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel ) // redeploy is blocked in GUI but API allows it f.periodicDeploymentManager .processCommand( DMRunDeploymentCommand( - processVersion, + version, f.preparedDeploymentData, PeriodicProcessGen.buildCanonicalProcess(), 
updateStrategy @@ -399,18 +461,21 @@ class PeriodicDeploymentManagerTest } test("should redeploy finished scenario") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) - val statusDetails = f.getMergedStatusDetails - f.getAllowedActions(statusDetails, processVersion.versionId, None, Some(processVersion.versionId)) shouldBe List( + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) + val scenarioStatus = f.getScenarioStatus(version.processId) + f.getAllowedActions(scenarioStatus) shouldBe Set( ScenarioActionName.Cancel ) // redeploy is blocked in GUI but API allows it f.periodicDeploymentManager .processCommand( DMRunDeploymentCommand( - processVersion, + version, f.preparedDeploymentData, PeriodicProcessGen.buildCanonicalProcess(), updateStrategy @@ -426,14 +491,17 @@ class PeriodicDeploymentManagerTest } test("should cancel failed job after RescheduleActor handles finished") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) // this one is cyclically called by RescheduleActor f.periodicProcessService.handleFinished.futureValue - 
f.getMergedStatusDetails.status shouldEqual ProblemStateStatus.Failed + f.getScenarioStatus(version.processId).mergedStatus shouldEqual ProblemStateStatus.Failed f.repository.deploymentEntities.loneElement.status shouldBe PeriodicProcessDeploymentStatus.Failed f.repository.processEntities.loneElement.active shouldBe true @@ -442,18 +510,21 @@ class PeriodicDeploymentManagerTest f.repository.processEntities.loneElement.active shouldBe false f.repository.deploymentEntities.loneElement.status shouldBe PeriodicProcessDeploymentStatus.Failed - f.getMergedStatusDetails.status shouldEqual SimpleStateStatus.Canceled + f.getScenarioStatus(version.processId).mergedStatus shouldEqual SimpleStateStatus.Canceled } test("should reschedule failed job after RescheduleActor handles finished when configured") { - val f = new Fixture(executionConfig = PeriodicExecutionConfig(rescheduleOnFailure = true)) - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) + val f = new Fixture(executionConfig = PeriodicExecutionConfig(rescheduleOnFailure = true)) + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) // this one is cyclically called by RescheduleActor f.periodicProcessService.handleFinished.futureValue - f.getMergedStatusDetails.status shouldBe a[ScheduledStatus] + f.getScenarioStatus(version.processId).mergedStatus shouldBe a[ScheduledStatus] f.repository.deploymentEntities.map(_.status) shouldBe List( PeriodicProcessDeploymentStatus.Failed, PeriodicProcessDeploymentStatus.Scheduled @@ -462,21 +533,27 @@ class PeriodicDeploymentManagerTest } test("should cancel failed job before 
RescheduleActor handles finished") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) f.periodicDeploymentManager.processCommand(DMCancelScenarioCommand(processName, User("test", "Tester"))).futureValue f.repository.processEntities.loneElement.active shouldBe false f.repository.deploymentEntities.loneElement.status shouldBe PeriodicProcessDeploymentStatus.Failed - f.getMergedStatusDetails.status shouldEqual SimpleStateStatus.Canceled + f.getScenarioStatus(version.processId).mergedStatus shouldEqual SimpleStateStatus.Canceled } test("should cancel failed scenario after disappeared from Flink console") { - val f = new Fixture - val deploymentId = f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) + val f = new Fixture + val version = f.saveScenario() + f.schedule(version.processId) + val deploymentId = f.getSingleActiveDeploymentId(version.processId) + f.repository.markDeployed(deploymentId) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) // this one is cyclically called by RescheduleActor f.periodicProcessService.handleFinished.futureValue @@ -484,7 +561,7 @@ class PeriodicDeploymentManagerTest // after some time Flink stops returning job status f.delegateDeploymentManagerStub.jobStatus.clear() - f.getMergedStatusDetails.status shouldEqual ProblemStateStatus.Failed + 
f.getScenarioStatus(version.processId).mergedStatus shouldEqual ProblemStateStatus.Failed f.repository.deploymentEntities.loneElement.status shouldBe PeriodicProcessDeploymentStatus.Failed f.repository.processEntities.loneElement.active shouldBe true @@ -492,36 +569,36 @@ class PeriodicDeploymentManagerTest f.repository.processEntities.loneElement.active shouldBe false f.repository.deploymentEntities.loneElement.status shouldBe PeriodicProcessDeploymentStatus.Failed - f.getMergedStatusDetails.status shouldBe SimpleStateStatus.Canceled + f.getScenarioStatus(version.processId).mergedStatus shouldBe SimpleStateStatus.Canceled } - test("should take into account only latest deployments of active schedules during merged status computation") { - val f = new Fixture - val processId = f.repository.addOnlyProcess(processName) - val firstDeploymentRunAt = LocalDateTime.of(2023, 1, 1, 10, 0) - f.repository.addOnlyDeployment(processId, PeriodicProcessDeploymentStatus.Failed, firstDeploymentRunAt) - f.repository.addOnlyDeployment( - processId, - PeriodicProcessDeploymentStatus.Finished, - firstDeploymentRunAt.plusHours(1) - ) - - f.getMergedStatusDetails.status shouldBe WaitingForScheduleStatus - } - - test( - "should take into account only latest inactive schedule request (periodic process) during merged status computation" - ) { - val f = new Fixture - val firstProcessId = f.repository.addOnlyProcess(processName) - f.repository.addOnlyDeployment(firstProcessId, PeriodicProcessDeploymentStatus.Failed) - f.repository.markInactive(firstProcessId) - - val secProcessId = f.repository.addOnlyProcess(processName) - f.repository.addOnlyDeployment(secProcessId, PeriodicProcessDeploymentStatus.Finished) - f.repository.markInactive(secProcessId) - - f.getMergedStatusDetails.status shouldBe SimpleStateStatus.Finished - } +// test("should take into account only latest deployments of active schedules during merged status computation") { +// val f = new Fixture +// val processId = 
f.repository.addOnlyProcess(processName) +// val firstDeploymentRunAt = LocalDateTime.of(2023, 1, 1, 10, 0) +// f.repository.addOnlyDeployment(processId, PeriodicProcessDeploymentStatus.Failed, firstDeploymentRunAt) +// f.repository.addOnlyDeployment( +// processId, +// PeriodicProcessDeploymentStatus.Finished, +// firstDeploymentRunAt.plusHours(1) +// ) +// +// f.getScenarioStatus.mergedStatus shouldBe WaitingForScheduleStatus +// } +// +// test( +// "should take into account only latest inactive schedule request (periodic process) during merged status computation" +// ) { +// val f = new Fixture +// val firstProcessId = f.repository.addOnlyProcess(processName) +// f.repository.addOnlyDeployment(firstProcessId, PeriodicProcessDeploymentStatus.Failed) +// f.repository.markInactive(firstProcessId) +// +// val secProcessId = f.repository.addOnlyProcess(processName) +// f.repository.addOnlyDeployment(secProcessId, PeriodicProcessDeploymentStatus.Finished) +// f.repository.markInactive(secProcessId) +// +// f.getScenarioStatus.mergedStatus shouldBe SimpleStateStatus.Finished +// } } diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessGen.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessGen.scala index 7111db87542..abda6406c2f 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessGen.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessGen.scala @@ -4,6 +4,7 @@ import pl.touk.nussknacker.engine.api.deployment.scheduler.model.{DeploymentWith import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId} import pl.touk.nussknacker.engine.build.ScenarioBuilder import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess +import pl.touk.nussknacker.test.utils.domain.ProcessTestData import pl.touk.nussknacker.ui.process.periodic.CronScheduleProperty 
import pl.touk.nussknacker.ui.process.periodic.cron.CronSchedulePropertyExtractor.CronPropertyDefaultName import pl.touk.nussknacker.ui.process.periodic.model.{PeriodicProcess, PeriodicProcessId} @@ -32,8 +33,8 @@ object PeriodicProcessGen { ScenarioBuilder .streaming("test") .additionalFields(properties = Map(CronPropertyDefaultName -> cronProperty)) - .source("test", "test") - .emptySink("test", "test") + .source("source", ProcessTestData.existingSourceFactory) + .emptySink("sink", ProcessTestData.existingSinkFactory) } } diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessServiceTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessServiceTest.scala index 9a8e39abdf8..57b52135cfa 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessServiceTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessServiceTest.scala @@ -9,16 +9,15 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.prop.TableDrivenPropertyChecks import pl.touk.nussknacker.engine.api.ProcessVersion import pl.touk.nussknacker.engine.api.deployment.scheduler.model.ScheduledDeploymentDetails -import pl.touk.nussknacker.engine.api.deployment.scheduler.services._ -import pl.touk.nussknacker.engine.api.deployment.scheduler.services.AdditionalDeploymentDataProvider import pl.touk.nussknacker.engine.api.deployment.scheduler.services.ProcessConfigEnricher.EnrichedProcessConfig +import pl.touk.nussknacker.engine.api.deployment.scheduler.services._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus import pl.touk.nussknacker.engine.api.deployment.{DataFreshnessPolicy, ProcessActionId, ProcessingTypeActionServiceStub} import pl.touk.nussknacker.engine.api.process.{ProcessId, 
ProcessName, VersionId} import pl.touk.nussknacker.engine.build.ScenarioBuilder import pl.touk.nussknacker.test.PatientScalaFutures -import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicProcessStatus +import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicScenarioStatus import pl.touk.nussknacker.ui.process.periodic._ import pl.touk.nussknacker.ui.process.periodic.flink.db.InMemPeriodicProcessesRepository import pl.touk.nussknacker.ui.process.periodic.flink.db.InMemPeriodicProcessesRepository.createPeriodicProcessDeployment @@ -225,7 +224,7 @@ class PeriodicProcessServiceTest PeriodicProcessDeploymentStatus.Deployed, processActionId = Some(processActionId) ) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) f.periodicProcessService.handleFinished.futureValue @@ -260,7 +259,7 @@ class PeriodicProcessServiceTest PeriodicProcessDeploymentStatus.Deployed, processActionId = Some(processActionId) ) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.DuringDeploy, Some(deploymentId)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.DuringDeploy, Some(deploymentId)) f.periodicProcessService.handleFinished.futureValue @@ -277,7 +276,7 @@ class PeriodicProcessServiceTest scheduleProperty = cronInPast, processActionId = Some(processActionId) ) - f.delegateDeploymentManagerStub.setStateStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, SimpleStateStatus.Finished, Some(deploymentId)) f.periodicProcessService.handleFinished.futureValue @@ -364,7 +363,7 @@ class PeriodicProcessServiceTest test("handleFinished - should mark as failed for failed Flink job") { val f = new Fixture val deploymentId = 
f.repository.addActiveProcess(processName, PeriodicProcessDeploymentStatus.Deployed) - f.delegateDeploymentManagerStub.setStateStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) + f.delegateDeploymentManagerStub.setDeploymentStatus(processName, ProblemStateStatus.Failed, Some(deploymentId)) f.periodicProcessService.handleFinished.futureValue @@ -514,13 +513,16 @@ class PeriodicProcessServiceTest val activeSchedules = f.periodicProcessService.getLatestDeploymentsForActiveSchedules(processName).futureValue activeSchedules should have size (schedules.size) - val deployment = f.periodicProcessService - .getStatusDetails(processName) - .futureValue - .value - .status - .asInstanceOf[PeriodicProcessStatus] - .pickMostImportantActiveDeployment + val deployment = PeriodicProcessService + .pickMostImportantActiveDeployment( + f.periodicProcessService + .getMergedStatusDetails(processName) + .futureValue + .value + .status + .asInstanceOf[PeriodicScenarioStatus] + .activeDeploymentsStatuses + ) .value deployment.status shouldBe expectedStatus diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessStateDefinitionManagerTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessStateDefinitionManagerTest.scala index 49f51d166d1..30505f25cb1 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessStateDefinitionManagerTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/process/periodic/flink/PeriodicProcessStateDefinitionManagerTest.scala @@ -2,11 +2,11 @@ package pl.touk.nussknacker.ui.process.periodic.flink import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus -import pl.touk.nussknacker.engine.api.deployment.ScenarioActionName +import 
pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext +import pl.touk.nussknacker.engine.api.deployment.{DeploymentStatusDetails, ScenarioActionName} import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.process.VersionId -import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.{PeriodicDeploymentStatus, PeriodicProcessStatus} +import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessService.PeriodicDeploymentStatus import pl.touk.nussknacker.ui.process.periodic.PeriodicProcessStateDefinitionManager.statusTooltip import pl.touk.nussknacker.ui.process.periodic.PeriodicStateStatus import pl.touk.nussknacker.ui.process.periodic.PeriodicStateStatus.ScheduledStatus @@ -39,8 +39,7 @@ class PeriodicProcessStateDefinitionManagerTest extends AnyFunSuite with Matcher processActive = true, None ) - val status = PeriodicProcessStatus(List(deploymentStatus), List.empty) - statusTooltip(status) shouldEqual "Scheduled at: 2023-01-01 10:00 status: Scheduled" + statusTooltip(List(deploymentStatus), List.empty) shouldEqual "Scheduled at: 2023-01-01 10:00 status: Scheduled" } test("display sorted periodic deployment status for named schedules") { @@ -64,17 +63,15 @@ class PeriodicProcessStateDefinitionManagerTest extends AnyFunSuite with Matcher processActive = true, None ) - val status = PeriodicProcessStatus(List(firstDeploymentStatus, secDeploymentStatus), List.empty) - statusTooltip(status) shouldEqual + statusTooltip(List(firstDeploymentStatus, secDeploymentStatus), List.empty) shouldEqual s"""Schedule ${secScheduleId.scheduleName.display} scheduled at: 2023-01-01 10:00 status: Scheduled, |Schedule ${firstScheduleId.scheduleName.display} scheduled at: 2023-01-01 10:00 status: Deployed""".stripMargin } test("not display custom tooltip for perform single execution when latest version is deployed") { PeriodicStateStatus.customActionTooltips( - 
ProcessStatus( - stateStatus = ScheduledStatus(nextRunAt = LocalDateTime.now()), - latestVersionId = VersionId(5), + ScenarioStatusWithScenarioContext( + scenarioStatus = ScheduledStatus(nextRunAt = LocalDateTime.now()), deployedVersionId = Some(VersionId(5)), currentlyPresentedVersionId = Some(VersionId(5)), ) @@ -85,9 +82,8 @@ class PeriodicProcessStateDefinitionManagerTest extends AnyFunSuite with Matcher "display custom tooltip for perform single execution when deployed version is different than currently displayed" ) { PeriodicStateStatus.customActionTooltips( - ProcessStatus( - stateStatus = ScheduledStatus(nextRunAt = LocalDateTime.now()), - latestVersionId = VersionId(5), + ScenarioStatusWithScenarioContext( + scenarioStatus = ScheduledStatus(nextRunAt = LocalDateTime.now()), deployedVersionId = Some(VersionId(4)), currentlyPresentedVersionId = Some(VersionId(5)), ) @@ -98,9 +94,8 @@ class PeriodicProcessStateDefinitionManagerTest extends AnyFunSuite with Matcher test("display custom tooltip for perform single execution in CANCELED state") { PeriodicStateStatus.customActionTooltips( - ProcessStatus( - stateStatus = SimpleStateStatus.Canceled, - latestVersionId = VersionId(5), + ScenarioStatusWithScenarioContext( + scenarioStatus = SimpleStateStatus.Canceled, deployedVersionId = Some(VersionId(4)), currentlyPresentedVersionId = Some(VersionId(5)), ) diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatisticsTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatisticsTest.scala index 5b25535f359..cee347e23ef 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatisticsTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/ScenarioStatisticsTest.scala @@ -23,7 +23,7 @@ class ScenarioStatisticsTest isFragment = false, ProcessingMode.UnboundedStream, DeploymentManagerType("flinkStreaming"), - Some(SimpleStateStatus.Running), + 
Some(SimpleStateStatus.Running.name), nodesCount = 2, scenarioCategory = "Category1", scenarioVersion = VersionId(2), @@ -139,7 +139,7 @@ class ScenarioStatisticsTest isFragment = false, ProcessingMode.UnboundedStream, DeploymentManagerType("foo"), - status, + status.map(_.name), nodesCount = 2, scenarioCategory = "Category1", scenarioVersion = VersionId(2), diff --git a/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsServiceTest.scala b/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsServiceTest.scala index e94c084fdc5..502c290d659 100644 --- a/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsServiceTest.scala +++ b/designer/server/src/test/scala/pl/touk/nussknacker/ui/statistics/UsageStatisticsReportsSettingsServiceTest.scala @@ -308,7 +308,7 @@ class UsageStatisticsReportsSettingsServiceTest isFragment = false, ProcessingMode.UnboundedStream, DeploymentManagerType("flinkStreaming"), - Some(SimpleStateStatus.NotDeployed), + Some(SimpleStateStatus.NotDeployed.name), nodesCount = 3, scenarioCategory = "Category1", scenarioVersion = VersionId(2), @@ -322,7 +322,7 @@ class UsageStatisticsReportsSettingsServiceTest isFragment = false, ProcessingMode.UnboundedStream, DeploymentManagerType("flinkStreaming"), - Some(SimpleStateStatus.Running), + Some(SimpleStateStatus.Running.name), nodesCount = 2, scenarioCategory = "Category1", scenarioVersion = VersionId(3), @@ -350,7 +350,7 @@ class UsageStatisticsReportsSettingsServiceTest isFragment = false, ProcessingMode.RequestResponse, DeploymentManagerType("lite-k8s"), - Some(SimpleStateStatus.Running), + Some(SimpleStateStatus.Running.name), nodesCount = 4, scenarioCategory = "Category1", scenarioVersion = VersionId(2), diff --git a/docs/MigrationGuide.md b/docs/MigrationGuide.md index 5ddd97fedf2..f5bd2d22d30 100644 --- a/docs/MigrationGuide.md +++ b/docs/MigrationGuide.md @@ -74,7 
+74,15 @@ To see the biggest differences please consult the [changelog](Changelog.md). `FlinkStreamingRestManager` and `FlinkRestManager` abstraction layers were removed - only `FlinkDeploymentManager` exists * [#7563](https://github.com/TouK/nussknacker/pull/7563) `ProcessConfigCreator.buildInfo` and `NkGlobalParameters.buildInfo` were renamed to `modelInfo` Also, they return a `ModelInfo` value class now. To create it from `Map`, use `ModelInfo.fromMap`. To access underlying map, use `ModelInfo.parameters`. - +* [#7566](https://github.com/TouK/nussknacker/pull/7566) Scenario status and deployment statuses are decoupled now + * Changes in `DeploymentManager` interface + * `DeploymentManager` has only `getScenarioDeploymentsStatuses` method (previous `getProcessStates` returning `List[StatusDetails]`). + * Method `DeploymentManager.resolve` should be removed - this work is done by Designer itself + * `DeploymentManagerInconsistentStateHandlerMixIn` mixin should also be removed + * `stateQueryForAllScenariosSupport` was renamed to `deploymentsStatusesQueryForAllScenariosSupport` + * Other changes: + * `StatusDetails` was renamed to `DeploymentStatusDetails` + * Fields: `externalDeploymentId` and `attributes` were removed from `StatusDetails` ### Other changes diff --git a/docs/configuration/ScenarioDeploymentConfiguration.md b/docs/configuration/ScenarioDeploymentConfiguration.md index 38997369f4f..969eb5bc83a 100644 --- a/docs/configuration/ScenarioDeploymentConfiguration.md +++ b/docs/configuration/ScenarioDeploymentConfiguration.md @@ -330,7 +330,6 @@ Deployment Manager of type `flinkStreaming` has the following configuration opti | scenarioStateRequestTimeout | duration | 3 seconds | Request timeout for fetching scenario state from Flink | | jobConfigsCacheSize | int | 1000 | Maximum number of cached job configuration elements. 
| | miniCluster.config | map of strings | [:] | Configuration that will be passed to shared `MiniCluster` | -| miniCluster.streamExecutionEnvConfig | map of strings | [:] | Configuration that will be passed to shared `StreamExecutionEnvironment` used along with `MiniCluster` | | | miniCluster.waitForJobManagerRestAPIAvailableTimeout | duration | 10 seconds | How long Nussknacker should wait fo Flink Mini Cluster REST endpoint. It is only used when `useMiniClusterForDeployment` is enabled | | | scenarioTesting.reuseSharedMiniCluster | boolean | true | Reuses shared mini cluster for each scenario testing attempt | | scenarioTesting.timeout | duration | 55 seconds | Timeout for scenario testing. When scenario test is not finished during this time, testing job will be canceled. This property should be configured along with `akka.http.server.request-timeout` for proper effect. | diff --git a/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentDeploymentManagerProvider.scala b/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentDeploymentManagerProvider.scala index a27c2a17523..3bbf4769ec8 100644 --- a/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentDeploymentManagerProvider.scala +++ b/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentDeploymentManagerProvider.scala @@ -26,8 +26,7 @@ import scala.util.{Failure, Success} class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, modelData: BaseModelData) extends DeploymentManager - with LazyLogging - with DeploymentManagerInconsistentStateHandlerMixIn { + with LazyLogging { import SimpleStateStatus._ import dependencies._ @@ -39,15 +38,14 @@ class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, private val MinSleepTimeSeconds = 5 private val MaxSleepTimeSeconds = 12 - 
private val memory: TrieMap[ProcessName, StatusDetails] = TrieMap[ProcessName, StatusDetails]() - private val random = new scala.util.Random() + private val memory: TrieMap[ProcessName, DeploymentStatusDetails] = TrieMap[ProcessName, DeploymentStatusDetails]() + private val random = new scala.util.Random() private val miniClusterWithServices = FlinkMiniClusterFactory .createMiniClusterWithServices( modelData.modelClassLoader, new Configuration, - new Configuration ) private lazy val flinkTestRunner = @@ -58,20 +56,6 @@ class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, waitForJobIsFinishedRetryPolicy = 20.seconds.toPausePolicy ) - implicit private class ProcessStateExpandable(processState: StatusDetails) { - - def withStateStatus(stateStatus: StateStatus): StatusDetails = { - StatusDetails( - stateStatus, - processState.deploymentId, - processState.externalDeploymentId, - processState.version, - Some(System.currentTimeMillis()) - ) - } - - } - override def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] = command match { case DMValidateScenarioCommand(_, _, canonicalProcess, _) => if (description(canonicalProcess).contains(descriptionForValidationFail)) { @@ -119,7 +103,7 @@ class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, case None => changeState(processVersion.processName, NotDeployed) } } else { - result.complete(Success(duringDeployStateStatus.externalDeploymentId)) + result.complete(Success(duringDeployStateStatus.deploymentId.map(_.value).map(ExternalDeploymentId(_)))) asyncChangeState(processVersion.processName, Running) } } @@ -146,10 +130,10 @@ class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, Future.unit } - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - Future.successful(WithDataFreshnessStatus.fresh(memory.get(name).toList)) 
+ override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { + Future.successful(WithDataFreshnessStatus.fresh(memory.get(scenarioName).toList)) } override def processStateDefinitionManager: ProcessStateDefinitionManager = @@ -164,15 +148,16 @@ class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, } private def changeState(name: ProcessName, stateStatus: StateStatus): Unit = - memory.get(name).foreach { processState => - val newProcessState = processState.withStateStatus(stateStatus) + memory.get(name).foreach { statusDetails => + val newProcessState = statusDetails.copy(status = stateStatus) memory.update(name, newProcessState) - logger.debug(s"Changed scenario $name state from ${processState.status.name} to ${stateStatus.name}.") + logger.debug(s"Changed scenario $name state from ${statusDetails.status.name} to ${stateStatus.name}.") } private def asyncChangeState(name: ProcessName, stateStatus: StateStatus): Unit = - memory.get(name).foreach { processState => - logger.debug(s"Starting async changing state for $name from ${processState.status.name} to ${stateStatus.name}..") + memory.get(name).foreach { statusDetails => + logger + .debug(s"Starting async changing state for $name from ${statusDetails.status.name} to ${stateStatus.name}..") actorSystem.scheduler.scheduleOnce( sleepingTimeSeconds, new Runnable { @@ -182,17 +167,18 @@ class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, ) } - private def createAndSaveProcessState(stateStatus: StateStatus, processVersion: ProcessVersion): StatusDetails = { - val processState = StatusDetails( - stateStatus, - None, - Some(ExternalDeploymentId(UUID.randomUUID().toString)), - version = Some(processVersion), - startTime = Some(System.currentTimeMillis()), + private def createAndSaveProcessState( + stateStatus: StateStatus, + 
processVersion: ProcessVersion + ): DeploymentStatusDetails = { + val statusDetails = DeploymentStatusDetails( + status = stateStatus, + deploymentId = None, + version = Some(processVersion.versionId), ) - memory.update(processVersion.processName, processState) - processState + memory.update(processVersion.processName, statusDetails) + statusDetails } private def sleepingTimeSeconds = FiniteDuration( @@ -202,7 +188,8 @@ class DevelopmentDeploymentManager(dependencies: DeploymentManagerDependencies, override def deploymentSynchronisationSupport: DeploymentSynchronisationSupport = NoDeploymentSynchronisationSupport - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = NoStateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + NoDeploymentsStatusesQueryForAllScenariosSupport override def schedulingSupport: SchedulingSupport = NoSchedulingSupport } diff --git a/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentProcessStateDefinitionManager.scala b/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentProcessStateDefinitionManager.scala index bd0ca90b5b0..692a27adaea 100644 --- a/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentProcessStateDefinitionManager.scala +++ b/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/DevelopmentProcessStateDefinitionManager.scala @@ -1,6 +1,6 @@ package pl.touk.nussknacker.development.manager -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.StateDefinitionDetails.UnknownIcon import 
pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName import pl.touk.nussknacker.engine.api.deployment._ @@ -18,14 +18,11 @@ object DevelopmentStateStatus { val PreparingResourcesStatus: StateStatus = StateStatus("PREPARING") val TestStatus: StateStatus = StateStatus("TEST") - val AfterRunningActionName: ScenarioActionName = ScenarioActionName("AFTER") - val PreparingResourcesActionName: ScenarioActionName = ScenarioActionName("PREPARING") - val TestActionName: ScenarioActionName = ScenarioActionName("TEST") - - val statusActionsPF: PartialFunction[ProcessStatus, List[ScenarioActionName]] = _.stateStatus match { - case DevelopmentStateStatus.AfterRunningStatus => List(ScenarioActionName.Cancel) - case DevelopmentStateStatus.PreparingResourcesStatus => List(ScenarioActionName.Deploy) - case DevelopmentStateStatus.TestStatus => List(ScenarioActionName.Deploy) + val statusActionsPF: PartialFunction[ScenarioStatusWithScenarioContext, Set[ScenarioActionName]] = { + case input if input.scenarioStatus == DevelopmentStateStatus.AfterRunningStatus => Set(ScenarioActionName.Cancel) + case input if input.scenarioStatus == DevelopmentStateStatus.PreparingResourcesStatus => + Set(ScenarioActionName.Deploy) + case input if input.scenarioStatus == DevelopmentStateStatus.TestStatus => Set(ScenarioActionName.Deploy) } val customStateDefinitions: Map[StatusName, StateDefinitionDetails] = Map( diff --git a/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/MockableDeploymentManagerProvider.scala b/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/MockableDeploymentManagerProvider.scala index b5c44dcc349..6a0d0b8aff3 100644 --- a/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/MockableDeploymentManagerProvider.scala +++ 
b/engine/development/deploymentManager/src/main/scala/pl/touk/nussknacker/development/manager/MockableDeploymentManagerProvider.scala @@ -18,13 +18,12 @@ import pl.touk.nussknacker.engine.flink.minicluster.scenariotesting.FlinkMiniClu import pl.touk.nussknacker.engine.flink.minicluster.util.DurationToRetryPolicyConverterOps._ import pl.touk.nussknacker.engine.management.FlinkStreamingPropertiesConfig import pl.touk.nussknacker.engine.newdeployment.DeploymentId -import pl.touk.nussknacker.engine.testing.StubbingCommands import pl.touk.nussknacker.engine.testmode.TestProcess.TestResults import java.time.Instant import java.util.concurrent.atomic.AtomicReference -import scala.concurrent.{ExecutionContext, Future} import scala.concurrent.duration.{DurationInt, FiniteDuration} +import scala.concurrent.{ExecutionContext, Future} import scala.util.Try class MockableDeploymentManagerProvider extends DeploymentManagerProvider { @@ -61,14 +60,12 @@ object MockableDeploymentManagerProvider { implicit executionContext: ExecutionContext, ioRuntime: IORuntime ) extends DeploymentManager - with ManagerSpecificScenarioActivitiesStoredByManager - with StubbingCommands { + with ManagerSpecificScenarioActivitiesStoredByManager { private lazy val miniClusterWithServicesOpt = modelDataOpt.map { modelData => FlinkMiniClusterFactory.createMiniClusterWithServices( modelData.modelClassLoader, new Configuration, - new Configuration ) } @@ -82,36 +79,31 @@ object MockableDeploymentManagerProvider { ) } - override def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = { - Future.successful( - processStateDefinitionManager.processState( - statusDetails.head, - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId - ) - ) - } - override def 
processStateDefinitionManager: ProcessStateDefinitionManager = SimpleProcessStateDefinitionManager - override def getProcessStates(name: ProcessName)( + override def getScenarioDeploymentsStatuses(scenarioName: ProcessName)( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - val status = MockableDeploymentManager.scenarioStatuses.get().getOrElse(name.value, SimpleStateStatus.NotDeployed) - Future.successful(WithDataFreshnessStatus.fresh(List(StatusDetails(status, None)))) + ): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { + val statusDetails = MockableDeploymentManager.scenarioStatuses + .get() + .getOrElse(scenarioName.value, BasicStatusDetails(SimpleStateStatus.NotDeployed, version = None)) + Future.successful( + WithDataFreshnessStatus.fresh( + List( + DeploymentStatusDetails( + statusDetails.status, + None, + version = statusDetails.version + ) + ) + ) + ) } override def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] = { command match { + case _: DMValidateScenarioCommand => Future.successful(()) case DMRunDeploymentCommand(_, deploymentData, _, _) => Future { deploymentData.deploymentId.toNewDeploymentIdOpt @@ -129,14 +121,16 @@ object MockableDeploymentManagerProvider { s"Tests results not mocked for scenario [${processVersion.processName.value}] and no model data provided" ) ) - case other => - super.processCommand(other) + case _: DMCancelScenarioCommand | _: DMStopScenarioCommand | _: DMStopDeploymentCommand | + _: DMCancelDeploymentCommand | _: DMMakeScenarioSavepointCommand | _: DMRunOffScheduleCommand => + notImplemented } } override def deploymentSynchronisationSupport: DeploymentSynchronisationSupport = NoDeploymentSynchronisationSupport - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = NoStateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: 
DeploymentsStatusesQueryForAllScenariosSupport = + NoDeploymentsStatusesQueryForAllScenariosSupport override def schedulingSupport: SchedulingSupport = NoSchedulingSupport @@ -156,12 +150,12 @@ object MockableDeploymentManagerProvider { // improved, but there is no need to do it ATM. object MockableDeploymentManager { - private val scenarioStatuses = new AtomicReference[Map[ScenarioName, StateStatus]](Map.empty) + private val scenarioStatuses = new AtomicReference[Map[ScenarioName, BasicStatusDetails]](Map.empty) private val testResults = new AtomicReference[Map[ScenarioName, TestResults[Json]]](Map.empty) private val deploymentResults = new AtomicReference[Map[DeploymentId, Try[Option[ExternalDeploymentId]]]](Map.empty) private val managerSpecificScenarioActivities = new AtomicReference[List[ScenarioActivity]](List.empty) - def configureScenarioStatuses(scenarioStates: Map[ScenarioName, StateStatus]): Unit = { + def configureScenarioStatuses(scenarioStates: Map[ScenarioName, BasicStatusDetails]): Unit = { MockableDeploymentManager.scenarioStatuses.set(scenarioStates) } @@ -187,3 +181,5 @@ object MockableDeploymentManagerProvider { } } + +case class BasicStatusDetails(status: StateStatus, version: Option[VersionId]) diff --git a/engine/flink/components-api/src/main/scala/pl/touk/nussknacker/engine/flink/api/NkGlobalParameters.scala b/engine/flink/components-api/src/main/scala/pl/touk/nussknacker/engine/flink/api/NkGlobalParameters.scala index c8b399db262..ce0658f659f 100644 --- a/engine/flink/components-api/src/main/scala/pl/touk/nussknacker/engine/flink/api/NkGlobalParameters.scala +++ b/engine/flink/components-api/src/main/scala/pl/touk/nussknacker/engine/flink/api/NkGlobalParameters.scala @@ -18,8 +18,12 @@ import scala.jdk.CollectionConverters._ //we can use this class to pass config through RuntimeContext to places where it would be difficult to use otherwise //Also, those configuration properties will be exposed via Flink REST API/webconsole case class 
NkGlobalParameters( + // This field is not used anywhere. We should consider if we still need it for example for some diagnosis purpose, + // or we should remove it from here modelInfo: ModelInfo, deploymentId: String, // TODO: Pass here DeploymentId? + // Currently only versionId is used in DeploymentStatusDetails, other fields are redundant. We should consider + // if we still need them for example for some diagnosis purpose, or we should remove them from here processVersion: ProcessVersion, configParameters: Option[ConfigGlobalParameters], namespaceParameters: Option[NamespaceMetricsTags], diff --git a/engine/flink/executor/src/main/scala/pl/touk/nussknacker/engine/process/scenariotesting/FlinkScenarioTestingJob.scala b/engine/flink/executor/src/main/scala/pl/touk/nussknacker/engine/process/scenariotesting/FlinkScenarioTestingJob.scala index d823b616a3b..ad14ba12daa 100644 --- a/engine/flink/executor/src/main/scala/pl/touk/nussknacker/engine/process/scenariotesting/FlinkScenarioTestingJob.scala +++ b/engine/flink/executor/src/main/scala/pl/touk/nussknacker/engine/process/scenariotesting/FlinkScenarioTestingJob.scala @@ -58,7 +58,9 @@ private class FlinkScenarioTestingJob(modelData: ModelData) extends LazyLogging ) streamExecutionEnv.getCheckpointConfig.disableCheckpointing() - streamExecutionEnv.execute(scenario.name.value) + // We don't set name because we don't want that these jobs were be included in some other places such + // as scenario status determining + streamExecutionEnv.execute() } protected def prepareRegistrar( diff --git a/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/BaseFlinkDeploymentManagerSpec.scala b/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/BaseFlinkDeploymentManagerSpec.scala index 25962def989..0b2f95acaf1 100644 --- a/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/BaseFlinkDeploymentManagerSpec.scala +++ 
b/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/BaseFlinkDeploymentManagerSpec.scala @@ -1,6 +1,7 @@ package pl.touk.nussknacker.engine.management.streaming import com.typesafe.scalalogging.StrictLogging +import org.apache.flink.api.common.JobID import org.scalatest.funsuite.AnyFunSuiteLike import org.scalatest.matchers.should.Matchers import pl.touk.nussknacker.engine.api.ProcessVersion @@ -11,13 +12,14 @@ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId} import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.definition.component.Components.ComponentDefinitionExtractionMode -import pl.touk.nussknacker.engine.deployment.DeploymentData +import pl.touk.nussknacker.engine.deployment.{DeploymentData, DeploymentId, ExternalDeploymentId} import pl.touk.nussknacker.engine.flink.minicluster.FlinkMiniClusterFactory import pl.touk.nussknacker.engine.util.loader.ModelClassLoader import pl.touk.nussknacker.engine.{ModelData, ModelDependencies} import java.net.URI import java.nio.file.{Files, Paths} +import java.util.UUID import scala.concurrent.ExecutionContext.Implicits._ class RemoteFlinkDeploymentManagerSpec extends BaseFlinkDeploymentManagerSpec { @@ -28,11 +30,7 @@ class MiniClusterFlinkDeploymentManagerSpec extends BaseFlinkDeploymentManagerSp override protected def useMiniClusterForDeployment: Boolean = true } -trait BaseFlinkDeploymentManagerSpec - extends AnyFunSuiteLike - with Matchers - with StreamingDockerTest - with StrictLogging { +trait BaseFlinkDeploymentManagerSpec extends AnyFunSuiteLike with Matchers with StreamingDockerTest with StrictLogging { import pl.touk.nussknacker.engine.kafka.KafkaTestUtils.richConsumer @@ -45,12 +43,53 @@ trait BaseFlinkDeploymentManagerSpec test("deploy scenario in running flink") { val processName = ProcessName("runningFlink") - val version = 
ProcessVersion(VersionId(15), processName, processId, List.empty, "user1", Some(13)) - val process = SampleProcess.prepareProcess(processName) + val version = VersionId(15) + val process = SampleProcess.prepareProcess(processName) + val deploymentId = DeploymentId("not-a-uuid") - deployProcessAndWaitIfRunning(process, version) + val externalDeploymentIdOpt = deployProcessAndWaitIfRunning( + process = process, + processVersion = ProcessVersion(version, processName, processId, List.empty, "user1", Some(13)), + deploymentId = deploymentId + ) try { - processVersion(processName) shouldBe List(version) + deploymentStatus(processName) shouldBe List( + DeploymentStatusDetails( + status = SimpleStateStatus.Running, + deploymentId = Some(deploymentId), + version = Some(version) + ) + ) + externalDeploymentIdOpt shouldBe defined + } finally { + cancelProcess(processName) + } + } + + test("use deploymentId passed as a jobId") { + val processName = ProcessName("jobWithDeploymentIdAsAUuid") + + val version = VersionId(15) + val process = SampleProcess.prepareProcess(processName) + val deploymentIdUuid = UUID.randomUUID() + val deploymentId = DeploymentId(deploymentIdUuid.toString) + + val externalDeploymentIdOpt = deployProcessAndWaitIfRunning( + process = process, + processVersion = ProcessVersion(version, processName, processId, List.empty, "user1", Some(13)), + deploymentId = deploymentId + ) + try { + deploymentStatus(processName) shouldBe List( + DeploymentStatusDetails( + status = SimpleStateStatus.Running, + deploymentId = Some(deploymentId), + version = Some(version) + ) + ) + externalDeploymentIdOpt.value shouldBe ExternalDeploymentId( + new JobID(deploymentIdUuid.getLeastSignificantBits, deploymentIdUuid.getMostSignificantBits).toHexString + ) } finally { cancelProcess(processName) } @@ -177,7 +216,7 @@ trait BaseFlinkDeploymentManagerSpec deployProcessAndWaitIfRunning( processEmittingOneElementAfterStart, empty(processName), - 
StateRestoringStrategy.RestoreStateFromCustomSavepoint(savepointPath.toString) + stateRestoringStrategy = StateRestoringStrategy.RestoreStateFromCustomSavepoint(savepointPath.toString) ) val messages = messagesFromTopic(outTopic, 2) @@ -203,14 +242,14 @@ trait BaseFlinkDeploymentManagerSpec .processCommand(DMStopScenarioCommand(processName, savepointDir = None, user = userToAct)) .map(_.path) eventually { - val status = deploymentManager.getProcessStates(processName).futureValue + val status = deploymentManager.getScenarioDeploymentsStatuses(processName).futureValue status.value.map(_.status) shouldBe List(SimpleStateStatus.Canceled) } deployProcessAndWaitIfRunning( processEmittingOneElementAfterStart, empty(processName), - StateRestoringStrategy.RestoreStateFromCustomSavepoint(savepointPath.futureValue) + stateRestoringStrategy = StateRestoringStrategy.RestoreStateFromCustomSavepoint(savepointPath.futureValue) ) val messages = messagesFromTopic(outTopic, 2) @@ -313,6 +352,6 @@ trait BaseFlinkDeploymentManagerSpec .map(_.message()) .toList - private def processVersion(name: ProcessName): List[ProcessVersion] = - deploymentManager.getProcessStates(name).futureValue.value.flatMap(_.version) + private def deploymentStatus(name: ProcessName): List[DeploymentStatusDetails] = + deploymentManager.getScenarioDeploymentsStatuses(name).futureValue.value } diff --git a/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/JavaConfigDeploymentManagerSpec.scala b/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/JavaConfigDeploymentManagerSpec.scala index 16c8b3b24f3..3b9f74ebe28 100644 --- a/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/JavaConfigDeploymentManagerSpec.scala +++ b/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/JavaConfigDeploymentManagerSpec.scala @@ -46,7 +46,7 @@ class JavaConfigDeploymentManagerSpec extends 
AnyFunSuite with Matchers with Str ) eventually { - val jobStatus = deploymentManager.getProcessStates(process.name).futureValue.value + val jobStatus = deploymentManager.getScenarioDeploymentsStatuses(process.name).futureValue.value jobStatus.map(_.status) shouldBe List(SimpleStateStatus.Running) } diff --git a/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/StreamingDockerTest.scala b/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/StreamingDockerTest.scala index b6a2d5738cd..55d2bc2ad62 100644 --- a/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/StreamingDockerTest.scala +++ b/engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/StreamingDockerTest.scala @@ -5,7 +5,7 @@ import cats.effect.kernel.Resource import cats.effect.unsafe.implicits.global import com.typesafe.scalalogging.StrictLogging import org.scalatest.matchers.should.Matchers -import org.scalatest.{Assertion, BeforeAndAfterAll, OptionValues, Suite} +import org.scalatest.{BeforeAndAfterAll, OptionValues, Suite} import pl.touk.nussknacker.engine.ConfigWithUnresolvedVersion import pl.touk.nussknacker.engine.api.ProcessVersion import pl.touk.nussknacker.engine.api.deployment.DeploymentUpdateStrategy.StateRestoringStrategy @@ -13,11 +13,13 @@ import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.process.ProcessName import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess -import pl.touk.nussknacker.engine.deployment.{DeploymentData, ExternalDeploymentId} +import pl.touk.nussknacker.engine.deployment.{DeploymentData, DeploymentId, ExternalDeploymentId} import pl.touk.nussknacker.engine.kafka.KafkaClient import pl.touk.nussknacker.engine.management.DockerTest import pl.touk.nussknacker.engine.util.loader.DeploymentManagersClassLoader +import 
java.util.UUID + trait StreamingDockerTest extends DockerTest with BeforeAndAfterAll with Matchers with OptionValues { // Warning: we need StrictLogging capability instead of LazyLogging because with LazyLogging we had a deadlock during kafkaClient allocation self: Suite with StrictLogging => @@ -57,27 +59,30 @@ trait StreamingDockerTest extends DockerTest with BeforeAndAfterAll with Matcher protected def deployProcessAndWaitIfRunning( process: CanonicalProcess, processVersion: ProcessVersion, + deploymentId: DeploymentId = DeploymentId(UUID.randomUUID().toString), stateRestoringStrategy: StateRestoringStrategy = StateRestoringStrategy.RestoreStateFromReplacedJobSavepoint - ): Assertion = { - deployProcess(process, processVersion, stateRestoringStrategy) + ): Option[ExternalDeploymentId] = { + val externalDeploymentId = deployProcess(process, processVersion, deploymentId, stateRestoringStrategy) eventually { - val jobStatuses = deploymentManager.getProcessStates(process.name).futureValue.value + val jobStatuses = deploymentManager.getScenarioDeploymentsStatuses(process.name).futureValue.value logger.debug(s"Waiting for deploy: ${process.name}, $jobStatuses") jobStatuses.map(_.status) should contain(SimpleStateStatus.Running) } + externalDeploymentId } protected def deployProcess( process: CanonicalProcess, processVersion: ProcessVersion, - stateRestoringStrategy: StateRestoringStrategy = StateRestoringStrategy.RestoreStateFromReplacedJobSavepoint + deploymentId: DeploymentId, + stateRestoringStrategy: StateRestoringStrategy ): Option[ExternalDeploymentId] = { deploymentManager .processCommand( DMRunDeploymentCommand( processVersion, - DeploymentData.empty, + DeploymentData.empty.copy(deploymentId = deploymentId), process, DeploymentUpdateStrategy.ReplaceDeploymentWithSameScenarioName(stateRestoringStrategy) ) @@ -89,7 +94,7 @@ trait StreamingDockerTest extends DockerTest with BeforeAndAfterAll with Matcher 
deploymentManager.processCommand(DMCancelScenarioCommand(processName, user = userToAct)).futureValue eventually { val statuses = deploymentManager - .getProcessStates(processName) + .getScenarioDeploymentsStatuses(processName) .futureValue .value val runningOrDuringCancelJobs = statuses diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManager.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManager.scala index fdfa1864a66..a084833da80 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManager.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManager.scala @@ -8,10 +8,9 @@ import org.apache.flink.api.common.{JobID, JobStatus} import pl.touk.nussknacker.engine.api.ProcessVersion import pl.touk.nussknacker.engine.api.deployment.DeploymentUpdateStrategy.StateRestoringStrategy import pl.touk.nussknacker.engine.api.deployment._ -import pl.touk.nussknacker.engine.api.deployment.inconsistency.InconsistentStateDetector import pl.touk.nussknacker.engine.api.deployment.scheduler.services._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.engine.api.process.{ProcessIdWithName, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.process.ProcessName import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.deployment.{DeploymentId, ExternalDeploymentId} import pl.touk.nussknacker.engine.flink.minicluster.FlinkMiniClusterWithServices @@ -23,6 +22,8 @@ import pl.touk.nussknacker.engine.flink.minicluster.util.DurationToRetryPolicyCo import pl.touk.nussknacker.engine.management.FlinkDeploymentManager.DeploymentIdOps import pl.touk.nussknacker.engine.management.jobrunner.FlinkScenarioJobRunner import pl.touk.nussknacker.engine.management.rest.FlinkClient +import 
pl.touk.nussknacker.engine.management.rest.flinkRestModel.JobOverview +import pl.touk.nussknacker.engine.util.Implicits.RichScalaMap import pl.touk.nussknacker.engine.util.WithDataFreshnessStatusUtils.WithDataFreshnessStatusMapOps import pl.touk.nussknacker.engine.{BaseModelData, DeploymentManagerDependencies, newdeployment} @@ -59,70 +60,6 @@ class FlinkDeploymentManager( private val statusDeterminer = new FlinkStatusDetailsDeterminer(modelData.namingStrategy, client.getJobConfig) - /** - * Gets status from engine, handles finished state, resolves possible inconsistency with lastAction and formats status using `ProcessStateDefinitionManager` - */ - override def resolve( - idWithName: ProcessIdWithName, - statusDetails: List[StatusDetails], - lastStateAction: Option[ProcessAction], - latestVersionId: VersionId, - deployedVersionId: Option[VersionId], - currentlyPresentedVersionId: Option[VersionId], - ): Future[ProcessState] = { - for { - actionAfterPostprocessOpt <- postprocess(idWithName, statusDetails) - engineStateResolvedWithLastAction = InconsistentStateDetector.resolve( - statusDetails, - actionAfterPostprocessOpt.orElse(lastStateAction) - ) - } yield processStateDefinitionManager.processState( - engineStateResolvedWithLastAction, - latestVersionId, - deployedVersionId, - currentlyPresentedVersionId, - ) - } - - // Flink has a retention for job overviews so we can't rely on this to distinguish between statuses: - // - job is finished without troubles - // - job has failed - // So we synchronize the information that the job was finished by marking deployments actions as execution finished - // and treat another case as ProblemStateStatus.shouldBeRunning (see InconsistentStateDetector) - // TODO: We should synchronize the status of deployment more explicitly as we already do in periodic case - // See PeriodicProcessService.synchronizeDeploymentsStates and remove the InconsistentStateDetector - private def postprocess( - idWithName: ProcessIdWithName, - 
statusDetailsList: List[StatusDetails] - ): Future[Option[ProcessAction]] = { - val allDeploymentIdsAsCorrectActionIds = - statusDetailsList.flatMap(details => - details.deploymentId.flatMap(_.toActionIdOpt).map(id => (id, details.status)) - ) - markEachFinishedDeploymentAsExecutionFinishedAndReturnLastStateAction( - idWithName, - allDeploymentIdsAsCorrectActionIds - ) - } - - private def markEachFinishedDeploymentAsExecutionFinishedAndReturnLastStateAction( - idWithName: ProcessIdWithName, - deploymentActionStatuses: List[(ProcessActionId, StateStatus)] - ): Future[Option[ProcessAction]] = { - val finishedDeploymentActionsIds = deploymentActionStatuses.collect { case (id, SimpleStateStatus.Finished) => - id - } - Future.sequence(finishedDeploymentActionsIds.map(actionService.markActionExecutionFinished)).flatMap { - markingResult => - Option(markingResult) - .filter(_.contains(true)) - .map { _ => - actionService.getLastStateAction(idWithName.id) - } - .getOrElse(Future.successful(None)) - } - } - override def processCommand[Result](command: DMScenarioCommand[Result]): Future[Result] = command match { case command: DMValidateScenarioCommand => validate(command) @@ -154,7 +91,7 @@ class FlinkDeploymentManager( case DeploymentUpdateStrategy.ReplaceDeploymentWithSameScenarioName(_) => oldJobsToStop(processVersion) case DeploymentUpdateStrategy.DontReplaceDeployment => Future.successful(List.empty) } - _ <- checkRequiredSlotsExceedAvailableSlots(canonicalProcess, oldJobs.flatMap(_.externalDeploymentId)) + _ <- checkRequiredSlotsExceedAvailableSlots(canonicalProcess, oldJobs.map(_.jid)) } yield () } @@ -176,12 +113,12 @@ class FlinkDeploymentManager( _ = { logger.debug(s"Starting to deploy scenario: $processName with savepoint $savepointPath") } - runResult <- jobRunner.runScenarioJob( + jobIdOpt <- jobRunner.runScenarioJob( command, savepointPath, ) - _ <- runResult.map(waitForDuringDeployFinished(processName, _)).getOrElse(Future.successful(())) - } yield runResult + 
_ <- jobIdOpt.map(waitForDuringDeployFinished(processName, _)).getOrElse(Future.successful(())) + } yield jobIdOpt.map(_.toHexString).map(ExternalDeploymentId(_)) } private def stopOldJobsIfNeeded(command: DMRunDeploymentCommand) = { @@ -192,8 +129,8 @@ class FlinkDeploymentManager( for { oldJobs <- oldJobsToStop(processVersion) externalDeploymentIds = oldJobs - .sortBy(_.startTime)(Ordering[Option[Long]].reverse) - .flatMap(_.externalDeploymentId) + .sortBy(_.`start-time`)(Ordering[Long].reverse) + .map(_.jid) savepoints <- Future.sequence( externalDeploymentIds.map(stopSavingSavepoint(processVersion, _, canonicalProcess)) ) @@ -203,10 +140,13 @@ class FlinkDeploymentManager( } } - private def oldJobsToStop(processVersion: ProcessVersion): Future[List[StatusDetails]] = { + private def oldJobsToStop(processVersion: ProcessVersion): Future[List[JobOverview]] = { implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - getProcessStates(processVersion.processName) - .map(_.value.filter(details => SimpleStateStatus.DefaultFollowingDeployStatuses.contains(details.status))) + getScenarioDeploymentsStatusesWithJobOverview(processVersion.processName) + .map(_.value.collect { + case (details, jobOverview) if SimpleStateStatus.DefaultFollowingDeployStatuses.contains(details.status) => + jobOverview + }) } private def determineSavepointPath(updateStrategy: DeploymentUpdateStrategy, stoppedJobsSavepoints: List[String]) = @@ -223,15 +163,20 @@ class FlinkDeploymentManager( Some(savepointPath) } - private def requireSingleRunningJob[T](processName: ProcessName, statusDetailsPredicate: StatusDetails => Boolean)( - action: ExternalDeploymentId => Future[T] + private def requireSingleRunningJob[T]( + processName: ProcessName, + statusDetailsPredicate: DeploymentStatusDetails => Boolean + )( + action: JobID => Future[T] ): Future[T] = { implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - getProcessStates(processName).flatMap { 
statuses => - val runningDeploymentIds = statuses.value.filter(statusDetailsPredicate).collect { - case StatusDetails(SimpleStateStatus.Running, _, Some(deploymentId), _, _, _, _) => deploymentId + getScenarioDeploymentsStatusesWithJobOverview(processName).flatMap { statusesWithJobOverviews => + val runningJobIds = statusesWithJobOverviews.value.collect { + case (details @ DeploymentStatusDetails(SimpleStateStatus.Running, Some(_), _), jobOverview) + if statusDetailsPredicate(details) => + jobOverview.jid } - runningDeploymentIds match { + runningJobIds match { case Nil => Future.failed(new IllegalStateException(s"Job $processName not found")) case single :: Nil => @@ -244,7 +189,7 @@ class FlinkDeploymentManager( private def stopSavingSavepoint( processVersion: ProcessVersion, - deploymentId: ExternalDeploymentId, + deploymentId: JobID, canonicalProcess: CanonicalProcess ): Future[String] = { logger.debug(s"Making savepoint of ${processVersion.processName}. Deployment: $deploymentId") @@ -266,10 +211,16 @@ class FlinkDeploymentManager( else Future.successful(()) - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - getAllProcessesStatesFromFlink().map(_.getOrElse(name, List.empty)) + override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { + getScenarioDeploymentsStatusesWithJobOverview(scenarioName).map(_.map(_.map(_._1))) + } + + protected def getScenarioDeploymentsStatusesWithJobOverview(scenarioName: ProcessName)( + implicit freshnessPolicy: DataFreshnessPolicy + ): Future[WithDataFreshnessStatus[List[(DeploymentStatusDetails, JobOverview)]]] = { + getAllJobsStatusesFromFlink().map(_.getOrElse(scenarioName, List.empty)) } override val deploymentSynchronisationSupport: DeploymentSynchronisationSupport = @@ -295,12 
+246,13 @@ class FlinkDeploymentManager( } - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = - new StateQueryForAllScenariosSupported { + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + new DeploymentsStatusesQueryForAllScenariosSupported { - override def getAllProcessesStates()( + override def getAllScenariosDeploymentsStatuses()( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[Map[ProcessName, List[StatusDetails]]]] = getAllProcessesStatesFromFlink() + ): Future[WithDataFreshnessStatus[Map[ProcessName, List[DeploymentStatusDetails]]]] = + getAllJobsStatusesFromFlink().map(_.map(_.mapValuesNow(_.map(_._1)))) } @@ -312,9 +264,9 @@ class FlinkDeploymentManager( } - private def getAllProcessesStatesFromFlink()( + private def getAllJobsStatusesFromFlink()( implicit freshnessPolicy: DataFreshnessPolicy - ): Future[WithDataFreshnessStatus[Map[ProcessName, List[StatusDetails]]]] = { + ): Future[WithDataFreshnessStatus[Map[ProcessName, List[(DeploymentStatusDetails, JobOverview)]]]] = { client .getJobsOverviews() .flatMap { result => @@ -328,7 +280,7 @@ class FlinkDeploymentManager( private def waitForDuringDeployFinished( processName: ProcessName, - deploymentId: ExternalDeploymentId + jobId: JobID ): Future[Unit] = { flinkConfig.waitForDuringDeployFinish.toEnabledConfig .map { config => @@ -336,12 +288,11 @@ class FlinkDeploymentManager( .Pause(config.maxChecks, config.delay) .apply { implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - getProcessStates(processName).map { statuses => - statuses.value - .find(details => - details.externalDeploymentId - .contains(deploymentId) && details.status == SimpleStateStatus.DuringDeploy - ) + getScenarioDeploymentsStatusesWithJobOverview(processName).map { statusesWithJobOverview => + statusesWithJobOverview.value + .find { case (details, jobOverview) => + jobOverview.jid == 
jobId && details.status == SimpleStateStatus.DuringDeploy + } .map(Left(_)) .getOrElse(Right(())) } @@ -360,42 +311,49 @@ class FlinkDeploymentManager( protected def cancelScenario(command: DMCancelScenarioCommand): Future[Unit] = { import command._ implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - getProcessStates(scenarioName).map(_.value).flatMap { statuses => - cancelEachMatchingJob(scenarioName, None, statuses) + getScenarioDeploymentsStatusesWithJobOverview(scenarioName).map(_.value).flatMap { detailsWithJobOverview => + cancelEachMatchingJob(scenarioName, None, detailsWithJobOverview) } } private def cancelDeployment(command: DMCancelDeploymentCommand): Future[Unit] = { import command._ implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh - getProcessStates(scenarioName).map(_.value).flatMap { statuses => - cancelEachMatchingJob(scenarioName, Some(deploymentId), statuses.filter(_.deploymentId.contains(deploymentId))) + getScenarioDeploymentsStatusesWithJobOverview(scenarioName).map(_.value).flatMap { detailsWithJobOverview => + cancelEachMatchingJob( + scenarioName, + Some(deploymentId), + detailsWithJobOverview.filter(_._1.deploymentId.contains(deploymentId)) + ) } } private def cancelEachMatchingJob( processName: ProcessName, deploymentId: Option[DeploymentId], - statuses: List[StatusDetails] + detailsWithJobOverview: List[(DeploymentStatusDetails, JobOverview)] ) = { - statuses.filterNot(details => SimpleStateStatus.isFinalOrTransitioningToFinalStatus(details.status)) match { + detailsWithJobOverview.collect { + case (details, jobOverview) if !SimpleStateStatus.isFinalOrTransitioningToFinalStatus(details.status) => + jobOverview.jid + } match { case Nil => logger.warn( s"Trying to cancel $processName${deploymentId.map(" with id: " + _).getOrElse("")} which is not active on Flink." 
) Future.successful(()) - case single :: Nil => client.cancel(single.externalDeploymentIdUnsafe) - case moreThanOne @ (_ :: _ :: _) => + case singleJobId :: Nil => client.cancel(singleJobId) + case moreThanOneJobIds => logger.warn( - s"Found duplicate jobs of $processName${deploymentId.map(" with id: " + _).getOrElse("")}: $moreThanOne. Cancelling all in non terminal state." + s"Found duplicate jobs of $processName${deploymentId.map(" with id: " + _).getOrElse("")}: $moreThanOneJobIds. Cancelling all in non terminal state." ) - Future.sequence(moreThanOne.map(_.externalDeploymentIdUnsafe).map(client.cancel)).map(_ => ()) + Future.sequence(moreThanOneJobIds.map(client.cancel)).map(_ => ()) } } private def checkRequiredSlotsExceedAvailableSlots( canonicalProcess: CanonicalProcess, - currentlyDeployedJobsIds: List[ExternalDeploymentId] + currentlyDeployedJobsIds: List[JobID] ): Future[Unit] = { if (flinkConfig.shouldCheckAvailableSlots) { slotsChecker.checkRequiredSlotsExceedAvailableSlots(canonicalProcess, currentlyDeployedJobsIds) @@ -415,8 +373,8 @@ class FlinkDeploymentManager( object FlinkDeploymentManager { implicit class DeploymentIdOps(did: newdeployment.DeploymentId) { - def toJobID: String = - new JobID(did.value.getLeastSignificantBits, did.value.getMostSignificantBits).toHexString + def toJobID: JobID = + new JobID(did.value.getLeastSignificantBits, did.value.getMostSignificantBits) } } diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkScheduledExecutionPerformer.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkScheduledExecutionPerformer.scala index 10d7583067e..a551fa89d3e 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkScheduledExecutionPerformer.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkScheduledExecutionPerformer.scala @@ -94,13 +94,15 @@ class 
FlinkScheduledExecutionPerformer( deploymentData, canonicalProcess, ) - flinkClient.runProgram( - jarFile, - RemoteFlinkScenarioJobRunner.MainClassName, - args, - None, - deploymentData.deploymentId.toNewDeploymentIdOpt.map(toJobId) - ) + flinkClient + .runProgram( + jarFile, + RemoteFlinkScenarioJobRunner.MainClassName, + args, + None, + deploymentData.deploymentId.toNewDeploymentIdOpt.map(toJobId) + ) + .map(_.map(jobId => ExternalDeploymentId(jobId.toHexString))) case None => logger.error( s"Cannot deploy scenario ${deployment.processName}, version id: ${deployment.versionId}: jar file name not present" @@ -131,7 +133,7 @@ class FlinkScheduledExecutionPerformer( } private def toJobId(did: newdeployment.DeploymentId) = { - new JobID(did.value.getLeastSignificantBits, did.value.getMostSignificantBits).toHexString + new JobID(did.value.getLeastSignificantBits, did.value.getMostSignificantBits) } } diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkSlotsChecker.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkSlotsChecker.scala index c8e2761af4f..00bb5f49ea9 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkSlotsChecker.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkSlotsChecker.scala @@ -3,11 +3,11 @@ package pl.touk.nussknacker.engine.management import cats.data.OptionT import cats.implicits._ import com.typesafe.scalalogging.LazyLogging +import org.apache.flink.api.common.JobID import org.apache.flink.configuration.CoreOptions import pl.touk.nussknacker.engine.api.StreamMetaData import pl.touk.nussknacker.engine.api.process.ProcessName import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import pl.touk.nussknacker.engine.management.FlinkSlotsChecker.{NotEnoughSlotsException, SlotsBalance} import 
pl.touk.nussknacker.engine.management.rest.FlinkClient import pl.touk.nussknacker.engine.management.rest.flinkRestModel.ClusterOverview @@ -19,7 +19,7 @@ class FlinkSlotsChecker(client: FlinkClient)(implicit ec: ExecutionContext) exte def checkRequiredSlotsExceedAvailableSlots( canonicalProcess: CanonicalProcess, - currentlyDeployedJobsIds: List[ExternalDeploymentId] + currentlyDeployedJobsIds: List[JobID] ): Future[Unit] = { val collectedSlotsCheckInputs = for { slotsBalance <- determineSlotsBalance(canonicalProcess, currentlyDeployedJobsIds) @@ -47,7 +47,7 @@ class FlinkSlotsChecker(client: FlinkClient)(implicit ec: ExecutionContext) exte private def determineSlotsBalance( canonicalProcess: CanonicalProcess, - currentlyDeployedJobsIds: List[ExternalDeploymentId] + currentlyDeployedJobsIds: List[JobID] ): OptionT[Future, SlotsBalance] = { canonicalProcess.metaData.typeSpecificData match { case stream: StreamMetaData => @@ -61,12 +61,11 @@ class FlinkSlotsChecker(client: FlinkClient)(implicit ec: ExecutionContext) exte } private def slotsThatWillBeReleasedAfterJobCancel( - currentlyDeployedJobsIds: List[ExternalDeploymentId] + currentlyDeployedJobsIds: List[JobID] ): Future[Int] = { Future .sequence( - currentlyDeployedJobsIds - .map(deploymentId => client.getJobConfig(deploymentId.value).map(_.`job-parallelism`)) + currentlyDeployedJobsIds.map(client.getJobConfig(_).map(_.`job-parallelism`)) ) .map(_.sum) } diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStateStatus.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStateStatus.scala index b7453dc58a7..eac6adecd9f 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStateStatus.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStateStatus.scala @@ -1,6 +1,6 @@ package pl.touk.nussknacker.engine.management -import 
pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.ScenarioActionName import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus @@ -10,9 +10,9 @@ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus */ object FlinkStateStatus { - val statusActionsPF: PartialFunction[ProcessStatus, List[ScenarioActionName]] = _.stateStatus match { - case SimpleStateStatus.DuringDeploy => List(ScenarioActionName.Cancel) - case SimpleStateStatus.Restarting => List(ScenarioActionName.Cancel) + val statusActionsPF: PartialFunction[ScenarioStatusWithScenarioContext, Set[ScenarioActionName]] = { + case input if input.scenarioStatus == SimpleStateStatus.DuringDeploy => Set(ScenarioActionName.Cancel) + case input if input.scenarioStatus == SimpleStateStatus.Restarting => Set(ScenarioActionName.Cancel) } } diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStatusDetailsDeterminer.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStatusDetailsDeterminer.scala index 47f75b26633..5c2b1ea7b43 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStatusDetailsDeterminer.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkStatusDetailsDeterminer.scala @@ -1,13 +1,12 @@ package pl.touk.nussknacker.engine.management import com.typesafe.scalalogging.LazyLogging -import org.apache.flink.api.common.JobStatus -import pl.touk.nussknacker.engine.api.ProcessVersion +import org.apache.flink.api.common.{JobID, JobStatus} import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.engine.api.deployment.{DeploymentStatus, StatusDetails} +import 
pl.touk.nussknacker.engine.api.deployment.{DeploymentStatus, DeploymentStatusDetails} import pl.touk.nussknacker.engine.api.namespaces.NamingStrategy -import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId} -import pl.touk.nussknacker.engine.deployment.{DeploymentId, ExternalDeploymentId} +import pl.touk.nussknacker.engine.api.process.{ProcessName, VersionId} +import pl.touk.nussknacker.engine.deployment.DeploymentId import pl.touk.nussknacker.engine.management.FlinkStatusDetailsDeterminer.{ParsedJobConfig, toDeploymentStatus} import pl.touk.nussknacker.engine.management.rest.flinkRestModel import pl.touk.nussknacker.engine.management.rest.flinkRestModel.{BaseJobStatusCounts, JobOverview} @@ -17,11 +16,13 @@ import scala.concurrent.{ExecutionContext, Future} class FlinkStatusDetailsDeterminer( namingStrategy: NamingStrategy, - getJobConfig: String => Future[flinkRestModel.ExecutionConfig] + getJobConfig: JobID => Future[flinkRestModel.ExecutionConfig] )(implicit ec: ExecutionContext) extends LazyLogging { - def statusDetailsFromJobOverviews(jobOverviews: List[JobOverview]): Future[Map[ProcessName, List[StatusDetails]]] = + def statusDetailsFromJobOverviews( + jobOverviews: List[JobOverview] + ): Future[Map[ProcessName, List[(DeploymentStatusDetails, JobOverview)]]] = Future .sequence { for { @@ -29,48 +30,36 @@ class FlinkStatusDetailsDeterminer( name <- namingStrategy.decodeName(job.name).map(ProcessName(_)) } yield withParsedJobConfig(job.jid, name).map { jobConfigOpt => val details = jobConfigOpt.map { jobConfig => - StatusDetails( - SimpleStateStatus.fromDeploymentStatus(toDeploymentStatus(JobStatus.valueOf(job.state), job.tasks)), - jobConfig.deploymentId, - Some(ExternalDeploymentId(job.jid)), - version = Some(jobConfig.version), - startTime = Some(job.`start-time`), - attributes = Option.empty, - errors = List.empty + DeploymentStatusDetails( + status = + 
SimpleStateStatus.fromDeploymentStatus(toDeploymentStatus(JobStatus.valueOf(job.state), job.tasks)), + deploymentId = jobConfig.deploymentId, + version = Some(jobConfig.version) ) } getOrElse { logger.debug( s"No correct job config in deployed scenario: $name. Returning ${SimpleStateStatus.DuringDeploy} without version" ) - StatusDetails( + DeploymentStatusDetails( SimpleStateStatus.DuringDeploy, // For scheduling mechanism this fallback is probably wrong // TODO: switch scheduling mechanism deployment ids to UUIDs - Some(DeploymentId(job.jid)), - Some(ExternalDeploymentId(job.jid)), + Some(DeploymentId(job.jid.toHexString)), version = None, - startTime = Some(job.`start-time`), - attributes = Option.empty, - errors = List.empty ) } - name -> details + name -> (details, job) } } .map(_.toGroupedMap) - private def withParsedJobConfig(jobId: String, name: ProcessName): Future[Option[ParsedJobConfig]] = { + private def withParsedJobConfig(jobId: JobID, name: ProcessName): Future[Option[ParsedJobConfig]] = { getJobConfig(jobId).map { executionConfig => val userConfig = executionConfig.`user-config` for { version <- userConfig.get("versionId").flatMap(_.asString).map(_.toLong).map(VersionId(_)) - user <- userConfig.get("user").map(_.asString.getOrElse("")) - modelVersion = userConfig.get("modelVersion").flatMap(_.asString).map(_.toInt) - processId = ProcessId(userConfig.get("processId").flatMap(_.asString).map(_.toLong).getOrElse(-1L)) - labels = userConfig.get("labels").flatMap(_.asArray).map(_.toList.flatMap(_.asString)).toList.flatten deploymentId = userConfig.get("deploymentId").flatMap(_.asString).map(DeploymentId(_)) } yield { - val versionDetails = ProcessVersion(version, name, processId, labels, user, modelVersion) - ParsedJobConfig(versionDetails, deploymentId) + ParsedJobConfig(version, deploymentId) } } } @@ -81,7 +70,7 @@ object FlinkStatusDetailsDeterminer { // TODO: deploymentId is optional to handle situation when on Flink there is old version of runtime 
and in designer is the new one. // After fully deploy of new version it should be mandatory - private case class ParsedJobConfig(version: ProcessVersion, deploymentId: Option[DeploymentId]) + private case class ParsedJobConfig(version: VersionId, deploymentId: Option[DeploymentId]) private[management] def toDeploymentStatus( jobStatus: JobStatus, diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkMiniClusterScenarioJobRunner.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkMiniClusterScenarioJobRunner.scala index 6a0a6fa721c..fcc70515820 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkMiniClusterScenarioJobRunner.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkMiniClusterScenarioJobRunner.scala @@ -1,12 +1,12 @@ package pl.touk.nussknacker.engine.management.jobrunner -import org.apache.flink.api.common.JobExecutionResult -import org.apache.flink.configuration.Configuration +import org.apache.flink.api.common.{JobExecutionResult, JobID} +import org.apache.flink.configuration.{Configuration, PipelineOptionsInternal} import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings import pl.touk.nussknacker.engine.BaseModelData import pl.touk.nussknacker.engine.api.deployment.DMRunDeploymentCommand -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import pl.touk.nussknacker.engine.flink.minicluster.FlinkMiniClusterWithServices +import pl.touk.nussknacker.engine.management.FlinkDeploymentManager.DeploymentIdOps import pl.touk.nussknacker.engine.util.ReflectiveMethodInvoker import scala.concurrent.{ExecutionContext, Future} @@ -29,14 +29,17 @@ class FlinkMiniClusterScenarioJobRunner( override def runScenarioJob( command: DMRunDeploymentCommand, savepointPathOpt: Option[String] - ): Future[Option[ExternalDeploymentId]] = { + ): 
Future[Option[JobID]] = { Future { miniClusterWithServices.withDetachedStreamExecutionEnvironment { env => + val conf = new Configuration() savepointPathOpt.foreach { savepointPath => - val conf = new Configuration() SavepointRestoreSettings.toConfiguration(SavepointRestoreSettings.forPath(savepointPath, true), conf) - env.configure(conf) } + command.deploymentData.deploymentId.toNewDeploymentIdOpt.map(_.toJobID).foreach { jobId => + conf.set(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID, jobId.toHexString) + } + env.configure(conf) val jobID = jobInvoker .invokeStaticMethod( modelData, @@ -46,7 +49,7 @@ class FlinkMiniClusterScenarioJobRunner( env ) .getJobID - Some(ExternalDeploymentId(jobID.toHexString)) + Some(jobID) } } } diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkScenarioJobRunner.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkScenarioJobRunner.scala index 44459f0ee12..bb46821ca13 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkScenarioJobRunner.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/FlinkScenarioJobRunner.scala @@ -1,7 +1,7 @@ package pl.touk.nussknacker.engine.management.jobrunner +import org.apache.flink.api.common.JobID import pl.touk.nussknacker.engine.api.deployment.DMRunDeploymentCommand -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import scala.concurrent.Future @@ -10,6 +10,6 @@ trait FlinkScenarioJobRunner { def runScenarioJob( command: DMRunDeploymentCommand, savepointPathOpt: Option[String] - ): Future[Option[ExternalDeploymentId]] + ): Future[Option[JobID]] } diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/RemoteFlinkScenarioJobRunner.scala 
b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/RemoteFlinkScenarioJobRunner.scala index 0ffb2d7df6c..26db6a0781f 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/RemoteFlinkScenarioJobRunner.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/jobrunner/RemoteFlinkScenarioJobRunner.scala @@ -1,25 +1,27 @@ package pl.touk.nussknacker.engine.management.jobrunner import io.circe.syntax.EncoderOps +import org.apache.flink.api.common.JobID import pl.touk.nussknacker.engine.BaseModelData import pl.touk.nussknacker.engine.api.ProcessVersion import pl.touk.nussknacker.engine.api.deployment.DMRunDeploymentCommand import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess -import pl.touk.nussknacker.engine.deployment.{DeploymentData, ExternalDeploymentId} +import pl.touk.nussknacker.engine.deployment.DeploymentData import pl.touk.nussknacker.engine.management.FlinkDeploymentManager.DeploymentIdOps import pl.touk.nussknacker.engine.management.jobrunner.RemoteFlinkScenarioJobRunner.{MainClassName, prepareProgramArgs} import pl.touk.nussknacker.engine.management.rest.FlinkClient -import scala.concurrent.Future +import scala.concurrent.{ExecutionContext, Future} -class RemoteFlinkScenarioJobRunner(modelData: BaseModelData, client: FlinkClient) extends FlinkScenarioJobRunner { +class RemoteFlinkScenarioJobRunner(modelData: BaseModelData, client: FlinkClient)(implicit ec: ExecutionContext) + extends FlinkScenarioJobRunner { private val modelJarProvider = new FlinkModelJarProvider(modelData.modelClassLoaderUrls) override def runScenarioJob( command: DMRunDeploymentCommand, savepointPathOpt: Option[String] - ): Future[Option[ExternalDeploymentId]] = { + ): Future[Option[JobID]] = { import command._ val args = prepareProgramArgs( modelData.inputConfigDuringExecution.serialized, diff --git 
a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClient.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClient.scala index bc0e78aa3c5..8dd32a5f383 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClient.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClient.scala @@ -1,10 +1,10 @@ package pl.touk.nussknacker.engine.management.rest import com.github.benmanes.caffeine.cache.{AsyncCache, Cache, Caffeine} +import org.apache.flink.api.common.JobID import org.apache.flink.configuration.Configuration import pl.touk.nussknacker.engine.api.deployment.DataFreshnessPolicy.{CanBeCached, Fresh} import pl.touk.nussknacker.engine.api.deployment.{DataFreshnessPolicy, SavepointResult, WithDataFreshnessStatus} -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import pl.touk.nussknacker.engine.management.rest.flinkRestModel.{ExecutionConfig, JobDetails, JobOverview} import java.io.File @@ -26,11 +26,11 @@ class CachedFlinkClient(delegate: FlinkClient, jobsOverviewCacheTTL: FiniteDurat .expireAfterWrite(java.time.Duration.ofMillis(jobsOverviewCacheTTL.toMillis)) .buildAsync[String, List[JobOverview]]() - private val jobsConfigCache: Cache[String, ExecutionConfig] = + private val jobsConfigCache: Cache[JobID, ExecutionConfig] = Caffeine .newBuilder() .maximumSize(jobsConfigCacheSize) - .build[String, ExecutionConfig]() + .build[JobID, ExecutionConfig]() override def deleteJarIfExists(jarFileName: String): Future[Unit] = delegate.deleteJarIfExists(jarFileName) @@ -57,7 +57,7 @@ class CachedFlinkClient(delegate: FlinkClient, jobsOverviewCacheTTL: FiniteDurat ) } - override def getJobConfig(jobId: String): Future[ExecutionConfig] = + override def getJobConfig(jobId: JobID): Future[ExecutionConfig] = Option(jobsConfigCache.getIfPresent(jobId)) 
.map(Future.successful) .getOrElse( @@ -69,27 +69,27 @@ class CachedFlinkClient(delegate: FlinkClient, jobsOverviewCacheTTL: FiniteDurat } ) - override def getJobDetails(jobId: String): Future[Option[JobDetails]] = delegate.getJobDetails(jobId) + override def getJobDetails(jobId: JobID): Future[Option[JobDetails]] = delegate.getJobDetails(jobId) - override def cancel(deploymentId: ExternalDeploymentId): Future[Unit] = - delegate.cancel(deploymentId) + override def cancel(jobId: JobID): Future[Unit] = + delegate.cancel(jobId) override def makeSavepoint( - deploymentId: ExternalDeploymentId, + jobId: JobID, savepointDir: Option[String] ): Future[SavepointResult] = - delegate.makeSavepoint(deploymentId, savepointDir) + delegate.makeSavepoint(jobId, savepointDir) - override def stop(deploymentId: ExternalDeploymentId, savepointDir: Option[String]): Future[SavepointResult] = - delegate.stop(deploymentId, savepointDir) + override def stop(jobId: JobID, savepointDir: Option[String]): Future[SavepointResult] = + delegate.stop(jobId, savepointDir) override def runProgram( jarFile: File, mainClass: String, args: List[String], savepointPath: Option[String], - jobId: Option[String] - ): Future[Option[ExternalDeploymentId]] = + jobId: Option[JobID] + ): Future[Option[JobID]] = delegate.runProgram(jarFile, mainClass, args, savepointPath, jobId) // TODO: Do we need cache here? 
diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/FlinkClient.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/FlinkClient.scala index 73d35999087..5d562cd0ab9 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/FlinkClient.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/FlinkClient.scala @@ -1,9 +1,9 @@ package pl.touk.nussknacker.engine.management.rest import com.typesafe.scalalogging.LazyLogging +import org.apache.flink.api.common.JobID import org.apache.flink.configuration.Configuration import pl.touk.nussknacker.engine.api.deployment.{DataFreshnessPolicy, SavepointResult, WithDataFreshnessStatus} -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import pl.touk.nussknacker.engine.management.rest.flinkRestModel.{ClusterOverview, JobDetails, JobOverview} import sttp.client3.SttpBackend @@ -18,15 +18,15 @@ trait FlinkClient { implicit freshnessPolicy: DataFreshnessPolicy ): Future[WithDataFreshnessStatus[List[JobOverview]]] - def getJobDetails(jobId: String): Future[Option[JobDetails]] + def getJobDetails(jobId: JobID): Future[Option[JobDetails]] - def getJobConfig(jobId: String): Future[flinkRestModel.ExecutionConfig] + def getJobConfig(jobId: JobID): Future[flinkRestModel.ExecutionConfig] - def cancel(deploymentId: ExternalDeploymentId): Future[Unit] + def cancel(jobId: JobID): Future[Unit] - def makeSavepoint(deploymentId: ExternalDeploymentId, savepointDir: Option[String]): Future[SavepointResult] + def makeSavepoint(jobId: JobID, savepointDir: Option[String]): Future[SavepointResult] - def stop(deploymentId: ExternalDeploymentId, savepointDir: Option[String]): Future[SavepointResult] + def stop(jobId: JobID, savepointDir: Option[String]): Future[SavepointResult] def getClusterOverview: Future[ClusterOverview] @@ -37,8 +37,8 @@ trait FlinkClient { mainClass: String, args: 
List[String], savepointPath: Option[String], - jobId: Option[String] - ): Future[Option[ExternalDeploymentId]] + jobId: Option[JobID] + ): Future[Option[JobID]] } diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/HttpFlinkClient.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/HttpFlinkClient.scala index d2e38f7351e..ebb82d7cb50 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/HttpFlinkClient.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/HttpFlinkClient.scala @@ -1,9 +1,9 @@ package pl.touk.nussknacker.engine.management.rest import com.typesafe.scalalogging.LazyLogging +import org.apache.flink.api.common.JobID import org.apache.flink.configuration.Configuration import pl.touk.nussknacker.engine.api.deployment.{DataFreshnessPolicy, SavepointResult, WithDataFreshnessStatus} -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import pl.touk.nussknacker.engine.management.rest.flinkRestModel._ import pl.touk.nussknacker.engine.sttp.SttpJson import pl.touk.nussknacker.engine.sttp.SttpJson.asOptionalJson @@ -104,18 +104,18 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, .recoverWith(recoverWithMessage("retrieve Flink jobs")) } - override def getJobDetails(jobId: String): Future[Option[JobDetails]] = { + override def getJobDetails(jobId: JobID): Future[Option[JobDetails]] = { basicRequest - .get(flinkUrl.addPath("jobs", jobId)) + .get(flinkUrl.addPath("jobs", jobId.toHexString)) .response(asOptionalJson[JobDetails]) .send(backend) .flatMap(SttpJson.failureToFuture) .recoverWith(recoverWithMessage("retrieve Flink job details")) } - override def getJobConfig(jobId: String): Future[flinkRestModel.ExecutionConfig] = { + override def getJobConfig(jobId: JobID): Future[flinkRestModel.ExecutionConfig] = { basicRequest - .get(flinkUrl.addPath("jobs", 
jobId, "config")) + .get(flinkUrl.addPath("jobs", jobId.toHexString, "config")) .response(asJson[JobConfig]) .send(backend) .flatMap(SttpJson.failureToFuture) @@ -123,8 +123,8 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, } // FIXME: get rid of sleep, refactor? - def waitForSavepoint( - jobId: ExternalDeploymentId, + private def waitForSavepoint( + jobId: JobID, savepointId: String, timeoutLeft: Long = jobManagerTimeout.toMillis ): Future[SavepointResult] = { @@ -133,7 +133,7 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, return Future.failed(new Exception(s"Failed to complete savepoint in time for $jobId and trigger $savepointId")) } basicRequest - .get(flinkUrl.addPath("jobs", jobId.value, "savepoints", savepointId)) + .get(flinkUrl.addPath("jobs", jobId.toHexString, "savepoints", savepointId)) .response(asJson[GetSavepointStatusResponse]) .send(backend) .flatMap(SttpJson.failureToFuture) @@ -153,9 +153,9 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, } } - override def cancel(deploymentId: ExternalDeploymentId): Future[Unit] = { + override def cancel(jobId: JobID): Future[Unit] = { basicRequest - .patch(flinkUrl.addPath("jobs", deploymentId.value)) + .patch(flinkUrl.addPath("jobs", jobId.toHexString)) .send(backend) .flatMap(handleUnitResponse("cancel scenario")) .recoverWith(recoverWithMessage("cancel scenario")) @@ -163,26 +163,26 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, } override def makeSavepoint( - deploymentId: ExternalDeploymentId, + jobId: JobID, savepointDir: Option[String] ): Future[SavepointResult] = { val savepointRequest = basicRequest - .post(flinkUrl.addPath("jobs", deploymentId.value, "savepoints")) + .post(flinkUrl.addPath("jobs", jobId.toHexString, "savepoints")) .body(SavepointTriggerRequest(`target-directory` = savepointDir, `cancel-job` = false)) - processSavepointRequest(deploymentId, 
savepointRequest, "make savepoint") + processSavepointRequest(jobId, savepointRequest, "make savepoint") } - override def stop(deploymentId: ExternalDeploymentId, savepointDir: Option[String]): Future[SavepointResult] = { + override def stop(jobId: JobID, savepointDir: Option[String]): Future[SavepointResult] = { // because of https://issues.apache.org/jira/browse/FLINK-28758 we can't use '/stop' endpoint, // so jobs ends up in CANCELED state, not FINISHED - we should switch back when we get rid of old Kafka source val stopRequest = basicRequest - .post(flinkUrl.addPath("jobs", deploymentId.value, "savepoints")) + .post(flinkUrl.addPath("jobs", jobId.toHexString, "savepoints")) .body(SavepointTriggerRequest(`target-directory` = savepointDir, `cancel-job` = true)) - processSavepointRequest(deploymentId, stopRequest, "stop scenario") + processSavepointRequest(jobId, stopRequest, "stop scenario") } private def processSavepointRequest( - deploymentId: ExternalDeploymentId, + jobId: JobID, request: RequestT[Identity, Either[String, String], Any], action: String ): Future[SavepointResult] = { @@ -191,7 +191,7 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, .send(backend) .flatMap(SttpJson.failureToFuture) .flatMap { response => - waitForSavepoint(deploymentId, response.`request-id`) + waitForSavepoint(jobId, response.`request-id`) } .recoverWith(recoverWithMessage(action)) } @@ -203,8 +203,8 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, mainClass: String, args: List[String], savepointPath: Option[String], - jobId: Option[String] - ): Future[Option[ExternalDeploymentId]] = { + jobId: Option[JobID] + ): Future[Option[JobID]] = { val program = DeployProcessRequest( entryClass = mainClass, @@ -219,7 +219,7 @@ class HttpFlinkClient(restUrl: URI, scenarioStateRequestTimeout: FiniteDuration, .response(asJson[RunResponse]) .send(backend) .flatMap(SttpJson.failureToFuture) - .map(ret => 
Some(ExternalDeploymentId(ret.jobid))) + .map(ret => Some(ret.jobid)) .recover({ // sometimes deploying takes too long, which causes TimeoutException while waiting for deploy response // workaround for now, not the best solution though diff --git a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/flinkRestModel.scala b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/flinkRestModel.scala index 8027e95f53f..52a5cd60ff8 100644 --- a/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/flinkRestModel.scala +++ b/engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/rest/flinkRestModel.scala @@ -1,7 +1,9 @@ package pl.touk.nussknacker.engine.management.rest +import io.circe.{Decoder, Encoder} import io.circe.generic.JsonCodec import org.apache.flink.api.common +import org.apache.flink.api.common.JobID object flinkRestModel { @@ -11,7 +13,7 @@ object flinkRestModel { programArgsList: List[String], parallelism: Int = common.ExecutionConfig.PARALLELISM_DEFAULT, allowNonRestoredState: Boolean = true, - jobId: Option[String] + jobId: Option[JobID] ) @JsonCodec(encodeOnly = true) case class SavepointTriggerRequest( @@ -19,8 +21,6 @@ object flinkRestModel { `cancel-job`: Boolean ) - @JsonCodec(encodeOnly = true) case class StopRequest(targetDirectory: Option[String], drain: Boolean) - @JsonCodec(decodeOnly = true) case class SavepointTriggerResponse(`request-id`: String) @JsonCodec(decodeOnly = true) case class GetSavepointStatusResponse( @@ -53,7 +53,7 @@ object flinkRestModel { // NOTE: Flink <1.10 compatibility - JobStatus changed package, so we use String here @JsonCodec(decodeOnly = true) case class JobOverview( - jid: String, + jid: JobID, name: String, `last-modification`: Long, `start-time`: Long, @@ -110,7 +110,7 @@ object flinkRestModel { def total: Int } - @JsonCodec(decodeOnly = true) case class JobConfig(jid: String, `execution-config`: 
ExecutionConfig) + @JsonCodec(decodeOnly = true) case class JobConfig(jid: JobID, `execution-config`: ExecutionConfig) @JsonCodec(decodeOnly = true) case class ExecutionConfig( `job-parallelism`: Int, @@ -127,6 +127,9 @@ object flinkRestModel { @JsonCodec(decodeOnly = true) case class KeyValueEntry(key: String, value: String) - @JsonCodec case class RunResponse(jobid: String) + @JsonCodec case class RunResponse(jobid: JobID) + + implicit val jobIdEncoder: Encoder[JobID] = Encoder[String].contramap[JobID](_.toHexString) + implicit val jobIdDecoder: Decoder[JobID] = Decoder[String].map(JobID.fromHexString) } diff --git a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManagerSpec.scala b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManagerSpec.scala index ae8eb1f7936..b9da2ddeea8 100644 --- a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManagerSpec.scala +++ b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkDeploymentManagerSpec.scala @@ -6,7 +6,8 @@ import com.github.tomakehurst.wiremock.WireMockServer import com.github.tomakehurst.wiremock.client.WireMock._ import com.typesafe.config.ConfigFactory import io.circe.Json.{fromString, fromValues} -import org.apache.flink.api.common.JobStatus +import org.apache.flink.api.common.{JobID, JobStatus} +import org.scalatest.LoneElement import org.scalatest.concurrent.PatienceConfiguration.Timeout import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers @@ -14,15 +15,14 @@ import pl.touk.nussknacker.engine.DeploymentManagerDependencies import pl.touk.nussknacker.engine.api.component.NodesDeploymentData import pl.touk.nussknacker.engine.api.deployment.DeploymentUpdateStrategy.StateRestoringStrategy import pl.touk.nussknacker.engine.api.deployment._ -import 
pl.touk.nussknacker.engine.api.deployment.inconsistency.InconsistentStateDetector import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId} import pl.touk.nussknacker.engine.api.{MetaData, ProcessVersion, StreamMetaData} import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.deployment._ import pl.touk.nussknacker.engine.flink.minicluster.scenariotesting.ScenarioStateVerificationConfig import pl.touk.nussknacker.engine.management.rest.flinkRestModel._ +import pl.touk.nussknacker.engine.management.utils.JobIdGenerator.generateJobId import pl.touk.nussknacker.engine.testing.LocalModelData import pl.touk.nussknacker.test.{AvailablePortFinder, PatientScalaFutures} import sttp.client3.asynchttpclient.future.AsyncHttpClientFutureBackend @@ -31,15 +31,15 @@ import sttp.client3.{Response, StringBody, SttpBackend, SttpClientException} import sttp.model.{Method, StatusCode} import java.net.NoRouteToHostException +import java.util.Collections import java.util.concurrent.TimeoutException -import java.util.{Collections, UUID} import scala.collection.mutable import scala.concurrent.ExecutionContext.Implicits._ import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} //TODO move some tests to FlinkHttpClientTest -class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientScalaFutures { +class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientScalaFutures with LoneElement { private implicit val freshnessPolicy: DataFreshnessPolicy = DataFreshnessPolicy.Fresh @@ -49,7 +49,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS private var statuses: List[JobOverview] = List() - private var configs: Map[String, ExecutionConfig] = Map() + private var 
configs: Map[JobID, ExecutionConfig] = Map() private val uploadedJarPath = "file" @@ -64,7 +64,9 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS AdditionalModelConfigs.empty ) - private val returnedJobId = "jobId" + private val sampleJobId = generateJobId + + private val returnedJobId = generateJobId private val canonicalProcess: CanonicalProcess = CanonicalProcess(MetaData("p1", StreamMetaData(Some(1))), Nil, Nil) @@ -89,7 +91,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS freeSlots )._1 - private case class HistoryEntry(operation: String, jobId: Option[String]) + private case class HistoryEntry(operation: String, jobId: Option[JobID]) private def createManagerWithHistory( statuses: List[JobOverview] = List(), @@ -110,36 +112,38 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS case (List("jobs", "overview"), Method.GET) => history.append(HistoryEntry("overview", None)) JobsResponse(statuses) - case (List("jobs", jobId, "config"), Method.GET) => + case (List("jobs", jobIdString, "config"), Method.GET) => + val jobId = JobID.fromHexString(jobIdString) history.append(HistoryEntry("config", Some(jobId))) JobConfig( jobId, configs.getOrElse( - jobId, + JobID.fromHexString(jobIdString), ExecutionConfig( `job-parallelism` = 1, `user-config` = Map( "processId" -> fromString("123"), "versionId" -> fromString("1"), - "deploymentId" -> fromString(jobId), + "deploymentId" -> fromString(jobIdString), "user" -> fromString("user1"), "labels" -> fromValues(List.empty) ) ) ) ) - case (List("jobs", jobId), Method.PATCH) if acceptCancel => - history.append(HistoryEntry("cancel", Some(jobId))) + case (List("jobs", jobIdString), Method.PATCH) if acceptCancel => + history.append(HistoryEntry("cancel", Some(JobID.fromHexString(jobIdString)))) () - case (List("jobs", jobId, "savepoints"), Method.POST) if acceptSavepoint || acceptStop => + case (List("jobs", jobIdString, 
"savepoints"), Method.POST) if acceptSavepoint || acceptStop => val operation = req.body match { case StringBody(s, _, _) if s.contains(""""cancel-job":true""") => "stop" case _ => "makeSavepoint" } - history.append(HistoryEntry(operation, Some(jobId))) + history.append(HistoryEntry(operation, Some(JobID.fromHexString(jobIdString)))) SavepointTriggerResponse(`request-id` = savepointRequestId) - case (List("jobs", jobId, "savepoints", `savepointRequestId`), Method.GET) if acceptSavepoint || acceptStop => - history.append(HistoryEntry("getSavepoints", Some(jobId))) + case (List("jobs", jobIdString, "savepoints", `savepointRequestId`), Method.GET) + if acceptSavepoint || acceptStop => + history.append(HistoryEntry("getSavepoints", Some(JobID.fromHexString(jobIdString)))) buildFinishedSavepointResponse(savepointPath) case (List("jars"), Method.GET) => history.append(HistoryEntry("getJars", None)) @@ -168,7 +172,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS } test("continue on timeout exception") { - statuses = List(JobOverview("2343", "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) + statuses = List(JobOverview(sampleJobId, "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) createManager(statuses, acceptDeploy = true, exceptionOnDeploy = Some(new TimeoutException("tooo looong"))) .processCommand( @@ -185,7 +189,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS } test("not continue on random network exception") { - statuses = List(JobOverview("2343", "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) + statuses = List(JobOverview(sampleJobId, "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) val manager = createManager(statuses, acceptDeploy = true, exceptionOnDeploy = Some(new NoRouteToHostException("heeelo?"))) @@ -241,7 +245,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS } 
test("allow deploy if process is failed") { - statuses = List(JobOverview("2343", "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) + statuses = List(JobOverview(sampleJobId, "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) createManager(statuses, acceptDeploy = true) .processCommand( @@ -254,11 +258,11 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS ) ) ) - .futureValue shouldBe Some(ExternalDeploymentId(returnedJobId)) + .futureValue shouldBe Some(ExternalDeploymentId(returnedJobId.toHexString)) } test("allow deploy and make savepoint if process is running") { - statuses = List(JobOverview("2343", "p1", 10L, 10L, JobStatus.RUNNING.name(), tasksOverview(running = 1))) + statuses = List(JobOverview(sampleJobId, "p1", 10L, 10L, JobStatus.RUNNING.name(), tasksOverview(running = 1))) createManager(statuses, acceptDeploy = true, acceptSavepoint = true) .processCommand( @@ -271,7 +275,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS ) ) ) - .futureValue shouldBe Some(ExternalDeploymentId(returnedJobId)) + .futureValue shouldBe Some(ExternalDeploymentId(returnedJobId.toHexString)) } test("should make savepoint") { @@ -301,9 +305,8 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS JobStatus.SUSPENDED.name(), JobStatus.RECONCILING.name() ) - statuses = cancellableStatuses.map(status => - JobOverview(UUID.randomUUID().toString, s"process_$status", 10L, 10L, status, tasksOverview()) - ) + statuses = + cancellableStatuses.map(status => JobOverview(sampleJobId, s"process_$status", 10L, 10L, status, tasksOverview())) val (manager, history) = createManagerWithHistory(statuses) @@ -316,22 +319,21 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS } test("allow cancel specific deployment") { - val processName = ProcessName("process1") - val fooDeploymentId = DeploymentId("foo") - val barDeploymentId = 
DeploymentId("bar") + val processName = ProcessName("process1") + val fooJobId = generateJobId + val barJobId = generateJobId - val deploymentIds = List(fooDeploymentId, barDeploymentId) - statuses = deploymentIds.map(deploymentId => - JobOverview(deploymentId.value, processName.value, 10L, 10L, JobStatus.RUNNING.name(), tasksOverview()) - ) - configs = deploymentIds - .map(deploymentId => - deploymentId.value -> ExecutionConfig( + val jobIds = List(fooJobId, barJobId) + statuses = + jobIds.map(jobId => JobOverview(jobId, processName.value, 10L, 10L, JobStatus.RUNNING.name(), tasksOverview())) + configs = jobIds + .map(jobId => + jobId -> ExecutionConfig( 1, Map( "processId" -> fromString("123"), "versionId" -> fromString("1"), - "deploymentId" -> fromString(deploymentId.value), + "deploymentId" -> fromString(jobId.toHexString), "user" -> fromString("user1"), "labels" -> fromValues(List.empty) ) @@ -342,11 +344,13 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS val (manager, history) = createManagerWithHistory(statuses) manager - .processCommand(DMCancelDeploymentCommand(processName, fooDeploymentId, User("user1", "user1"))) + .processCommand( + DMCancelDeploymentCommand(processName, DeploymentId(fooJobId.toHexString), User("user1", "user1")) + ) .futureValue shouldBe (()) - history should contain(HistoryEntry("cancel", Some(fooDeploymentId.value))) - history should not contain HistoryEntry("cancel", Some(barDeploymentId.value)) + history should contain(HistoryEntry("cancel", Some(fooJobId))) + history should not contain HistoryEntry("cancel", Some(barJobId)) } test("cancel duplicate processes which are in non terminal state") { @@ -355,8 +359,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS JobStatus.RUNNING.name(), JobStatus.FAILED.name() ) - statuses = - jobStatuses.map(status => JobOverview(UUID.randomUUID().toString, "test", 10L, 10L, status, tasksOverview())) + statuses = 
jobStatuses.map(status => JobOverview(generateJobId, "test", 10L, 10L, status, tasksOverview())) val (manager, history) = createManagerWithHistory(statuses) @@ -369,7 +372,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS } test("allow cancel but do not sent cancel request if process is failed") { - statuses = List(JobOverview("2343", "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) + statuses = List(JobOverview(sampleJobId, "p1", 10L, 10L, JobStatus.FAILED.name(), tasksOverview(failed = 1))) val (manager, history) = createManagerWithHistory(statuses, acceptCancel = false) manager.processCommand(DMCancelScenarioCommand(ProcessName("p1"), User("test_id", "Jack"))).futureValue shouldBe ( @@ -378,163 +381,11 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS history.filter(_.operation == "cancel") shouldBe Nil } - // TODO: extract test for InconsistentStateDetector - test("return failed status if two jobs running") { - statuses = List( - JobOverview("2343", "p1", 10L, 10L, JobStatus.RUNNING.name(), tasksOverview(running = 1)), - JobOverview("1111", "p1", 30L, 30L, JobStatus.RUNNING.name(), tasksOverview(running = 1)) - ) - - val manager = createManager(statuses) - val returnedStatuses = manager.getProcessStates(ProcessName("p1")).map(_.value).futureValue - InconsistentStateDetector.extractAtMostOneStatus(returnedStatuses) shouldBe Some( - StatusDetails( - ProblemStateStatus.MultipleJobsRunning, - Some(DeploymentId("1111")), - Some(ExternalDeploymentId("1111")), - Some( - ProcessVersion( - VersionId(1), - ProcessName("p1"), - ProcessId(123), - List.empty, - "user1", - None - ) - ), - startTime = Some(30L), - errors = List("Expected one job, instead: 1111 - RUNNING, 2343 - RUNNING") - ) - ) - } - - // TODO: extract test for InconsistentStateDetector - test("return failed status if two in non-terminal state") { - statuses = List( - JobOverview("2343", "p1", 10L, 10L, 
JobStatus.RUNNING.name(), tasksOverview(running = 1)), - JobOverview("1111", "p1", 30L, 30L, JobStatus.RESTARTING.name(), tasksOverview()) - ) - - val manager = createManager(statuses) - val returnedStatuses = manager.getProcessStates(ProcessName("p1")).map(_.value).futureValue - InconsistentStateDetector.extractAtMostOneStatus(returnedStatuses) shouldBe Some( - StatusDetails( - ProblemStateStatus.MultipleJobsRunning, - Some(DeploymentId("1111")), - Some(ExternalDeploymentId("1111")), - Some( - ProcessVersion( - VersionId(1), - ProcessName("p1"), - ProcessId(123), - List.empty, - "user1", - None - ) - ), - startTime = Some(30L), - errors = List("Expected one job, instead: 1111 - RESTARTING, 2343 - RUNNING") - ) - ) - } - - // TODO: extract test for InconsistentStateDetector - test("return running status if cancelled job has last-modification date later then running job") { - statuses = List( - JobOverview("2343", "p1", 20L, 10L, JobStatus.RUNNING.name(), tasksOverview(running = 1)), - JobOverview("1111", "p1", 30L, 5L, JobStatus.CANCELED.name(), tasksOverview(canceled = 1)), - JobOverview("2222", "p1", 30L, 5L, JobStatus.CANCELLING.name(), tasksOverview(canceling = 1)) - ) - - val manager = createManager(statuses) - val returnedStatuses = manager.getProcessStates(ProcessName("p1")).map(_.value).futureValue - InconsistentStateDetector.extractAtMostOneStatus(returnedStatuses) shouldBe Some( - StatusDetails( - SimpleStateStatus.Running, - Some(DeploymentId("2343")), - Some(ExternalDeploymentId("2343")), - Some( - ProcessVersion( - VersionId(1), - ProcessName("p1"), - ProcessId(123), - List.empty, - "user1", - None - ) - ), - startTime = Some(10L) - ) - ) - } - - // TODO: extract test for InconsistentStateDetector - test("return last terminal state if not running") { - statuses = List( - JobOverview("2343", "p1", 40L, 10L, JobStatus.FINISHED.name(), tasksOverview(finished = 1)), - JobOverview("1111", "p1", 35L, 30L, JobStatus.FINISHED.name(), tasksOverview(finished = 
1)) - ) - - val manager = createManager(statuses) - val returnedStatuses = manager.getProcessStates(ProcessName("p1")).map(_.value).futureValue - InconsistentStateDetector.extractAtMostOneStatus(returnedStatuses) shouldBe Some( - StatusDetails( - SimpleStateStatus.Finished, - Some(DeploymentId("2343")), - Some(ExternalDeploymentId("2343")), - Some( - ProcessVersion( - VersionId(1), - ProcessName("p1"), - ProcessId(123), - List.empty, - "user1", - None - ) - ), - startTime = Some(10L) - ) - ) - - } - - // TODO: extract test for InconsistentStateDetector - test("return non-terminal state if not running") { - statuses = List( - JobOverview("2343", "p1", 40L, 10L, JobStatus.FINISHED.name(), tasksOverview(finished = 1)), - JobOverview("1111", "p1", 35L, 30L, JobStatus.RESTARTING.name(), tasksOverview()) - ) - - val manager = createManager(statuses) - val returnedStatuses = manager.getProcessStates(ProcessName("p1")).map(_.value).futureValue - InconsistentStateDetector.extractAtMostOneStatus(returnedStatuses) shouldBe Some( - StatusDetails( - SimpleStateStatus.Restarting, - Some(DeploymentId("1111")), - Some(ExternalDeploymentId("1111")), - Some( - ProcessVersion( - VersionId(1), - ProcessName("p1"), - ProcessId(123), - List.empty, - "user1", - None - ) - ), - startTime = Some(30L) - ) - ) - } - test("return process version the same as configured") { - val jid = "2343" + val jid = sampleJobId val processName = ProcessName("p1") val version = 15L val deploymentId = "789" - val user = "user1" - val processId = ProcessId(6565L) - val labels = List("tag1", "tag2") statuses = List(JobOverview(jid, processName.value, 40L, 10L, JobStatus.FINISHED.name(), tasksOverview(finished = 1))) @@ -543,23 +394,18 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS jid -> ExecutionConfig( 1, Map( - "processId" -> fromString(processId.value.toString), "versionId" -> fromString(version.toString), "deploymentId" -> fromString(deploymentId), - "user" -> 
fromString(user), - "labels" -> fromValues(labels.map(fromString)) ) ) ) val manager = createManager(statuses) - manager.getProcessStates(processName).map(_.value).futureValue shouldBe List( - StatusDetails( + manager.getScenarioDeploymentsStatuses(processName).map(_.value).futureValue shouldBe List( + DeploymentStatusDetails( SimpleStateStatus.Finished, Some(DeploymentId(deploymentId)), - Some(ExternalDeploymentId("2343")), - Some(ProcessVersion(VersionId(version), processName, processId, labels, user, None)), - Some(10L) + Some(VersionId(version)), ) ) } @@ -589,13 +435,13 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS stubWithFixedDelay(durationLongerThanClientTimeout) a[SttpClientException.TimeoutException] shouldBe thrownBy { manager - .getProcessStates(ProcessName("p1")) + .getScenarioDeploymentsStatuses(ProcessName("p1")) .futureValueEnsuringInnerException(durationLongerThanClientTimeout) } stubWithFixedDelay(0.seconds) val resultWithoutDelay = manager - .getProcessStates(ProcessName("p1")) + .getScenarioDeploymentsStatuses(ProcessName("p1")) .map(_.value) .futureValue(Timeout(durationLongerThanClientTimeout.plus(1 second))) resultWithoutDelay shouldEqual List.empty @@ -629,7 +475,7 @@ class FlinkDeploymentManagerSpec extends AnyFunSuite with Matchers with PatientS private def buildRunningJobOverview(processName: ProcessName): JobOverview = { JobOverview( - jid = "1111", + jid = sampleJobId, name = processName.value, `last-modification` = System.currentTimeMillis(), `start-time` = System.currentTimeMillis(), diff --git a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkProcessStateSpec.scala b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkProcessStateSpec.scala deleted file mode 100644 index f85af8d6ae9..00000000000 --- a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkProcessStateSpec.scala +++ /dev/null @@ -1,42 +0,0 
@@ -package pl.touk.nussknacker.engine.management - -import org.scalatest.Inside -import org.scalatest.funspec.AnyFunSpec -import org.scalatest.matchers.should.Matchers -import pl.touk.nussknacker.engine.api.ProcessVersion -import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus -import pl.touk.nussknacker.engine.api.deployment.{ProcessState, ScenarioActionName, StateStatus, StatusDetails} -import pl.touk.nussknacker.engine.api.process.VersionId -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId - -class FlinkProcessStateSpec extends AnyFunSpec with Matchers with Inside { - - def createProcessState(stateStatus: StateStatus): ProcessState = - FlinkProcessStateDefinitionManager.processState( - StatusDetails(stateStatus, None, Some(ExternalDeploymentId("12")), Some(ProcessVersion.empty)), - VersionId(1), - None, - None, - ) - - it("scenario state should be during deploy") { - val state = createProcessState(SimpleStateStatus.DuringDeploy) - state.allowedActions shouldBe List(ScenarioActionName.Cancel) - } - - it("scenario state should be running") { - val state = createProcessState(SimpleStateStatus.Running) - state.allowedActions shouldBe List(ScenarioActionName.Cancel, ScenarioActionName.Pause, ScenarioActionName.Deploy) - } - - it("scenario state should be finished") { - val state = createProcessState(SimpleStateStatus.Finished) - state.allowedActions shouldBe List(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) - } - - it("scenario state should be restarting") { - val state = createProcessState(SimpleStateStatus.Restarting) - state.allowedActions shouldBe List(ScenarioActionName.Cancel) - } - -} diff --git a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkScenarioStatusDtoSpec.scala b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkScenarioStatusDtoSpec.scala new file mode 100644 index 00000000000..a049322740b --- /dev/null +++ 
b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkScenarioStatusDtoSpec.scala @@ -0,0 +1,44 @@ +package pl.touk.nussknacker.engine.management + +import org.scalatest.Inside +import org.scalatest.funsuite.AnyFunSuiteLike +import org.scalatest.matchers.should.Matchers +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.{ + ScenarioStatusPresentationDetails, + ScenarioStatusWithScenarioContext +} +import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus +import pl.touk.nussknacker.engine.api.deployment.{ScenarioActionName, StateStatus} + +class FlinkScenarioStatusDtoSpec extends AnyFunSuiteLike with Matchers with Inside { + + def statusPresentation(stateStatus: StateStatus): ScenarioStatusPresentationDetails = + FlinkProcessStateDefinitionManager.statusPresentation( + ScenarioStatusWithScenarioContext( + stateStatus, + None, + None, + ) + ) + + test("scenario state should be during deploy") { + val state = statusPresentation(SimpleStateStatus.DuringDeploy) + state.allowedActions shouldBe Set(ScenarioActionName.Cancel) + } + + test("scenario state should be running") { + val state = statusPresentation(SimpleStateStatus.Running) + state.allowedActions shouldBe Set(ScenarioActionName.Cancel, ScenarioActionName.Pause, ScenarioActionName.Deploy) + } + + test("scenario state should be finished") { + val state = statusPresentation(SimpleStateStatus.Finished) + state.allowedActions shouldBe Set(ScenarioActionName.Deploy, ScenarioActionName.Archive, ScenarioActionName.Rename) + } + + test("scenario state should be restarting") { + val state = statusPresentation(SimpleStateStatus.Restarting) + state.allowedActions shouldBe Set(ScenarioActionName.Cancel) + } + +} diff --git a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkSlotsCheckerTest.scala b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkSlotsCheckerTest.scala index 
3d3fb89d36b..a13eddfe423 100644 --- a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkSlotsCheckerTest.scala +++ b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/FlinkSlotsCheckerTest.scala @@ -1,20 +1,21 @@ package pl.touk.nussknacker.engine.management +import org.apache.flink.api.common.JobID import org.apache.flink.configuration.{Configuration, CoreOptions} import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers import pl.touk.nussknacker.engine.build.ScenarioBuilder -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import pl.touk.nussknacker.engine.management.FlinkSlotsChecker.{NotEnoughSlotsException, SlotsBalance} import pl.touk.nussknacker.engine.management.rest.HttpFlinkClient import pl.touk.nussknacker.engine.management.rest.flinkRestModel._ +import pl.touk.nussknacker.engine.management.utils.JobIdGenerator.generateJobId import pl.touk.nussknacker.test.PatientScalaFutures import sttp.client3.testing.SttpBackendStub import sttp.client3.{Response, SttpBackend, SttpClientException} import sttp.model.{Method, StatusCode} import java.net.{ConnectException, URI} -import java.util.Collections +import java.util.{Collections, UUID} import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} @@ -40,12 +41,13 @@ class FlinkSlotsCheckerTest extends AnyFunSuite with Matchers with PatientScalaF } test("take an account of slots that will be released be job that will be cancelled during redeploy") { - val slotsChecker = createSlotsChecker() + val slotsChecker = createSlotsChecker() + val someCurrentJobId = generateJobId // +1 because someCurrentJobId uses one slot now slotsChecker .checkRequiredSlotsExceedAvailableSlots( prepareCanonicalProcess(Some(availableSlotsCount + 1)), - List(ExternalDeploymentId("someCurrentJobId")) + List(someCurrentJobId) ) .futureValue @@ -53,7 +55,7 @@ class 
FlinkSlotsCheckerTest extends AnyFunSuite with Matchers with PatientScalaF slotsChecker .checkRequiredSlotsExceedAvailableSlots( prepareCanonicalProcess(Some(requestedSlotsCount)), - List(ExternalDeploymentId("someCurrentJobId")) + List(someCurrentJobId) ) .failed .futureValue shouldEqual @@ -98,8 +100,11 @@ class FlinkSlotsCheckerTest extends AnyFunSuite with Matchers with PatientScalaF val toReturn = (req.uri.path, req.method) match { case (List("jobs", "overview"), Method.GET) => JobsResponse(statuses) - case (List("jobs", jobId, "config"), Method.GET) => - JobConfig(jobId, ExecutionConfig(`job-parallelism` = 1, `user-config` = Map.empty)) + case (List("jobs", jobIdString, "config"), Method.GET) => + JobConfig( + JobID.fromHexString(jobIdString), + ExecutionConfig(`job-parallelism` = 1, `user-config` = Map.empty) + ) case (List("overview"), Method.GET) => clusterOverviewResult.recoverWith { case ex: Exception => Failure(SttpClientException.defaultExceptionToSttpClientException(req, ex).get) diff --git a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClientTest.scala b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClientTest.scala index 98880dcbf5b..cdf60e43c9a 100644 --- a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClientTest.scala +++ b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/CachedFlinkClientTest.scala @@ -1,7 +1,7 @@ package pl.touk.nussknacker.engine.management.rest import io.circe.Json -import org.apache.flink.api.common.JobStatus +import org.apache.flink.api.common.{JobID, JobStatus} import org.mockito.ArgumentMatchers.any import org.mockito.Mockito.{times, verify, when} import org.mockito.invocation.InvocationOnMock @@ -11,8 +11,10 @@ import org.scalatest.matchers.should.Matchers import org.scalatestplus.mockito.MockitoSugar import 
pl.touk.nussknacker.engine.api.deployment.{DataFreshnessPolicy, WithDataFreshnessStatus} import pl.touk.nussknacker.engine.management.rest.flinkRestModel.{JobOverview, JobTasksOverview} +import pl.touk.nussknacker.engine.management.utils.JobIdGenerator.generateJobId import pl.touk.nussknacker.test.PatientScalaFutures +import java.util.UUID import scala.concurrent.Future import scala.concurrent.duration._ @@ -23,6 +25,8 @@ class CachedFlinkClientTest with Matchers with OptionValues { + private val sampleJobId = generateJobId + test("should ask delegate for a fresh jobs by name each time") { val delegate = prepareMockedFlinkClient val cachingFlinkClient = new CachedFlinkClient(delegate, 10 seconds, 10) @@ -56,21 +60,21 @@ class CachedFlinkClientTest val cachingFlinkClient = new CachedFlinkClient(delegate, 10 seconds, 10) val results = List( - cachingFlinkClient.getJobConfig("foo").futureValue, - cachingFlinkClient.getJobConfig("foo").futureValue, - cachingFlinkClient.getJobConfig("foo").futureValue, + cachingFlinkClient.getJobConfig(sampleJobId).futureValue, + cachingFlinkClient.getJobConfig(sampleJobId).futureValue, + cachingFlinkClient.getJobConfig(sampleJobId).futureValue, ) results.map(_.`user-config`.get("time")).distinct should have size 1 - verify(delegate, times(1)).getJobConfig(any[String]) + verify(delegate, times(1)).getJobConfig(any[JobID]) } test("shouldn't cache job configs with missing deploymentId") { val delegate = mock[FlinkClient] val cachingFlinkClient = new CachedFlinkClient(delegate, 10 seconds, 10) - when(delegate.getJobConfig(any[String])).thenAnswer { _: InvocationOnMock => + when(delegate.getJobConfig(any[JobID])).thenAnswer { _: InvocationOnMock => val config = flinkRestModel.ExecutionConfig( `job-parallelism` = 1, `user-config` = Map.empty @@ -78,12 +82,12 @@ class CachedFlinkClientTest Future.successful(config) } cachingFlinkClient - .getJobConfig("foo") + .getJobConfig(sampleJobId) .futureValue .`user-config` 
.get(CachedFlinkClient.DeploymentIdUserConfigKey) shouldBe empty - when(delegate.getJobConfig(any[String])).thenAnswer { _: InvocationOnMock => + when(delegate.getJobConfig(any[JobID])).thenAnswer { _: InvocationOnMock => val config = flinkRestModel.ExecutionConfig( `job-parallelism` = 1, `user-config` = Map(CachedFlinkClient.DeploymentIdUserConfigKey -> Json.fromString("someDeploymentId")) @@ -91,7 +95,7 @@ class CachedFlinkClientTest Future.successful(config) } cachingFlinkClient - .getJobConfig("foo") + .getJobConfig(sampleJobId) .futureValue .`user-config` .get(CachedFlinkClient.DeploymentIdUserConfigKey) shouldBe defined @@ -103,7 +107,7 @@ class CachedFlinkClientTest when(delegate.getJobsOverviews()(any[DataFreshnessPolicy])).thenAnswer { _: InvocationOnMock => val jobs = List( JobOverview( - "123", + sampleJobId, "p1", 10L, 10L, @@ -115,7 +119,7 @@ class CachedFlinkClientTest Future.successful(WithDataFreshnessStatus.fresh(jobs)) } - when(delegate.getJobConfig(any[String])).thenAnswer { _: InvocationOnMock => + when(delegate.getJobConfig(any[JobID])).thenAnswer { _: InvocationOnMock => val config = flinkRestModel.ExecutionConfig( `job-parallelism` = 1, `user-config` = Map( diff --git a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/FlinkHttpClientTest.scala b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/FlinkHttpClientTest.scala index bc5588b13e3..28b0a298582 100644 --- a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/FlinkHttpClientTest.scala +++ b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/rest/FlinkHttpClientTest.scala @@ -5,9 +5,8 @@ import org.scalatest.concurrent.ScalaFutures import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers import org.scalatest.time.Span.convertSpanToDuration -import pl.touk.nussknacker.engine.deployment.ExternalDeploymentId import 
pl.touk.nussknacker.engine.management.rest.flinkRestModel.{JarFile, JarsResponse, UploadJarResponse} -import pl.touk.nussknacker.engine.newdeployment.DeploymentId +import pl.touk.nussknacker.engine.management.utils.JobIdGenerator.generateJobId import pl.touk.nussknacker.engine.sttp.HttpClientError import pl.touk.nussknacker.test.PatientScalaFutures import sttp.client3.testing.SttpBackendStub @@ -29,7 +28,7 @@ class FlinkHttpClientTest extends AnyFunSuite with Matchers with ScalaFutures wi private val jarFile = new File(s"/tmp/${jarFileName}") private val jarId = s"${UUID.randomUUID()}-example.jar" private val flinkJarFile = JarFile(jarId, jarFileName) - private val deploymentId = ExternalDeploymentId("someDeploymentId") + private val jobId = generateJobId test("uploadJarFileIfNotExists - should upload jar") { implicit val backend: SttpBackendStub[Future, Any] = SttpBackendStub.asynchronousFuture.whenRequestMatchesPartial { @@ -119,7 +118,7 @@ class FlinkHttpClientTest extends AnyFunSuite with Matchers with ScalaFutures wi ) case req if req.uri.path == List("jars", jarId, "run") => Future.failed(HttpError("Error, error".asJson.noSpaces, StatusCode.InternalServerError)) - case req if req.uri.path == List("jobs", deploymentId.value) && req.method == Method.PATCH => + case req if req.uri.path == List("jobs", jobId.toHexString) && req.method == Method.PATCH => Future.failed(HttpError("Error, error".asJson.noSpaces, StatusCode.InternalServerError)) }, None @@ -133,8 +132,8 @@ class FlinkHttpClientTest extends AnyFunSuite with Matchers with ScalaFutures wi } } - checkIfWrapped(flinkClient.cancel(deploymentId)) - checkIfWrapped(flinkClient.runProgram(jarFile, "any", Nil, None, Some(DeploymentId.generate.toString))) + checkIfWrapped(flinkClient.cancel(jobId)) + checkIfWrapped(flinkClient.runProgram(jarFile, "any", Nil, None, Some(jobId))) } private def createHttpClient(implicit backend: SttpBackend[Future, Any]) = diff --git 
a/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/utils/JobIdGenerator.scala b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/utils/JobIdGenerator.scala new file mode 100644 index 00000000000..6887b16f7e1 --- /dev/null +++ b/engine/flink/management/src/test/scala/pl/touk/nussknacker/engine/management/utils/JobIdGenerator.scala @@ -0,0 +1,14 @@ +package pl.touk.nussknacker.engine.management.utils + +import org.apache.flink.api.common.JobID + +import java.util.UUID + +object JobIdGenerator { + + def generateJobId: JobID = { + val uuid = UUID.randomUUID() + new JobID(uuid.getLeastSignificantBits, uuid.getMostSignificantBits) + } + +} diff --git a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterConfig.scala b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterConfig.scala index da2d6c873c6..1a9b33c234a 100644 --- a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterConfig.scala +++ b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterConfig.scala @@ -6,6 +6,5 @@ import scala.concurrent.duration.{DurationInt, FiniteDuration} final case class FlinkMiniClusterConfig( config: Configuration = new Configuration, - streamExecutionEnvConfig: Configuration = new Configuration, waitForJobManagerRestAPIAvailableTimeout: FiniteDuration = 10.seconds ) diff --git a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterFactory.scala b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterFactory.scala index dd738c2b0ff..3fa929c1e9e 100644 --- a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterFactory.scala +++ 
b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterFactory.scala @@ -48,7 +48,7 @@ object FlinkMiniClusterFactory extends LazyLogging { stateVerificationConfig: ScenarioStateVerificationConfig, ): Option[FlinkMiniClusterWithServices] = { if (useMiniClusterForDeployment || scenarioTestingConfig.reuseSharedMiniCluster || stateVerificationConfig.reuseSharedMiniCluster) { - Some(createMiniClusterWithServices(modelClassLoader, config.config, config.streamExecutionEnvConfig)) + Some(createMiniClusterWithServices(modelClassLoader, config.config)) } else { None } @@ -56,19 +56,16 @@ object FlinkMiniClusterFactory extends LazyLogging { def createUnitTestsMiniClusterWithServices( miniClusterConfigOverrides: Configuration = new Configuration, - streamExecutionConfigOverrides: Configuration = new Configuration ): FlinkMiniClusterWithServices = { createMiniClusterWithServices( ModelClassLoader.flinkWorkAroundEmptyClassloader, miniClusterConfigOverrides, - streamExecutionConfigOverrides ) } def createMiniClusterWithServices( modelClassLoader: URLClassLoader, miniClusterConfigOverrides: Configuration, - streamExecutionConfigOverrides: Configuration ): FlinkMiniClusterWithServices = { val miniClusterConfig = DefaultMiniClusterConfig miniClusterConfig.addAll(miniClusterConfigOverrides) @@ -85,7 +82,6 @@ object FlinkMiniClusterFactory extends LazyLogging { FlinkMiniClusterStreamExecutionEnvironmentFactory.createStreamExecutionEnvironment( miniCluster, modelClassLoader, - streamExecutionConfigOverrides, attached ) } diff --git a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterStreamExecutionEnvironmentFactory.scala b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterStreamExecutionEnvironmentFactory.scala index 58b91447fa6..7ddac0d4e88 100644 --- 
a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterStreamExecutionEnvironmentFactory.scala +++ b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/FlinkMiniClusterStreamExecutionEnvironmentFactory.scala @@ -1,8 +1,8 @@ package pl.touk.nussknacker.engine.flink.minicluster -import org.apache.flink.api.common.JobSubmissionResult +import org.apache.flink.api.common.{JobID, JobSubmissionResult} import org.apache.flink.api.dag.Pipeline -import org.apache.flink.configuration.{Configuration, DeploymentOptions, PipelineOptions} +import org.apache.flink.configuration.{Configuration, DeploymentOptions, PipelineOptions, PipelineOptionsInternal} import org.apache.flink.core.execution.{PipelineExecutor, PipelineExecutorFactory, PipelineExecutorServiceLoader} import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings import org.apache.flink.runtime.minicluster.{MiniCluster, MiniClusterJobClient} @@ -20,10 +20,10 @@ object FlinkMiniClusterStreamExecutionEnvironmentFactory { def createStreamExecutionEnvironment( miniCluster: MiniCluster, modelClassLoader: URLClassLoader, - configuration: Configuration, attached: Boolean ): StreamExecutionEnvironment = { val pipelineExecutorServiceLoader = createPipelineExecutorServiceLoader(miniCluster, modelClassLoader) + val configuration = new Configuration() configuration.set(DeploymentOptions.TARGET, pipelineExecutorName) configuration.set(PipelineOptions.CLASSPATHS, modelClassLoader.getURLs.map(_.toString).toList.asJava) configuration.set[java.lang.Boolean](DeploymentOptions.ATTACHED, attached) @@ -50,7 +50,10 @@ object FlinkMiniClusterStreamExecutionEnvironmentFactory { (pipeline: Pipeline, _: Configuration, userCodeClassloader: ClassLoader) => { pipeline match { case streamGraph: StreamGraph => - val jobGraph = streamGraph.getJobGraph(userCodeClassloader, null) + val jobId = Option(configuration.get(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID)) + 
.map(JobID.fromHexString) + .orNull + val jobGraph = streamGraph.getJobGraph(userCodeClassloader, jobId) jobGraph.setClasspaths(modelClassLoader.getURLs.toList.asJava) if (jobGraph.getSavepointRestoreSettings == SavepointRestoreSettings.none) jobGraph.setSavepointRestoreSettings(streamGraph.getSavepointRestoreSettings) diff --git a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/scenariotesting/legacysingleuseminicluster/LegacyFallbackToSingleUseMiniClusterHandler.scala b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/scenariotesting/legacysingleuseminicluster/LegacyFallbackToSingleUseMiniClusterHandler.scala index 95011aecaeb..06abba558e7 100644 --- a/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/scenariotesting/legacysingleuseminicluster/LegacyFallbackToSingleUseMiniClusterHandler.scala +++ b/engine/flink/minicluster/src/main/scala/pl/touk/nussknacker/engine/flink/minicluster/scenariotesting/legacysingleuseminicluster/LegacyFallbackToSingleUseMiniClusterHandler.scala @@ -65,7 +65,6 @@ class LegacyFallbackToSingleUseMiniClusterHandler(modelClassLoader: URLClassLoad FlinkMiniClusterFactory.createMiniClusterWithServices( modelClassLoader, legacyMiniClusterConfigOverrides, - new Configuration() ) } diff --git a/engine/flink/test-utils/src/main/scala/pl/touk/nussknacker/engine/flink/test/FlinkSpec.scala b/engine/flink/test-utils/src/main/scala/pl/touk/nussknacker/engine/flink/test/FlinkSpec.scala index 8c70d4408a9..9d999be9bc9 100644 --- a/engine/flink/test-utils/src/main/scala/pl/touk/nussknacker/engine/flink/test/FlinkSpec.scala +++ b/engine/flink/test-utils/src/main/scala/pl/touk/nussknacker/engine/flink/test/FlinkSpec.scala @@ -24,7 +24,6 @@ trait FlinkSpec extends BeforeAndAfterAll with BeforeAndAfter with WithConfig { flinkMiniCluster = FlinkMiniClusterFactory.createMiniClusterWithServices( ModelClassLoader.flinkWorkAroundEmptyClassloader, 
prepareFlinkConfiguration(), - new Configuration() ) } diff --git a/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedDeploymentManager.scala b/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedDeploymentManager.scala index 8352a7b9e0e..03567b2764e 100644 --- a/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedDeploymentManager.scala +++ b/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedDeploymentManager.scala @@ -5,7 +5,7 @@ import pl.touk.nussknacker.engine.api._ import pl.touk.nussknacker.engine.api.deployment._ import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus -import pl.touk.nussknacker.engine.api.process.ProcessName +import pl.touk.nussknacker.engine.api.process.{ProcessName, VersionId} import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.deployment.{DeploymentData, DeploymentId, ExternalDeploymentId} import pl.touk.nussknacker.engine.{ModelData, newdeployment} @@ -27,8 +27,7 @@ class EmbeddedDeploymentManager( deploymentStrategy: DeploymentStrategy )(implicit ec: ExecutionContext) extends LiteDeploymentManager - with LazyLogging - with DeploymentManagerInconsistentStateHandlerMixIn { + with LazyLogging { private val retrieveDeployedScenariosTimeout = 10.seconds @@ -123,7 +122,11 @@ class EmbeddedDeploymentManager( case Success(_) => logger.debug(s"Deployed scenario $processVersion") } - processVersion.processName -> ScenarioDeploymentData(deploymentData.deploymentId, processVersion, interpreterTry) + processVersion.processName -> ScenarioDeploymentData( + deploymentData.deploymentId, + processVersion.versionId, + interpreterTry + ) } private def runInterpreter(processVersion: ProcessVersion, 
parsedResolvedScenario: CanonicalProcess) = { @@ -168,23 +171,22 @@ class EmbeddedDeploymentManager( } } - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { + override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { Future.successful( WithDataFreshnessStatus.fresh( deployments - .get(name) + .get(scenarioName) .map { interpreterData => - StatusDetails( + DeploymentStatusDetails( status = interpreterData.scenarioDeployment .fold( _ => ProblemStateStatus(s"Scenario compilation errors"), deployment => SimpleStateStatus.fromDeploymentStatus(deployment.status()) ), deploymentId = Some(interpreterData.deploymentId), - externalDeploymentId = Some(ExternalDeploymentId(interpreterData.deploymentId.value)), - version = Some(interpreterData.processVersion) + version = Some(interpreterData.scenarioVersionId) ) } .toList @@ -211,7 +213,8 @@ class EmbeddedDeploymentManager( } - override def stateQueryForAllScenariosSupport: StateQueryForAllScenariosSupport = NoStateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + NoDeploymentsStatusesQueryForAllScenariosSupport override def schedulingSupport: SchedulingSupport = NoSchedulingSupport @@ -227,7 +230,7 @@ class EmbeddedDeploymentManager( private sealed case class ScenarioDeploymentData( deploymentId: DeploymentId, - processVersion: ProcessVersion, + scenarioVersionId: VersionId, scenarioDeployment: Try[Deployment] ) diff --git a/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedProcessStateDefinitionManager.scala b/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedProcessStateDefinitionManager.scala index 
83315d6d799..76024d1aa90 100644 --- a/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedProcessStateDefinitionManager.scala +++ b/engine/lite/embeddedDeploymentManager/src/main/scala/pl/touk/nussknacker/engine/embedded/EmbeddedProcessStateDefinitionManager.scala @@ -1,6 +1,6 @@ package pl.touk.nussknacker.engine.embedded -import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ProcessStatus +import pl.touk.nussknacker.engine.api.deployment.ProcessStateDefinitionManager.ScenarioStatusWithScenarioContext import pl.touk.nussknacker.engine.api.deployment.simple.{SimpleProcessStateDefinitionManager, SimpleStateStatus} import pl.touk.nussknacker.engine.api.deployment.{OverridingProcessStateDefinitionManager, ScenarioActionName} @@ -11,7 +11,7 @@ import pl.touk.nussknacker.engine.api.deployment.{OverridingProcessStateDefiniti object EmbeddedProcessStateDefinitionManager extends OverridingProcessStateDefinitionManager( delegate = SimpleProcessStateDefinitionManager, - statusActionsPF = { case ProcessStatus(SimpleStateStatus.Restarting, _, _, _) => - List(ScenarioActionName.Cancel) + statusActionsPF = { case ScenarioStatusWithScenarioContext(SimpleStateStatus.Restarting, _, _) => + Set(ScenarioActionName.Cancel) } ) diff --git a/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/RequestResponseEmbeddedDeploymentManagerTest.scala b/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/RequestResponseEmbeddedDeploymentManagerTest.scala index 5f4992e1a19..a62727d81b7 100644 --- a/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/RequestResponseEmbeddedDeploymentManagerTest.scala +++ b/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/RequestResponseEmbeddedDeploymentManagerTest.scala @@ -129,7 +129,9 @@ class 
RequestResponseEmbeddedDeploymentManagerTest fixture.deployScenario(scenario) eventually { - manager.getProcessStates(name).futureValue.value.map(_.status) shouldBe List(SimpleStateStatus.Running) + manager.getScenarioDeploymentsStatuses(name).futureValue.value.map(_.status) shouldBe List( + SimpleStateStatus.Running + ) } request.body("""{ productId: 15 }""").send(backend).body shouldBe Right("""{"transformed":15}""") @@ -149,7 +151,7 @@ class RequestResponseEmbeddedDeploymentManagerTest manager.processCommand(DMCancelScenarioCommand(name, User("a", "b"))).futureValue - manager.getProcessStates(name).futureValue.value shouldBe List.empty + manager.getScenarioDeploymentsStatuses(name).futureValue.value shouldBe List.empty request.body("""{ productId: 15 }""").send(backend).code shouldBe StatusCode.NotFound } diff --git a/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerRestartTest.scala b/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerRestartTest.scala index 814529cf5f1..33293c95639 100644 --- a/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerRestartTest.scala +++ b/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerRestartTest.scala @@ -42,7 +42,7 @@ class StreamingEmbeddedDeploymentManagerRestartTest extends BaseStreamingEmbedde kafkaServerWithDependencies.shutdownKafkaServer() eventually { - val jobStatuses = manager.getProcessStates(name).futureValue.value + val jobStatuses = manager.getScenarioDeploymentsStatuses(name).futureValue.value jobStatuses.map(_.status) shouldBe List(SimpleStateStatus.Restarting) } @@ -51,7 +51,9 @@ class StreamingEmbeddedDeploymentManagerRestartTest extends BaseStreamingEmbedde kafkaServerWithDependencies.startupKafkaServer() 
eventually { - manager.getProcessStates(name).futureValue.value.map(_.status) shouldBe List(SimpleStateStatus.Running) + manager.getScenarioDeploymentsStatuses(name).futureValue.value.map(_.status) shouldBe List( + SimpleStateStatus.Running + ) } } diff --git a/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerTest.scala b/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerTest.scala index aaae53dec82..5def40bad13 100644 --- a/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerTest.scala +++ b/engine/lite/embeddedDeploymentManager/src/test/scala/pl/touk/nussknacker/streaming/embedded/StreamingEmbeddedDeploymentManagerTest.scala @@ -57,7 +57,9 @@ class StreamingEmbeddedDeploymentManagerTest } eventually { - manager.getProcessStates(name).futureValue.value.map(_.status) shouldBe List(SimpleStateStatus.Running) + manager.getScenarioDeploymentsStatuses(name).futureValue.value.map(_.status) shouldBe List( + SimpleStateStatus.Running + ) } val input = obj("productId" -> fromInt(10)) @@ -67,7 +69,7 @@ class StreamingEmbeddedDeploymentManagerTest wrapInFailingLoader { manager.processCommand(DMCancelScenarioCommand(name, User("a", "b"))).futureValue } - manager.getProcessStates(name).futureValue.value shouldBe List.empty + manager.getScenarioDeploymentsStatuses(name).futureValue.value shouldBe List.empty } test("Run persisted scenario deployments") { @@ -98,7 +100,9 @@ class StreamingEmbeddedDeploymentManagerTest val FixtureParam(manager, _, _, _) = prepareFixture(inputTopic, outputTopic, List(deployedScenarioData)) eventually { - manager.getProcessStates(name).futureValue.value.map(_.status) shouldBe List(SimpleStateStatus.Running) + manager.getScenarioDeploymentsStatuses(name).futureValue.value.map(_.status) shouldBe List( + SimpleStateStatus.Running 
+ ) } val input = obj("productId" -> fromInt(10)) @@ -107,7 +111,7 @@ class StreamingEmbeddedDeploymentManagerTest kafkaClient.createConsumer().consumeWithJson[Json](outputTopic.name).take(1).head.message() shouldBe input manager.processCommand(DMCancelScenarioCommand(name, User("a", "b"))).futureValue - manager.getProcessStates(name).futureValue.value shouldBe List.empty + manager.getScenarioDeploymentsStatuses(name).futureValue.value shouldBe List.empty } test("Run persisted scenario deployment with scenario json incompatible with current component API") { @@ -141,8 +145,8 @@ class StreamingEmbeddedDeploymentManagerTest ) val FixtureParam(manager, _, _, _) = prepareFixture(inputTopic, outputTopic, List(deployedScenarioData)) - manager.getProcessStates(name).futureValue.value.map(_.status) should matchPattern { - case ProblemStateStatus("Scenario compilation errors", _) :: Nil => + manager.getScenarioDeploymentsStatuses(name).futureValue.value.map(_.status) should matchPattern { + case ProblemStateStatus("Scenario compilation errors", _, _) :: Nil => } } @@ -228,7 +232,9 @@ class StreamingEmbeddedDeploymentManagerTest fixture.deployScenario(scenarioForOutput("next")) eventually { - manager.getProcessStates(name).futureValue.value.map(_.status) shouldBe List(SimpleStateStatus.Running) + manager.getScenarioDeploymentsStatuses(name).futureValue.value.map(_.status) shouldBe List( + SimpleStateStatus.Running + ) } kafkaClient.sendMessage(inputTopic.name, message("2")).futureValue @@ -239,7 +245,7 @@ class StreamingEmbeddedDeploymentManagerTest manager.processCommand(DMCancelScenarioCommand(name, User("a", "b"))).futureValue - manager.getProcessStates(name).futureValue.value shouldBe List.empty + manager.getScenarioDeploymentsStatuses(name).futureValue.value shouldBe List.empty } test("Performs test from file") { diff --git a/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManager.scala 
b/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManager.scala index 5c83b0b9aa1..3a4a91a8ffc 100644 --- a/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManager.scala +++ b/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManager.scala @@ -49,8 +49,7 @@ class K8sDeploymentManager( rawConfig: Config, dependencies: DeploymentManagerDependencies ) extends LiteDeploymentManager - with LazyLogging - with DeploymentManagerInconsistentStateHandlerMixIn { + with LazyLogging { import dependencies._ @@ -316,17 +315,16 @@ class K8sDeploymentManager( } } - override def getProcessStates( - name: ProcessName - )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[StatusDetails]]] = { - val mapper = new K8sDeploymentStatusMapper(processStateDefinitionManager) + override def getScenarioDeploymentsStatuses( + scenarioName: ProcessName + )(implicit freshnessPolicy: DataFreshnessPolicy): Future[WithDataFreshnessStatus[List[DeploymentStatusDetails]]] = { for { deployments <- scenarioStateK8sClient - .listSelected[ListResource[Deployment]](requirementForName(name)) + .listSelected[ListResource[Deployment]](requirementForName(scenarioName)) .map(_.items) - pods <- scenarioStateK8sClient.listSelected[ListResource[Pod]](requirementForName(name)).map(_.items) + pods <- scenarioStateK8sClient.listSelected[ListResource[Pod]](requirementForName(scenarioName)).map(_.items) } yield { - WithDataFreshnessStatus.fresh(deployments.map(mapper.status(_, pods))) + WithDataFreshnessStatus.fresh(deployments.map(K8sDeploymentStatusMapper.status(_, pods))) } } @@ -387,7 +385,8 @@ class K8sDeploymentManager( // for each scenario in this case and where store the deploymentId override def deploymentSynchronisationSupport: DeploymentSynchronisationSupport = NoDeploymentSynchronisationSupport - override def stateQueryForAllScenariosSupport: 
StateQueryForAllScenariosSupport = NoStateQueryForAllScenariosSupport + override def deploymentsStatusesQueryForAllScenariosSupport: DeploymentsStatusesQueryForAllScenariosSupport = + NoDeploymentsStatusesQueryForAllScenariosSupport override def schedulingSupport: SchedulingSupport = NoSchedulingSupport } diff --git a/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapper.scala b/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapper.scala index ebbd1c9c926..c38512a1410 100644 --- a/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapper.scala +++ b/engine/lite/k8sDeploymentManager/src/main/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapper.scala @@ -1,28 +1,16 @@ package pl.touk.nussknacker.k8s.manager +import cats.data.NonEmptyList import com.typesafe.scalalogging.LazyLogging -import io.circe.Json import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus -import pl.touk.nussknacker.engine.api.deployment.{ - ProcessState, - ProcessStateDefinitionManager, - StateStatus, - StatusDetails -} +import pl.touk.nussknacker.engine.api.deployment.{DeploymentStatusDetails, ScenarioActionName, StateStatus} import pl.touk.nussknacker.k8s.manager.K8sDeploymentManager.parseVersionAnnotation -import pl.touk.nussknacker.k8s.manager.K8sDeploymentStatusMapper.{ - availableCondition, - crashLoopBackOffReason, - newReplicaSetAvailable, - progressingCondition, - replicaFailureCondition, - trueConditionStatus -} -import skuber.{Container, Pod} import skuber.apps.v1.Deployment +import skuber.{Container, Pod} -object K8sDeploymentStatusMapper { +//Based on https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#deployment-status +object K8sDeploymentStatusMapper extends LazyLogging { private val 
availableCondition = "Available" @@ -35,44 +23,39 @@ object K8sDeploymentStatusMapper { private val crashLoopBackOffReason = "CrashLoopBackOff" private val newReplicaSetAvailable = "NewReplicaSetAvailable" -} - -//Based on https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#deployment-status -class K8sDeploymentStatusMapper(definitionManager: ProcessStateDefinitionManager) extends LazyLogging { private[manager] def findStatusForDeploymentsAndPods( deployments: List[Deployment], pods: List[Pod] - ): Option[StatusDetails] = { + ): Option[DeploymentStatusDetails] = { deployments match { case Nil => None case one :: Nil => Some(status(one, pods)) case duplicates => Some( - StatusDetails( - ProblemStateStatus.MultipleJobsRunning, + DeploymentStatusDetails( + ProblemStateStatus( + description = "More than one deployment is running.", + allowedActions = Set(ScenarioActionName.Cancel), + tooltip = Some(s"Expected one deployment, instead: ${duplicates.map(_.metadata.name).mkString(", ")}") + ), + None, None, - errors = List(s"Expected one deployment, instead: ${duplicates.map(_.metadata.name).mkString(", ")}") ) ) } } - private[manager] def status(deployment: Deployment, pods: List[Pod]): StatusDetails = { - val (status, attrs, errors) = deployment.status match { - case None => (SimpleStateStatus.DuringDeploy, None, Nil) + private[manager] def status(deployment: Deployment, pods: List[Pod]): DeploymentStatusDetails = { + val status = deployment.status match { + case None => SimpleStateStatus.DuringDeploy case Some(status) => mapStatusWithPods(status, pods) } - val startTime = deployment.metadata.creationTimestamp.map(_.toInstant.toEpochMilli) - StatusDetails( - status, + DeploymentStatusDetails( + status = status, // TODO: return internal deploymentId, probably computed based on some hash to make sure that it will change only when something in scenario change - None, - None, - parseVersionAnnotation(deployment), - startTime, - attrs, - errors + 
deploymentId = None, + version = parseVersionAnnotation(deployment).map(_.versionId), ) } @@ -80,24 +63,34 @@ class K8sDeploymentStatusMapper(definitionManager: ProcessStateDefinitionManager private[manager] def mapStatusWithPods( status: Deployment.Status, pods: List[Pod] - ): (StateStatus, Option[Json], List[String]) = { + ): StateStatus = { def condition(name: String): Option[Deployment.Condition] = status.conditions.find(cd => cd.`type` == name) def anyContainerInState(state: Container.State) = pods.flatMap(_.status.toList).flatMap(_.containerStatuses).exists(_.state.exists(_ == state)) (condition(availableCondition), condition(progressingCondition), condition(replicaFailureCondition)) match { case (Some(available), None | ProgressingNewReplicaSetAvailable(), _) if isTrue(available) => - (SimpleStateStatus.Running, None, Nil) + SimpleStateStatus.Running case (_, Some(progressing), _) if isTrue(progressing) && anyContainerInState(Container.Waiting(Some(crashLoopBackOffReason))) => logger.debug( s"Some containers are in waiting state with CrashLoopBackOff reason - returning Restarting status. 
Pods: $pods" ) - (SimpleStateStatus.Restarting, None, Nil) - case (_, Some(progressing), _) if isTrue(progressing) => (SimpleStateStatus.DuringDeploy, None, Nil) + SimpleStateStatus.Restarting + case (_, Some(progressing), _) if isTrue(progressing) => + SimpleStateStatus.DuringDeploy case (_, _, Some(replicaFailure)) if isTrue(replicaFailure) => - (ProblemStateStatus.Failed, None, replicaFailure.message.toList) - case (a, b, _) => (ProblemStateStatus.Failed, None, a.flatMap(_.message).toList ++ b.flatMap(_.message).toList) + ProblemStateStatus( + "There are some problems with scenario.", + tooltip = replicaFailure.message.map("Error: " + _) + ) + case (a, b, _) => + ProblemStateStatus( + "There are some problems with scenario.", + tooltip = NonEmptyList + .fromList(a.flatMap(_.message).toList ++ b.flatMap(_.message).toList) + .map(_.toList.mkString("Errors: ", ", ", "")) + ) } } diff --git a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/BaseK8sDeploymentManagerTest.scala b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/BaseK8sDeploymentManagerTest.scala index c244db2fc50..0805f0a9469 100644 --- a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/BaseK8sDeploymentManagerTest.scala +++ b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/BaseK8sDeploymentManagerTest.scala @@ -145,16 +145,16 @@ class BaseK8sDeploymentManagerTest } finally { manager.processCommand(DMCancelScenarioCommand(version.processName, DeploymentData.systemUser)).futureValue eventually { - manager.getProcessStates(version.processName).futureValue.value shouldBe List.empty + manager.getScenarioDeploymentsStatuses(version.processName).futureValue.value shouldBe List.empty } } } def waitForRunning(version: ProcessVersion): Assertion = { eventually { - val state = manager.getProcessStates(version.processName).map(_.value).futureValue + val state = 
manager.getScenarioDeploymentsStatuses(version.processName).map(_.value).futureValue logger.debug(s"Current process state: $state") - state.flatMap(_.version) shouldBe List(version) + state.flatMap(_.version) shouldBe List(version.versionId) state.map(_.status) shouldBe List(SimpleStateStatus.Running) } } diff --git a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerKafkaTest.scala b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerKafkaTest.scala index 344ae6e5850..d9def927f36 100644 --- a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerKafkaTest.scala +++ b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerKafkaTest.scala @@ -102,8 +102,8 @@ class K8sDeploymentManagerKafkaTest def waitForRunning(version: ProcessVersion) = { eventually { - val state = manager.getProcessStates(version.processName).map(_.value).futureValue - state.flatMap(_.version) shouldBe List(version) + val state = manager.getScenarioDeploymentsStatuses(version.processName).map(_.value).futureValue + state.flatMap(_.version) shouldBe List(version.versionId) state.map(_.status) shouldBe List(SimpleStateStatus.Running) } } @@ -391,7 +391,7 @@ class K8sDeploymentManagerKafkaTest private def cancelAndAssertCleanup(manager: K8sDeploymentManager, version: ProcessVersion) = { manager.processCommand(DMCancelScenarioCommand(version.processName, DeploymentData.systemUser)).futureValue eventually { - manager.getProcessStates(version.processName).map(_.value).futureValue shouldBe List.empty + manager.getScenarioDeploymentsStatuses(version.processName).map(_.value).futureValue shouldBe List.empty } assertNoGarbageLeft() } diff --git a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerOnMocksTest.scala 
b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerOnMocksTest.scala index b1f121d6fd2..60ff1d54d89 100644 --- a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerOnMocksTest.scala +++ b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerOnMocksTest.scala @@ -87,13 +87,13 @@ class K8sDeploymentManagerOnMocksTest stubWithFixedDelay(durationLongerThanClientTimeout) a[TcpIdleTimeoutException] shouldBe thrownBy { manager - .getProcessStates(ProcessName("foo")) + .getScenarioDeploymentsStatuses(ProcessName("foo")) .futureValueEnsuringInnerException(durationLongerThanClientTimeout) } stubWithFixedDelay(0 seconds) val result = manager - .getProcessStates(ProcessName("foo")) + .getScenarioDeploymentsStatuses(ProcessName("foo")) .map(_.value) .futureValueEnsuringInnerException(durationLongerThanClientTimeout) result shouldEqual List.empty diff --git a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerReqRespTest.scala b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerReqRespTest.scala index fcbf34b6896..aff3839938c 100644 --- a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerReqRespTest.scala +++ b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentManagerReqRespTest.scala @@ -179,8 +179,8 @@ class K8sDeploymentManagerReqRespTest ) .futureValue eventually { - val state = f.manager.getProcessStates(secondVersionInfo.processName).map(_.value).futureValue - state.flatMap(_.version).map(_.versionId.value) shouldBe List(secondVersion) + val state = f.manager.getScenarioDeploymentsStatuses(secondVersionInfo.processName).map(_.value).futureValue + state.flatMap(_.version).map(_.value) shouldBe List(secondVersion) state.map(_.status) shouldBe 
List(SimpleStateStatus.Running) } val versionsAfterRedeploy = checkVersions() diff --git a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapperSpec.scala b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapperSpec.scala index 9722eb07de6..1d4baef9b60 100644 --- a/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapperSpec.scala +++ b/engine/lite/k8sDeploymentManager/src/test/scala/pl/touk/nussknacker/k8s/manager/K8sDeploymentStatusMapperSpec.scala @@ -2,26 +2,21 @@ package pl.touk.nussknacker.k8s.manager import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers -import pl.touk.nussknacker.engine.api.ProcessVersion -import pl.touk.nussknacker.engine.api.deployment.StatusDetails import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus.ProblemStateStatus -import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId} +import pl.touk.nussknacker.engine.api.deployment.{DeploymentStatusDetails, ScenarioActionName} +import pl.touk.nussknacker.engine.api.process.VersionId import pl.touk.nussknacker.engine.util.ResourceLoader import play.api.libs.json.{Format, Json} -import skuber.{ListResource, Pod} import skuber.apps.v1.Deployment import skuber.json.format._ +import skuber.{ListResource, Pod} //It's no so easy to move deployment in unstable state reliably, so //for now we have unit tests based on real responses - generated manually, using kubectl -v=9 describe deployment [...] 
class K8sDeploymentStatusMapperSpec extends AnyFunSuite with Matchers { - private val mapper = new K8sDeploymentStatusMapper(K8sProcessStateDefinitionManager) - - private val timestamp = 1640769008000L - - private val version = ProcessVersion(VersionId(4), ProcessName("AAAAA"), ProcessId(7), List.empty, "admin", Some(2)) + private val version = VersionId(4) private def parseResource[T](source: String)(implicit format: Format[T]): T = { val value = Json.parse(ResourceLoader.load(s"/k8sResponses/$source")) @@ -29,62 +24,79 @@ class K8sDeploymentStatusMapperSpec extends AnyFunSuite with Matchers { } test("detects running scenario") { - val state = mapper.findStatusForDeploymentsAndPods(parseResource[Deployment]("running.json") :: Nil, Nil) + val state = + K8sDeploymentStatusMapper.findStatusForDeploymentsAndPods(parseResource[Deployment]("running.json") :: Nil, Nil) state shouldBe Some( - StatusDetails(SimpleStateStatus.Running, None, None, Some(version), Some(timestamp), None, Nil) + DeploymentStatusDetails( + status = SimpleStateStatus.Running, + deploymentId = None, + version = Some(version), + ) ) } test("detects scenario in deployment") { - val state = mapper.findStatusForDeploymentsAndPods(parseResource[Deployment]("inProgress.json") :: Nil, Nil) + val state = K8sDeploymentStatusMapper.findStatusForDeploymentsAndPods( + parseResource[Deployment]("inProgress.json") :: Nil, + Nil + ) state shouldBe Some( - StatusDetails(SimpleStateStatus.DuringDeploy, None, None, Some(version), Some(timestamp), None, Nil) + DeploymentStatusDetails( + status = SimpleStateStatus.DuringDeploy, + deploymentId = None, + version = Some(version), + ) ) } test("detects scenario without progress") { - val state = mapper.findStatusForDeploymentsAndPods(parseResource[Deployment]("progressFailed.json") :: Nil, Nil) + val state = K8sDeploymentStatusMapper.findStatusForDeploymentsAndPods( + parseResource[Deployment]("progressFailed.json") :: Nil, + Nil + ) state shouldBe Some( - StatusDetails( 
- ProblemStateStatus.Failed, - None, - None, - Some(version), - Some(timestamp), - None, - List( - "Deployment does not have minimum availability.", - "ReplicaSet \"scenario-7-processname-aaaaa-x-5c799f64b8\" has timed out progressing." - ) + DeploymentStatusDetails( + status = ProblemStateStatus( + "There are some problems with scenario.", + tooltip = Some( + "Errors: Deployment does not have minimum availability., ReplicaSet \"scenario-7-processname-aaaaa-x-5c799f64b8\" has timed out progressing." + ) + ), + deploymentId = None, + version = Some(version), ) ) } test("detects restarting (crashing) scenario") { - val state = mapper.findStatusForDeploymentsAndPods( + val state = K8sDeploymentStatusMapper.findStatusForDeploymentsAndPods( parseResource[Deployment]("inProgress.json") :: Nil, parseResource[ListResource[Pod]]("podsCrashLoopBackOff.json").items ) state shouldBe Some( - StatusDetails(SimpleStateStatus.Restarting, None, None, Some(version), Some(timestamp), None, Nil) + DeploymentStatusDetails( + status = SimpleStateStatus.Restarting, + deploymentId = None, + version = Some(version), + ) ) } test("detects multiple deployments") { val deployment = parseResource[Deployment]("running.json") val deployment2 = deployment.copy(metadata = deployment.metadata.copy(name = "otherName")) - val state = mapper.findStatusForDeploymentsAndPods(deployment :: deployment2 :: Nil, Nil) + val state = K8sDeploymentStatusMapper.findStatusForDeploymentsAndPods(deployment :: deployment2 :: Nil, Nil) state shouldBe Some( - StatusDetails( - ProblemStateStatus.MultipleJobsRunning, - None, - None, - None, - None, - None, - "Expected one deployment, instead: scenario-7-processname-aaaaa-x, otherName" :: Nil + DeploymentStatusDetails( + status = ProblemStateStatus( + description = "More than one deployment is running.", + allowedActions = Set(ScenarioActionName.Cancel), + tooltip = Some("Expected one deployment, instead: scenario-7-processname-aaaaa-x, otherName") + ), + deploymentId = 
None, + version = None, ) ) } diff --git a/engine/lite/runtime/src/main/scala/pl/touk/nussknacker/engine/lite/TestRunner.scala b/engine/lite/runtime/src/main/scala/pl/touk/nussknacker/engine/lite/TestRunner.scala index bf39c425d26..4d846fd64e8 100644 --- a/engine/lite/runtime/src/main/scala/pl/touk/nussknacker/engine/lite/TestRunner.scala +++ b/engine/lite/runtime/src/main/scala/pl/touk/nussknacker/engine/lite/TestRunner.scala @@ -5,9 +5,9 @@ import cats.{Id, Monad, ~>} import io.circe.Json import pl.touk.nussknacker.engine.Interpreter.InterpreterShape import pl.touk.nussknacker.engine.ModelData -import pl.touk.nussknacker.engine.api.process.{ComponentUseCase, ProcessName, Source} +import pl.touk.nussknacker.engine.api.JobData +import pl.touk.nussknacker.engine.api.process.{ComponentUseCase, Source} import pl.touk.nussknacker.engine.api.test.ScenarioTestData -import pl.touk.nussknacker.engine.api.{JobData, ProcessVersion} import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess import pl.touk.nussknacker.engine.lite.TestRunner.EffectUnwrapper import pl.touk.nussknacker.engine.lite.api.commonTypes.ResultType diff --git a/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessAction.scala b/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessAction.scala index 4a51e81d48f..4fd08eca932 100644 --- a/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessAction.scala +++ b/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessAction.scala @@ -21,18 +21,26 @@ import java.util.UUID processId: ProcessId, // Used by external project // We use process action only for finished/execution finished actions so processVersionId is always defined - processVersionId: VersionId, - user: String, + override val processVersionId: VersionId, + override val user: String, // We use process action only for finished/execution finished actions so performedAt is always defined // Used 
by external project performedAt: Instant, // Used by external project - actionName: ScenarioActionName, - state: ProcessActionState, + override val actionName: ScenarioActionName, + override val state: ProcessActionState, failureMessage: Option[String], // Used by external project comment: Option[String], -) +) extends ScenarioStatusActionDetails + +// This is the narrowest set of information required by scenario status resolving mechanism. +trait ScenarioStatusActionDetails { + def actionName: ScenarioActionName + def state: ProcessActionState + def processVersionId: VersionId + def user: String +} final case class ProcessActionId(value: UUID) { override def toString: String = value.toString @@ -76,8 +84,9 @@ object ScenarioActionName { val Cancel: ScenarioActionName = ScenarioActionName("CANCEL") val Archive: ScenarioActionName = ScenarioActionName("ARCHIVE") val UnArchive: ScenarioActionName = ScenarioActionName("UNARCHIVE") - val Pause: ScenarioActionName = ScenarioActionName("PAUSE") // TODO: To implement in future.. - val Rename: ScenarioActionName = ScenarioActionName("RENAME") + // TODO remove unused action + val Pause: ScenarioActionName = ScenarioActionName("PAUSE") // TODO: To implement in future.. + val Rename: ScenarioActionName = ScenarioActionName("RENAME") // TODO: We kept the old name of "run now" CustomAction for compatibility reasons. // In the future it can be changed to better name, according to convention, but that would require database migration // In the meantime, there are methods serialize and deserialize, which operate on name RUN_OFF_SCHEDULE instead. 
@@ -85,7 +94,7 @@ object ScenarioActionName { val DefaultActions: List[ScenarioActionName] = Nil - val StateActions: Set[ScenarioActionName] = Set(Cancel, Deploy, Pause) + val ScenarioStatusActions: Set[ScenarioActionName] = Set(Cancel, Deploy) def serialize(name: ScenarioActionName): String = name match { case ScenarioActionName.RunOffSchedule => "RUN_OFF_SCHEDULE" diff --git a/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessState.scala b/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessState.scala deleted file mode 100644 index d9bbec4b1b8..00000000000 --- a/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/ProcessState.scala +++ /dev/null @@ -1,97 +0,0 @@ -package pl.touk.nussknacker.engine.api.deployment - -import io.circe._ -import io.circe.generic.JsonCodec -import pl.touk.nussknacker.engine.api.ProcessVersion -import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName -import pl.touk.nussknacker.engine.api.process.VersionId -import pl.touk.nussknacker.engine.deployment.{DeploymentId, ExternalDeploymentId} - -import java.net.URI - -/** - * Represents status of a scenario. - * Contains: - * - status itself and its evaluation moment: status, startTime - * - how to display in UI: icon, tooltip, description - * - deployment info: deploymentId, version - * - which actions are allowed: allowedActions - * - additional properties: attributes, errors - * - * Statuses definition, allowed actions and current scenario ProcessState is defined by [[ProcessStateDefinitionManager]]. - * @param description Short message displayed in top right panel of scenario diagram panel. - * @param tooltip Message displayed when mouse is hoovering over an icon (both scenarios and diagram panel). - * May contain longer, detailed status description. 
- */ -@JsonCodec case class ProcessState( - externalDeploymentId: Option[ExternalDeploymentId], - status: StateStatus, - version: Option[ProcessVersion], - visibleActions: List[ScenarioActionName], - allowedActions: List[ScenarioActionName], - actionTooltips: Map[ScenarioActionName, String], - icon: URI, - tooltip: String, - description: String, - startTime: Option[Long], - attributes: Option[Json], - errors: List[String] -) - -object ProcessState { - implicit val uriEncoder: Encoder[URI] = Encoder.encodeString.contramap(_.toString) - implicit val uriDecoder: Decoder[URI] = Decoder.decodeString.map(URI.create) - implicit val scenarioVersionIdEncoder: Encoder[ScenarioVersionId] = Encoder.encodeLong.contramap(_.value) - implicit val scenarioVersionIdDecoder: Decoder[ScenarioVersionId] = Decoder.decodeLong.map(ScenarioVersionId.apply) - - implicit val scenarioActionNameEncoder: Encoder[ScenarioActionName] = - Encoder.encodeString.contramap(ScenarioActionName.serialize) - implicit val scenarioActionNameDecoder: Decoder[ScenarioActionName] = - Decoder.decodeString.map(ScenarioActionName.deserialize) - - implicit val scenarioActionNameKeyDecoder: KeyDecoder[ScenarioActionName] = - (key: String) => Some(ScenarioActionName.deserialize(key)) - implicit val scenarioActionNameKeyEncoder: KeyEncoder[ScenarioActionName] = (name: ScenarioActionName) => - ScenarioActionName.serialize(name) - -} - -object StateStatus { - type StatusName = String - - // StateStatus has to have Decoder defined because it is decoded along with ProcessState in the migration process - // (see StandardRemoteEnvironment class). - // In all cases (this one and for FE purposes) only info about the status name is essential. 
We could encode status - // just as a String but for compatibility reasons we encode it as a nested object with one, 'name' field - implicit val statusEncoder: Encoder[StateStatus] = Encoder.encodeString - .contramap[StateStatus](_.name) - .mapJson(nameJson => Json.fromFields(Seq("name" -> nameJson))) - - implicit val statusDecoder: Decoder[StateStatus] = Decoder.decodeString.at("name").map(NoAttributesStateStatus) - - // Temporary methods to simplify status creation - def apply(statusName: StatusName): StateStatus = NoAttributesStateStatus(statusName) - -} - -trait StateStatus { - // Status identifier, should be unique among all states registered within all processing types. - def name: StatusName -} - -case class NoAttributesStateStatus(name: StatusName) extends StateStatus { - override def toString: String = name -} - -case class StatusDetails( - status: StateStatus, - deploymentId: Option[DeploymentId], - externalDeploymentId: Option[ExternalDeploymentId] = None, - version: Option[ProcessVersion] = None, - startTime: Option[Long] = None, - attributes: Option[Json] = None, - errors: List[String] = List.empty -) { - def externalDeploymentIdUnsafe: ExternalDeploymentId = - externalDeploymentId.getOrElse(throw new IllegalStateException(s"externalDeploymentId is missing")) -} diff --git a/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/StateStatus.scala b/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/StateStatus.scala new file mode 100644 index 00000000000..b9fcafc109c --- /dev/null +++ b/extensions-api/src/main/scala/pl/touk/nussknacker/engine/api/deployment/StateStatus.scala @@ -0,0 +1,23 @@ +package pl.touk.nussknacker.engine.api.deployment + +import pl.touk.nussknacker.engine.api.deployment.StateStatus.StatusName + +// This class represents both deployment status and scenario status. 
+// TODO: we should use DeploymentStatus in StatusDetails which is returned by DeploymentManager.getScenarioDeploymentsStatuses +// but before we do this, we should move version to dedicated status and scenario scheduling mechanism should stop using DeploymentManager's API +trait StateStatus { + // Status identifier, should be unique among all states registered within all processing types. + def name: StatusName +} + +object StateStatus { + type StatusName = String + + // Temporary methods to simplify status creation + def apply(statusName: StatusName): StateStatus = NoAttributesStateStatus(statusName) + +} + +case class NoAttributesStateStatus(name: StatusName) extends StateStatus { + override def toString: String = name +} diff --git a/scenario-compiler/src/main/scala/pl/touk/nussknacker/engine/definition/component/DynamicComponentStaticDefinitionDeterminer.scala b/scenario-compiler/src/main/scala/pl/touk/nussknacker/engine/definition/component/DynamicComponentStaticDefinitionDeterminer.scala index 06ea83d56b7..29aee4e4acf 100644 --- a/scenario-compiler/src/main/scala/pl/touk/nussknacker/engine/definition/component/DynamicComponentStaticDefinitionDeterminer.scala +++ b/scenario-compiler/src/main/scala/pl/touk/nussknacker/engine/definition/component/DynamicComponentStaticDefinitionDeterminer.scala @@ -2,7 +2,7 @@ package pl.touk.nussknacker.engine.definition.component import com.typesafe.scalalogging.LazyLogging import pl.touk.nussknacker.engine.ModelData -import pl.touk.nussknacker.engine.api.component.{ComponentId, ParameterConfig} +import pl.touk.nussknacker.engine.api.component.ComponentId import pl.touk.nussknacker.engine.api.context.ValidationContext import pl.touk.nussknacker.engine.api.context.transformation.{ DynamicComponent, @@ -11,7 +11,6 @@ import pl.touk.nussknacker.engine.api.context.transformation.{ WithStaticParameters } import pl.touk.nussknacker.engine.api.definition.{OutputVariableNameDependency, Parameter} -import 
pl.touk.nussknacker.engine.api.parameter.ParameterName import pl.touk.nussknacker.engine.api.process.ProcessName import pl.touk.nussknacker.engine.api.typed.typing.{TypingResult, Unknown} import pl.touk.nussknacker.engine.api.{JobData, MetaData, NodeId, ProcessVersion} diff --git a/scenario-compiler/src/test/scala/pl/touk/nussknacker/engine/definition/component/parameter/validator/ValidationExpressionParameterValidatorTest.scala b/scenario-compiler/src/test/scala/pl/touk/nussknacker/engine/definition/component/parameter/validator/ValidationExpressionParameterValidatorTest.scala index 8e0d17290c4..889724fab7d 100644 --- a/scenario-compiler/src/test/scala/pl/touk/nussknacker/engine/definition/component/parameter/validator/ValidationExpressionParameterValidatorTest.scala +++ b/scenario-compiler/src/test/scala/pl/touk/nussknacker/engine/definition/component/parameter/validator/ValidationExpressionParameterValidatorTest.scala @@ -4,9 +4,8 @@ import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers import org.scalatest.prop.TableDrivenPropertyChecks import org.springframework.expression.spel.standard.SpelExpressionParser -import org.springframework.expression.spel.support.StandardEvaluationContext import pl.touk.nussknacker.engine.api.parameter.ParameterName -import pl.touk.nussknacker.engine.api.{Context, JobData, MetaData, NodeId, ProcessVersion, StreamMetaData} +import pl.touk.nussknacker.engine.api._ import pl.touk.nussknacker.engine.definition.clazz.ClassDefinitionTestUtils import pl.touk.nussknacker.engine.definition.component.parameter.validator.TestSpelExpression.expressionConfig import pl.touk.nussknacker.engine.definition.globalvariables.ExpressionConfigDefinition