diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9352fa52f..e017d70c2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1 @@ -# This should match the owning team set up in https://github.com/orgs/opensearch-project/teams -* @opensearch-project/index-management \ No newline at end of file +* @bowenlan-amzn @getsaurabh02 @lezzago @praveensameneni @xluo-aws @gaobinlong @Hailong-am diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 7ca8b9920..7aef355b1 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -8,10 +8,8 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | ---------------- | ----------------------------------------------------- | ----------- | | Ashish Agrawal | [lezzago](https://github.com/lezzago) | Amazon | | Bowen Lan | [bowenlan-amzn](https://github.com/bowenlan-amzn) | Amazon | -| Charlotte | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | -| Drew Baugher | [dbbaughe](https://github.com/dbbaughe) | Amazon | -| Mohammad Qureshi | [qreshi](https://github.com/qreshi) | Amazon | -| Nick Knize | [nknize](https://github.com/nknize) | Amazon | | Praveen Sameneni | [praveensameneni](https://github.com/praveensameneni) | Amazon | -| Ravi | [thalurur](https://github.com/thalurur) | Amazon | -| Sriram | [skkosuri-amzn](https://github.com/skkosuri-amzn) | Amazon | +| Saurabh Singh | [getsaurabh02](https://github.com/getsaurabh02/) | Amazon | +| Xuesong Luo | [xluo-aws](https://github.com/xluo-aws) | Amazon | +| Hailong Cui | [Hailong-am](https://github.com/Hailong-am) | Amazon | +| Binlong Gao | [gaobinlong](https://github.com/gaobinlong) | Amazon | diff --git a/build-tools/coverage.gradle b/build-tools/coverage.gradle index fd9530964..8abac12a1 100644 --- a/build-tools/coverage.gradle +++ b/build-tools/coverage.gradle @@ -21,7 +21,7 @@ // testing tasks don't derive from Test so the jacoco plugin can't do this automatically. 
def jacocoDir = "${buildDir}/jacoco" -task dummyTest(type: Test) { +tasks.register("dummyTest", Test) { enabled = false workingDir = file("/") // Force absolute path to jacoco agent jar jacoco { @@ -31,7 +31,7 @@ task dummyTest(type: Test) { } } -task dummyIntegTest(type: Test) { +tasks.register("dummyIntegTest", Test) { enabled = false workingDir = file("/") // Force absolute path to jacoco agent jar jacoco { @@ -51,8 +51,8 @@ jacocoTestReport { sourceDirectories.from = "src/main/kotlin" classDirectories.from = sourceSets.main.output reports { - html.enabled = true // human readable - xml.enabled = true // for coverlay + html.required = true // human readable + xml.required = true // for coverlay } } diff --git a/build-tools/pkgbuild.gradle b/build-tools/pkgbuild.gradle index 6425d1e78..71cea1a7a 100644 --- a/build-tools/pkgbuild.gradle +++ b/build-tools/pkgbuild.gradle @@ -3,7 +3,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -apply plugin: 'nebula.ospackage' +apply plugin: 'com.netflix.nebula.ospackage' // This is afterEvaluate because the bundlePlugin ZIP task is updated afterEvaluate and changes the ZIP name to match the plugin name afterEvaluate { @@ -13,7 +13,7 @@ afterEvaluate { version = "${project.version}" - "-SNAPSHOT" into '/usr/share/opensearch/plugins' - from(zipTree(bundlePlugin.archivePath)) { + from(zipTree(bundlePlugin.archiveFile)) { into opensearchplugin.name } @@ -41,24 +41,24 @@ afterEvaluate { arch = 'NOARCH' dependsOn 'assemble' finalizedBy 'renameRpm' - task renameRpm(type: Copy) { + tasks.register("renameRpm", Copy) { from("$buildDir/distributions") into("$buildDir/distributions") - include archiveName - rename archiveName, "${packageName}-${version}.rpm" - doLast { delete file("$buildDir/distributions/$archiveName") } + include archiveFileName + rename archiveFileName, "${packageName}-${version}.rpm" + doLast { delete file("$buildDir/distributions/$archiveFileName") } } } buildDeb { arch = 'all' dependsOn 'assemble' finalizedBy 
'renameDeb' - task renameDeb(type: Copy) { + tasks.register("renameDeb", Copy) { from("$buildDir/distributions") into("$buildDir/distributions") - include archiveName - rename archiveName, "${packageName}-${version}.deb" - doLast { delete file("$buildDir/distributions/$archiveName") } + include archiveFileName + rename archiveFileName, "${packageName}-${version}.deb" + doLast { delete file("$buildDir/distributions/$archiveFileName") } } } } diff --git a/build.gradle b/build.gradle index 1d6a2dc9e..1c8c2b83a 100644 --- a/build.gradle +++ b/build.gradle @@ -63,7 +63,7 @@ buildscript { notifications_core_build_download = 'https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/' + opensearch_no_snapshot + '/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-notifications-core-' + notifications_no_snapshot + '.zip' - kotlin_version = System.getProperty("kotlin.version", "1.6.10") + kotlin_version = System.getProperty("kotlin.version", "1.8.21") security_plugin_version = System.getProperty("security.version", opensearch_build) } @@ -85,7 +85,7 @@ buildscript { } plugins { - id 'nebula.ospackage' version "8.3.0" + id "com.netflix.nebula.ospackage" version "11.3.0" id "com.dorongold.task-tree" version "2.1.1" } @@ -137,6 +137,8 @@ opensearchplugin { description 'OpenSearch Index Management Plugin' classname 'org.opensearch.indexmanagement.IndexManagementPlugin' extendedPlugins = ['opensearch-job-scheduler'] + licenseFile rootProject.file('LICENSE') + noticeFile rootProject.file('NOTICE') } tasks.named("integTest").configure { @@ -182,8 +184,6 @@ configurations.testImplementation { ext { projectSubstitutions = [:] - licenseFile = rootProject.file('LICENSE') - noticeFile = rootProject.file('NOTICE') } allprojects { diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 249e5832f..c1962a79e 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git 
a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index ae04661ee..37aef8d3f 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.1.1-bin.zip +networkTimeout=10000 zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index a69d9cb6c..aeb74cbb4 100755 --- a/gradlew +++ b/gradlew @@ -55,7 +55,7 @@ # Darwin, MinGW, and NonStop. # # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. @@ -80,13 +80,10 @@ do esac done -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit - -APP_NAME="Gradle" +# This is normally unused +# shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum @@ -143,12 +140,16 @@ fi if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac case $MAX_FD in #( '' | soft) :;; #( *) + # In POSIX sh, ulimit -n is undefined. 
That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -193,6 +194,10 @@ if "$cygwin" || "$msys" ; then done fi + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + # Collect all arguments for the java command; # * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of # shell script including quotes and variable substitutions, so put them in diff --git a/gradlew.bat b/gradlew.bat index 53a6b238d..6689b85be 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -26,6 +26,7 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% diff --git a/release-notes/opensearch-index-management.release-notes-2.7.0.0.md b/release-notes/opensearch-index-management.release-notes-2.7.0.0.md new file mode 100644 index 000000000..9ad5d3d38 --- /dev/null +++ b/release-notes/opensearch-index-management.release-notes-2.7.0.0.md @@ -0,0 +1,24 @@ +## Version 2.7.0.0 2023-04-17 + +Compatible with OpenSearch 2.7.0 + +### Maintenance +* Bump mockito version. ([#701](https://github.com/opensearch-project/index-management/pull/701)) +* Bump version to 2.7. ([#743](https://github.com/opensearch-project/index-management/pull/743)) + +### Features +* Error Prevention: Add close action. ([#728](https://github.com/opensearch-project/index-management/pull/728)) +* Error Prevention: Add index priority action. ([#729](https://github.com/opensearch-project/index-management/pull/729)) +* Error Prevention: Add notification, shrink, allocation and rollup. ([#732](https://github.com/opensearch-project/index-management/pull/732)) +* Error Prevention: Add transition action. 
([#744](https://github.com/opensearch-project/index-management/pull/744)) +* Error Prevention: Add snapshot action. ([#745](https://github.com/opensearch-project/index-management/pull/745)) + +### Refactoring +* Replace Set in org.opensearch.common.collect with java.util references. ([#717](https://github.com/opensearch-project/index-management/pull/717)) +* Fixed xContent dependencies due to OSCore changes. ([#721](https://github.com/opensearch-project/index-management/pull/721)) + +### Bug fixes +* Shrink action Fix. ([#718](https://github.com/opensearch-project/index-management/pull/718)) + +### Documentation +* Added 2.7 release notes. ([#755](https://github.com/opensearch-project/index-management/pull/755)) \ No newline at end of file diff --git a/spi/build.gradle b/spi/build.gradle index 4b398668a..2fa3f6bf7 100644 --- a/spi/build.gradle +++ b/spi/build.gradle @@ -23,15 +23,23 @@ ext { noticeFile = rootProject.file('NOTICE') } +plugins.withId('java') { + sourceCompatibility = targetCompatibility = JavaVersion.VERSION_11 +} + +plugins.withId('org.jetbrains.kotlin.jvm') { + compileKotlin.kotlinOptions.jvmTarget = compileTestKotlin.kotlinOptions.jvmTarget = JavaVersion.VERSION_11 +} + jacoco { toolVersion = '0.8.7' - reportsDir = file("$buildDir/JacocoReport") + reportsDirectory = file("$buildDir/JacocoReport") } jacocoTestReport { reports { - xml.enabled false - csv.enabled false + xml.required.set(false) + csv.required.set(false) html.destination file("${buildDir}/jacoco/") } } @@ -44,7 +52,7 @@ repositories { maven { url "https://aws.oss.sonatype.org/content/repositories/snapshots" } } -configurations.all { +configurations.configureEach { if (it.state != Configuration.State.UNRESOLVED) return resolutionStrategy { force "org.jetbrains.kotlin:kotlin-stdlib:${kotlin_version}" @@ -69,21 +77,22 @@ idea.module { excludeDirs -= file("$buildDir") } -task sourcesJar(type: Jar, dependsOn: classes) { - classifier = 'sources' +tasks.register("sourcesJar", Jar) { + 
dependsOn "classes" + archiveClassifier = 'sources' from sourceSets.main.allSource } test { doFirst { test.classpath -= project.files(project.tasks.named('shadowJar')) - test.classpath -= project.configurations.getByName(ShadowBasePlugin.CONFIGURATION_NAME) - test.classpath += project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).runtimeClasspath + test.classpath -= project.configurations.named(ShadowBasePlugin.CONFIGURATION_NAME) + test.classpath += project.extensions.getByType(SourceSetContainer).named(SourceSet.MAIN_SOURCE_SET_NAME).runtimeClasspath } systemProperty 'tests.security.manager', 'false' } -task integTest(type: RestIntegTestTask) { +tasks.register("integTest", RestIntegTestTask) { description 'Run integ test with opensearch test framework' group 'verification' systemProperty 'tests.security.manager', 'false' diff --git a/src/main/kotlin/org/opensearch/indexmanagement/IndexManagementPlugin.kt b/src/main/kotlin/org/opensearch/indexmanagement/IndexManagementPlugin.kt index c8b5a986d..3180559e9 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/IndexManagementPlugin.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/IndexManagementPlugin.kt @@ -46,7 +46,6 @@ import org.opensearch.indexmanagement.indexstatemanagement.IndexMetadataProvider import org.opensearch.indexmanagement.indexstatemanagement.IndexStateManagementHistory import org.opensearch.indexmanagement.indexstatemanagement.ManagedIndexCoordinator import org.opensearch.indexmanagement.indexstatemanagement.ManagedIndexRunner -import org.opensearch.indexmanagement.indexstatemanagement.MetadataService import org.opensearch.indexmanagement.indexstatemanagement.PluginVersionSweepCoordinator import org.opensearch.indexmanagement.indexstatemanagement.SkipExecution import org.opensearch.indexmanagement.indexstatemanagement.model.ManagedIndexConfig @@ -85,7 +84,6 @@ import org.opensearch.indexmanagement.indexstatemanagement.transport.action.upda import 
org.opensearch.indexmanagement.indexstatemanagement.transport.action.updateindexmetadata.UpdateManagedIndexMetaDataAction import org.opensearch.indexmanagement.indexstatemanagement.util.DEFAULT_INDEX_TYPE import org.opensearch.indexmanagement.indexstatemanagement.validation.ActionValidation -import org.opensearch.indexmanagement.indexstatemanagement.migration.ISMTemplateService import org.opensearch.indexmanagement.refreshanalyzer.RefreshSearchAnalyzerAction import org.opensearch.indexmanagement.refreshanalyzer.RestRefreshSearchAnalyzerAction import org.opensearch.indexmanagement.refreshanalyzer.TransportRefreshSearchAnalyzerAction @@ -449,12 +447,9 @@ class IndexManagementPlugin : JobSchedulerExtension, NetworkPlugin, ActionPlugin .registerExtensionChecker(extensionChecker) .registerIndexMetadataProvider(indexMetadataProvider) - val metadataService = MetadataService(client, clusterService, skipFlag, indexManagementIndices) - val templateService = ISMTemplateService(client, clusterService, xContentRegistry, indexManagementIndices) - val managedIndexCoordinator = ManagedIndexCoordinator( environment.settings(), - client, clusterService, threadPool, indexManagementIndices, metadataService, templateService, indexMetadataProvider + client, clusterService, threadPool, indexManagementIndices, indexMetadataProvider ) val smRunner = SMRunner.init(client, threadPool, settings, indexManagementIndices, clusterService) diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/IndexStateManagementHistory.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/IndexStateManagementHistory.kt index 9c46489d8..b957f7714 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/IndexStateManagementHistory.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/IndexStateManagementHistory.kt @@ -176,7 +176,7 @@ class IndexStateManagementHistory( clusterStateRequest, object : ActionListener { override 
fun onResponse(clusterStateResponse: ClusterStateResponse) { - if (!clusterStateResponse.state.metadata.indices.isEmpty) { + if (!clusterStateResponse.state.metadata.indices.isEmpty()) { val indicesToDelete = getIndicesToDelete(clusterStateResponse) logger.info("Deleting old history indices viz $indicesToDelete") deleteAllOldHistoryIndices(indicesToDelete) @@ -199,7 +199,10 @@ class IndexStateManagementHistory( val creationTime = indexMetaData.creationDate if ((Instant.now().toEpochMilli() - creationTime) > historyRetentionPeriod.millis) { - val alias = indexMetaData.aliases.firstOrNull { IndexManagementIndices.HISTORY_WRITE_INDEX_ALIAS == it.value.alias } + val alias = indexMetaData.aliases.firstNotNullOfOrNull { + alias -> + IndexManagementIndices.HISTORY_WRITE_INDEX_ALIAS == alias.value.alias + } if (alias != null && historyEnabled) { // If index has write alias and history is enable, don't delete the index. continue diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexCoordinator.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexCoordinator.kt index bb286ea9e..eeeeb5252 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexCoordinator.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexCoordinator.kt @@ -58,10 +58,7 @@ import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndex import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings.Companion.INDEX_STATE_MANAGEMENT_ENABLED import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings.Companion.JITTER import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings.Companion.JOB_INTERVAL -import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings.Companion.METADATA_SERVICE_ENABLED -import 
org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings.Companion.METADATA_SERVICE_STATUS import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings.Companion.SWEEP_PERIOD -import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings.Companion.TEMPLATE_MIGRATION_CONTROL import org.opensearch.indexmanagement.indexstatemanagement.transport.action.managedIndex.ManagedIndexAction import org.opensearch.indexmanagement.indexstatemanagement.transport.action.managedIndex.ManagedIndexRequest import org.opensearch.indexmanagement.indexstatemanagement.util.DEFAULT_INDEX_TYPE @@ -74,7 +71,6 @@ import org.opensearch.indexmanagement.indexstatemanagement.util.isFailed import org.opensearch.indexmanagement.indexstatemanagement.util.isPolicyCompleted import org.opensearch.indexmanagement.indexstatemanagement.util.managedIndexConfigIndexRequest import org.opensearch.indexmanagement.indexstatemanagement.util.updateEnableManagedIndexRequest -import org.opensearch.indexmanagement.indexstatemanagement.migration.ISMTemplateService import org.opensearch.indexmanagement.opensearchapi.IndexManagementSecurityContext import org.opensearch.indexmanagement.opensearchapi.contentParser import org.opensearch.indexmanagement.opensearchapi.parseFromSearchResponse @@ -89,7 +85,6 @@ import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.threadpool.Scheduler import org.opensearch.threadpool.ThreadPool -import java.time.Instant /** * Listens for cluster changes to pick up new indices to manage. 
@@ -114,8 +109,6 @@ class ManagedIndexCoordinator( private val clusterService: ClusterService, private val threadPool: ThreadPool, indexManagementIndices: IndexManagementIndices, - private val metadataService: MetadataService, - private val templateService: ISMTemplateService, private val indexMetadataProvider: IndexMetadataProvider ) : ClusterStateListener, CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("ManagedIndexCoordinator")), @@ -130,12 +123,9 @@ class ManagedIndexCoordinator( @Volatile private var lastFullSweepTimeNano = System.nanoTime() @Volatile private var indexStateManagementEnabled = INDEX_STATE_MANAGEMENT_ENABLED.get(settings) - @Volatile private var metadataServiceEnabled = METADATA_SERVICE_ENABLED.get(settings) @Volatile private var sweepPeriod = SWEEP_PERIOD.get(settings) @Volatile private var retryPolicy = BackoffPolicy.constantBackoff(COORDINATOR_BACKOFF_MILLIS.get(settings), COORDINATOR_BACKOFF_COUNT.get(settings)) - @Volatile private var templateMigrationEnabled: Boolean = true - @Volatile private var templateMigrationEnabledSetting = TEMPLATE_MIGRATION_CONTROL.get(settings) @Volatile private var jobInterval = JOB_INTERVAL.get(settings) @Volatile private var jobJitter = JITTER.get(settings) @@ -159,18 +149,6 @@ class ManagedIndexCoordinator( indexStateManagementEnabled = it if (!indexStateManagementEnabled) disable() else enable() } - clusterService.clusterSettings.addSettingsUpdateConsumer(METADATA_SERVICE_STATUS) { - metadataServiceEnabled = it == 0 - if (!metadataServiceEnabled) { - logger.info("Canceling metadata moving job because of cluster setting update.") - scheduledMoveMetadata?.cancel() - } else initMoveMetadata() - } - clusterService.clusterSettings.addSettingsUpdateConsumer(TEMPLATE_MIGRATION_CONTROL) { - templateMigrationEnabled = it >= 0L - if (!templateMigrationEnabled) scheduledTemplateMigration?.cancel() - else initTemplateMigration(it) - } 
clusterService.clusterSettings.addSettingsUpdateConsumer(COORDINATOR_BACKOFF_MILLIS, COORDINATOR_BACKOFF_COUNT) { millis, count -> retryPolicy = BackoffPolicy.constantBackoff(millis, count) } @@ -186,10 +164,6 @@ class ManagedIndexCoordinator( // Init background sweep when promoted to being cluster manager initBackgroundSweep() - - initMoveMetadata() - - initTemplateMigration(templateMigrationEnabledSetting) } fun offClusterManager() { @@ -227,8 +201,6 @@ class ManagedIndexCoordinator( override fun afterStart() { initBackgroundSweep() - - initMoveMetadata() } override fun beforeStop() { @@ -241,8 +213,6 @@ class ManagedIndexCoordinator( initBackgroundSweep() indexStateManagementEnabled = true - initMoveMetadata() - // Calling initBackgroundSweep() beforehand runs a sweep ensuring that policies removed from indices // and indices being deleted are accounted for prior to re-enabling jobs launch { @@ -510,76 +480,6 @@ class ManagedIndexCoordinator( scheduledFullSweep = threadPool.scheduleWithFixedDelay(scheduledSweep, sweepPeriod, executorName()) } - fun initMoveMetadata() { - if (!metadataServiceEnabled) return - if (!isIndexStateManagementEnabled()) return - if (!clusterService.state().nodes().isLocalNodeElectedClusterManager) return - scheduledMoveMetadata?.cancel() - - if (metadataService.finishFlag) { - logger.info("Re-enable Metadata Service.") - metadataService.reenableMetadataService() - } - - val scheduledJob = Runnable { - launch { - try { - if (metadataService.finishFlag) { - logger.info("Cancel background move metadata process.") - scheduledMoveMetadata?.cancel() - } - - logger.info("Performing move cluster state metadata.") - metadataService.moveMetadata() - } catch (e: Exception) { - logger.error("Failed to move cluster state metadata", e) - } - } - } - - scheduledMoveMetadata = threadPool.scheduleWithFixedDelay(scheduledJob, TimeValue.timeValueMinutes(1), executorName()) - } - - fun initTemplateMigration(enableSetting: Long) { - if 
(!templateMigrationEnabled) return - if (!isIndexStateManagementEnabled()) return - if (!clusterService.state().nodes().isLocalNodeElectedClusterManager) return - scheduledTemplateMigration?.cancel() - - // if service has finished, re-enable it - if (templateService.finishFlag) { - logger.info("Re-enable template migration service.") - templateService.reenableTemplateMigration() - } - - val scheduledJob = Runnable { - launch { - try { - if (templateService.finishFlag) { - logger.info("ISM template migration process finished, cancel scheduled job.") - scheduledTemplateMigration?.cancel() - return@launch - } - - logger.info("Performing ISM template migration.") - if (enableSetting == 0L) { - if (onClusterManagerTimeStamp != 0L) - templateService.doMigration(Instant.ofEpochMilli(onClusterManagerTimeStamp)) - else { - logger.error("No valid onClusterManager time cached, cancel ISM template migration job.") - scheduledTemplateMigration?.cancel() - } - } else - templateService.doMigration(Instant.ofEpochMilli(enableSetting)) - } catch (e: Exception) { - logger.error("Failed to migrate ISM template", e) - } - } - } - - scheduledTemplateMigration = threadPool.scheduleWithFixedDelay(scheduledJob, TimeValue.timeValueMinutes(1), executorName()) - } - private fun getFullSweepElapsedTime(): TimeValue = TimeValue.timeValueNanos(System.nanoTime() - lastFullSweepTimeNano) diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexRunner.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexRunner.kt index a6fdfdba8..c8d65321a 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexRunner.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/ManagedIndexRunner.kt @@ -262,12 +262,12 @@ object ManagedIndexRunner : } // Check the cluster state for the index metadata - var clusterStateIndexMetadata = getIndexMetadata(managedIndexConfig.index) + val 
clusterStateIndexMetadata = getIndexMetadata(managedIndexConfig.index) val defaultIndexMetadataService = indexMetadataProvider.services[DEFAULT_INDEX_TYPE] as DefaultIndexMetadataService val clusterStateIndexUUID = clusterStateIndexMetadata?.let { defaultIndexMetadataService.getCustomIndexUUID(it) } // If the index metadata is null, the index is not in the cluster state. If the index metadata is not null, but // the cluster state index uuid differs from the one in the managed index config then the config is referring - // to a different index which does not exist in the cluster. We need to check all of the extensions to confirm an index exists + // to a different index which does not exist in the cluster. We need to check all the extensions to confirm an index exists if (clusterStateIndexMetadata == null || clusterStateIndexUUID != managedIndexConfig.indexUuid) { // If the cluster state/default index type didn't have an index with a matching name and uuid combination, try all other index types val nonDefaultIndexTypes = indexMetadataProvider.services.keys.filter { it != DEFAULT_INDEX_TYPE } @@ -846,7 +846,7 @@ object ManagedIndexRunner : val response: ClusterStateResponse = client.admin().cluster().suspendUntil { state(clusterStateRequest, it) } - indexMetaData = response.state.metadata.indices.firstOrNull()?.value + indexMetaData = response.state.metadata.indices[index] } catch (e: Exception) { logger.error("Failed to get IndexMetaData from cluster manager cluster state for index=$index", e) } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataService.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataService.kt deleted file mode 100644 index c42682f71..000000000 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataService.kt +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package 
org.opensearch.indexmanagement.indexstatemanagement - -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.action.ActionListener -import org.opensearch.action.DocWriteRequest -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.action.bulk.BulkItemResponse -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.index.Index -import org.opensearch.indexmanagement.IndexManagementIndices -import org.opensearch.indexmanagement.indexstatemanagement.opensearchapi.getManagedIndexMetadata -import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.updateindexmetadata.UpdateManagedIndexMetaDataAction -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.updateindexmetadata.UpdateManagedIndexMetaDataRequest -import org.opensearch.indexmanagement.indexstatemanagement.util.managedIndexMetadataIndexRequest -import org.opensearch.indexmanagement.indexstatemanagement.util.revertManagedIndexMetadataID -import org.opensearch.indexmanagement.opensearchapi.retry -import org.opensearch.indexmanagement.opensearchapi.suspendUntil -import org.opensearch.indexmanagement.util.IndexManagementException -import org.opensearch.indexmanagement.util.OpenForTesting -import org.opensearch.rest.RestStatus -import java.lang.Exception - -/** - * When all nodes have same version IM plugin (CDI/DDI finished) - * MetadataService starts 
to move metadata from cluster state to config index - */ -@OpenForTesting -@Suppress("MagicNumber", "ReturnCount", "LongMethod", "ComplexMethod") -class MetadataService( - private val client: Client, - private val clusterService: ClusterService, - private val skipExecution: SkipExecution, - private val imIndices: IndexManagementIndices -) { - private val logger = LogManager.getLogger(javaClass) - - @Volatile private var runningLock = false // in case 2 moveMetadata() process running - - private val successfullyIndexedIndices = mutableSetOf() - private var failedToIndexIndices = mutableMapOf() - private var failedToCleanIndices = mutableSetOf() - - private var counter = 0 - final var runTimeCounter = 1 - private set - private val maxRunTime = 10 - - // used in coordinator sweep to cancel scheduled process - @Volatile final var finishFlag = false - private set - fun reenableMetadataService() { finishFlag = false } - - @Volatile private var retryPolicy = - BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(50), 3) - - suspend fun moveMetadata() { - if (runningLock) { - logger.info("There is a move metadata process running...") - return - } else if (finishFlag) { - logger.info("Move metadata has finished.") - return - } - try { - runningLock = true - - if (skipExecution.flag) { - logger.info("Cluster still has nodes running old version of ISM plugin, skip ping execution on new nodes until all nodes upgraded") - runningLock = false - return - } - - if (!imIndices.indexManagementIndexExists()) { - logger.info("ISM config index not exist, so we cancel the metadata migration job.") - finishFlag = true; runningLock = false; runTimeCounter = 0 - return - } - - if (runTimeCounter > maxRunTime) { - updateStatusSetting(-1) - finishFlag = true; runningLock = false; runTimeCounter = 0 - return - } - logger.info("Doing metadata migration $runTimeCounter time.") - - val indicesMetadata = clusterService.state().metadata.indices - var clusterStateManagedIndexMetadata = 
indicesMetadata.map { - it.key to it.value.getManagedIndexMetadata() - }.filter { it.second != null }.distinct().toMap() - // filter out previous failedToClean indices which already been indexed - clusterStateManagedIndexMetadata = - clusterStateManagedIndexMetadata.filter { it.key !in failedToCleanIndices.map { index -> index.name } } - - // filter out cluster state metadata with outdated index uuid - val corruptManagedIndices = mutableListOf() - val indexUuidMap = mutableMapOf() - clusterStateManagedIndexMetadata.forEach { (indexName, metadata) -> - val indexMetadata = indicesMetadata[indexName] - val currentIndexUuid = indexMetadata.indexUUID - if (currentIndexUuid != metadata?.indexUuid) { - corruptManagedIndices.add(indexMetadata.index) - } else { - indexUuidMap[currentIndexUuid] = indexName - } - } - logger.info("Corrupt managed indices with outdated index uuid in metadata: $corruptManagedIndices") - clusterStateManagedIndexMetadata = clusterStateManagedIndexMetadata.filter { (indexName, _) -> - indexName !in corruptManagedIndices.map { it.name } - } - - if (clusterStateManagedIndexMetadata.isEmpty()) { - if (counter++ > 2 && corruptManagedIndices.isEmpty()) { - logger.info("Move Metadata succeed, set finish flag to true. Indices failed to get indexed: $failedToIndexIndices") - updateStatusSetting(1) - finishFlag = true; runningLock = false; runTimeCounter = 0 - return - } - if (failedToCleanIndices.isNotEmpty()) { - logger.info("Failed to clean indices: $failedToCleanIndices. 
Only clean cluster state metadata in this run.") - cleanMetadatas(failedToCleanIndices.toList()) - finishFlag = false; runningLock = false - return - } - } else { - counter = 0; finishFlag = false // index metadata for indices which metadata hasn't been indexed - val bulkIndexReq = - clusterStateManagedIndexMetadata.mapNotNull { it.value }.map { - managedIndexMetadataIndexRequest( - it, - waitRefresh = false, // should be set at bulk request level - create = true // restrict this as create operation - ) - } - // remove the part which gonna be indexed from last time failedToIndex - failedToIndexIndices = failedToIndexIndices.filterKeys { it !in indexUuidMap.keys }.toMutableMap() - successfullyIndexedIndices.clear() - indexMetadatas(bulkIndexReq) - - logger.info("success indexed: ${successfullyIndexedIndices.map { indexUuidMap[it] }}") - logger.info( - "failed indexed: ${failedToIndexIndices.map { indexUuidMap[it.key] }};" + - "failed reason: ${failedToIndexIndices.values.distinct()}" - ) - } - - // clean metadata for indices which metadata already been indexed - val indicesToCleanMetadata = - indexUuidMap.filter { it.key in successfullyIndexedIndices }.map { Index(it.value, it.key) } - .toList() + failedToCleanIndices + corruptManagedIndices - - cleanMetadatas(indicesToCleanMetadata) - if (failedToCleanIndices.isNotEmpty()) { - logger.info("Failed to clean cluster metadata for: ${failedToCleanIndices.map { it.name }}") - } - - runTimeCounter++ - } finally { - runningLock = false - } - } - - private suspend fun updateStatusSetting(status: Int) { - val newSetting = Settings.builder().put(ManagedIndexSettings.METADATA_SERVICE_STATUS.key, status) - val request = ClusterUpdateSettingsRequest().persistentSettings(newSetting) - retryPolicy.retry(logger, listOf(RestStatus.INTERNAL_SERVER_ERROR)) { - client.admin().cluster().updateSettings(request, updateSettingListener(status)) - } - } - - private fun updateSettingListener(status: Int): ActionListener { - return object : 
ActionListener { - override fun onFailure(e: Exception) { - logger.error("Failed to update template migration setting to $status", e) - throw IndexManagementException.wrap(Exception("Failed to update template migration setting to $status")) - } - - override fun onResponse(response: ClusterUpdateSettingsResponse) { - if (!response.isAcknowledged) { - logger.error("Update metadata migration setting to $status is not acknowledged") - throw IndexManagementException.wrap( - Exception("Update metadata migration setting to $status is not acknowledged") - ) - } else { - logger.info("Successfully metadata template migration setting to $status") - } - } - } - } - - private suspend fun indexMetadatas(requests: List>) { - if (requests.isEmpty()) return - var requestsToRetry = requests - - // when we try to index sth to config index - // we need to make sure the schema is up to date - if (!imIndices.attemptUpdateConfigIndexMapping()) { - logger.error("Failed to update config index mapping.") - return - } - - retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val bulkRequest = BulkRequest().add(requestsToRetry) - val bulkResponse: BulkResponse = client.suspendUntil { bulk(bulkRequest, it) } - val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed } - - val retryIndexUuids = mutableListOf() - bulkResponse.items.forEach { - val indexUuid = revertManagedIndexMetadataID(it.id) - if (it.isFailed) { - if (it.status() == RestStatus.TOO_MANY_REQUESTS) { - retryIndexUuids.add(it.itemId) - } else { - logger.error("failed reason: ${it.failure}, ${it.failureMessage}") - failedToIndexIndices[indexUuid] = it.failure - } - } else { - successfullyIndexedIndices.add(indexUuid) - failedToIndexIndices.remove(indexUuid) - } - } - requestsToRetry = retryIndexUuids.map { bulkRequest.requests()[it] } - - if (requestsToRetry.isNotEmpty()) { - val retryCause = failedResponses.first { it.status() == RestStatus.TOO_MANY_REQUESTS }.failure.cause - throw 
ExceptionsHelper.convertToOpenSearchException(retryCause) - } - } - } - - private suspend fun cleanMetadatas(indices: List) { - if (indices.isEmpty()) return - - val request = UpdateManagedIndexMetaDataRequest(indicesToRemoveManagedIndexMetaDataFrom = indices) - try { - retryPolicy.retry(logger) { - val response: AcknowledgedResponse = - client.suspendUntil { execute(UpdateManagedIndexMetaDataAction.INSTANCE, request, it) } - if (response.isAcknowledged) { - failedToCleanIndices.removeAll(indices) - } else { - logger.error("Failed to clean cluster state metadata for indices: [$indices].") - failedToCleanIndices.addAll(indices) - } - } - } catch (e: Exception) { - logger.error("Failed to clean cluster state metadata for indices: [$indices].", e) - failedToCleanIndices.addAll(indices) - } - } -} - -typealias MetadataDocID = String -typealias IndexUuid = String -typealias IndexName = String diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/ShrinkAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/ShrinkAction.kt index 342ab21d8..91a3a1a24 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/ShrinkAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/ShrinkAction.kt @@ -132,8 +132,7 @@ class ShrinkAction( const val TARGET_INDEX_TEMPLATE_FIELD = "target_index_name_template" const val ALIASES_FIELD = "aliases" const val FORCE_UNSAFE_FIELD = "force_unsafe" - const val LOCK_RESOURCE_TYPE = "shrink" - const val LOCK_RESOURCE_NAME = "node_name" + const val LOCK_SOURCE_JOB_ID = "shrink-node_name" fun getSecurityFailureMessage(failure: String) = "Shrink action failed because of missing permissions: $failure" } } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/migration/MigrationServices.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/migration/MigrationServices.kt deleted file 
mode 100644 index 542ac5647..000000000 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/migration/MigrationServices.kt +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.indexmanagement.indexstatemanagement.migration - -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.withContext -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.action.ActionListener -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.action.DocWriteResponse -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse -import org.opensearch.action.admin.indices.template.post.SimulateIndexTemplateResponse -import org.opensearch.action.admin.indices.template.post.SimulateTemplateAction -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.action.bulk.BulkItemResponse -import org.opensearch.action.get.MultiGetRequest -import org.opensearch.action.get.MultiGetResponse -import org.opensearch.action.update.UpdateRequest -import org.opensearch.action.update.UpdateResponse -import org.opensearch.client.Client -import org.opensearch.cluster.metadata.Template -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.indexmanagement.IndexManagementIndices -import org.opensearch.indexmanagement.IndexManagementPlugin -import 
org.opensearch.indexmanagement.indexstatemanagement.model.ISMTemplate -import org.opensearch.indexmanagement.indexstatemanagement.model.Policy -import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings -import org.opensearch.indexmanagement.indexstatemanagement.util.updateISMTemplateRequest -import org.opensearch.indexmanagement.opensearchapi.parseWithType -import org.opensearch.indexmanagement.opensearchapi.retry -import org.opensearch.indexmanagement.opensearchapi.suspendUntil -import org.opensearch.indexmanagement.util.OpenForTesting -import org.opensearch.rest.RestStatus -import java.time.Instant - -@OpenForTesting -@Suppress("MagicNumber", "ReturnCount", "ThrowsCount", "TooManyFunctions", "ComplexMethod", "NestedBlockDepth") -class ISMTemplateService( - private val client: Client, - private val clusterService: ClusterService, - private val xContentRegistry: NamedXContentRegistry, - private val imIndices: IndexManagementIndices -) { - - private val logger = LogManager.getLogger(javaClass) - - @Volatile final var finishFlag = false - private set - fun reenableTemplateMigration() { finishFlag = false } - - @Volatile var runTimeCounter = 0 - - private var ismTemplateMap = mutableMapOf>() - private val v1TemplatesWithPolicyID = mutableMapOf() - - private val negOrderToPositive = mutableMapOf() - private val v1orderToTemplatesName = mutableMapOf>() - private val v1orderToBucketIncrement = mutableMapOf() - - private val policiesToUpdate = mutableMapOf() - private val policiesFailedToUpdate = mutableMapOf() - private lateinit var lastUpdatedTime: Instant - - @Volatile private var retryPolicy = - BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(50), 3) - - suspend fun doMigration(timeStamp: Instant) { - if (runTimeCounter >= 10) { - stopMigration(-2) - return - } - logger.info("Doing ISM template migration ${++runTimeCounter} time.") - cleanCache() - - lastUpdatedTime = timeStamp.minusSeconds(3600) - logger.info("Use 
$lastUpdatedTime as migrating ISM template last_updated_time") - - getIndexTemplates() - logger.info("ISM templates: $ismTemplateMap") - - getISMPolicies() - logger.info("Policies to update: ${policiesToUpdate.keys}") - - updateISMPolicies() - - if (policiesToUpdate.isEmpty()) { - stopMigration(-1) - } - } - - private fun stopMigration(successFlag: Long) { - finishFlag = true - val newSetting = Settings.builder().put(ManagedIndexSettings.TEMPLATE_MIGRATION_CONTROL.key, successFlag) - val request = ClusterUpdateSettingsRequest().persistentSettings(newSetting) - client.admin().cluster().updateSettings(request, updateSettingListener()) - logger.info("Failure experienced when migrating ISM Template and update ISM policies: $policiesFailedToUpdate") - // TODO what if update setting failed, cannot reset to -1/-2 - runTimeCounter = 0 - } - - private fun updateSettingListener(): ActionListener { - return object : ActionListener { - override fun onFailure(e: Exception) { - logger.error("Failed to update template migration setting", e) - } - - override fun onResponse(response: ClusterUpdateSettingsResponse) { - if (!response.isAcknowledged) { - logger.error("Update template migration setting is not acknowledged") - } else { - logger.info("Successfully update template migration setting") - } - } - } - } - - private suspend fun getIndexTemplates() { - processNegativeOrder() - - bucketizeV1TemplatesByOrder() - populateBucketPriority() - populateV1Template() - - clusterService.state().metadata.templatesV2().forEach { - val template = it.value - val indexPatterns = template.indexPatterns() - val priority = template.priorityOrZero().toInt() - val policyIDSetting = simulateTemplate(it.key) - if (policyIDSetting != null) { - populateV2ISMTemplateMap(policyIDSetting, indexPatterns, priority) - } - } - } - - // old v1 template can have negative priority - // map the negative priority to non-negative value - private fun processNegativeOrder() { - val negOrderSet = mutableSetOf() - 
clusterService.state().metadata.templates.forEach { - val policyIDSetting = ManagedIndexSettings.POLICY_ID.get(it.value.settings()) - if (policyIDSetting != "") { - val priority = it.value.order - if (priority < 0) { - negOrderSet.add(priority) - } - // cache pattern and policyID for v1 template - v1TemplatesWithPolicyID[it.key] = V1TemplateCache(it.value.patterns(), 0, policyIDSetting) - } - } - val sorted = negOrderSet.sorted() - var p = 0 - for (i in sorted) { - negOrderToPositive[i] = p++ - } - } - - private fun normalizePriority(order: Int): Int { - if (order < 0) return negOrderToPositive[order] ?: 0 - return order + (negOrderToPositive.size) - } - - private fun bucketizeV1TemplatesByOrder() { - clusterService.state().metadata.templates.forEach { - val v1TemplateCache = v1TemplatesWithPolicyID[it.key] - if (v1TemplateCache != null) { - val priority = normalizePriority(it.value.order) - // cache the non-negative priority - v1TemplatesWithPolicyID[it.key] = v1TemplateCache.copy(order = priority) - - val bucket = v1orderToTemplatesName[priority] - if (bucket == null) { - v1orderToTemplatesName[priority] = mutableListOf(it.key) - } else { - // add later one to start of the list - bucket.add(0, it.key) - } - } - } - } - - private fun populateBucketPriority() { - v1orderToTemplatesName.forEach { (order, templateNames) -> - var increase = 0 - templateNames.forEach { - val v1TemplateCache = v1TemplatesWithPolicyID[it] - if (v1TemplateCache != null) { - val cachePriority = v1TemplateCache.order - v1TemplatesWithPolicyID[it] = v1TemplateCache - .copy(order = cachePriority + increase) - } - increase++ - } - v1orderToBucketIncrement[order] = templateNames.size - 1 - } - } - - private fun populateV1Template() { - val allOrders = v1orderToTemplatesName.keys.toList().sorted() - allOrders.forEachIndexed { ind, order -> - val smallerOrders = allOrders.subList(0, ind) - val increments = smallerOrders.mapNotNull { v1orderToBucketIncrement[it] }.sum() - - val templates = 
v1orderToTemplatesName[order] - templates?.forEach { - val v1TemplateCache = v1TemplatesWithPolicyID[it] - if (v1TemplateCache != null) { - val policyID = v1TemplateCache.policyID - val indexPatterns = v1TemplateCache.patterns - val priority = v1TemplateCache.order + increments - saveISMTemplateToMap(policyID, ISMTemplate(indexPatterns, priority, lastUpdatedTime)) - } - } - } - } - - private fun saveISMTemplateToMap(policyID: String, ismTemplate: ISMTemplate) { - val policyToISMTemplate = ismTemplateMap[policyID] - if (policyToISMTemplate != null) { - policyToISMTemplate.add(ismTemplate) - } else { - ismTemplateMap[policyID] = mutableListOf(ismTemplate) - } - } - - private suspend fun simulateTemplate(templateName: String): String? { - val request = SimulateTemplateAction.Request(templateName) - val response: SimulateIndexTemplateResponse = - client.suspendUntil { execute(SimulateTemplateAction.INSTANCE, request, it) } - - var policyIDSetting: String? = null - withContext(Dispatchers.IO) { - val out = BytesStreamOutput().also { response.writeTo(it) } - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val resolvedTemplate = sin.readOptionalWriteable(::Template) - if (resolvedTemplate != null) { - policyIDSetting = ManagedIndexSettings.POLICY_ID.get(resolvedTemplate.settings()) - } - } - return policyIDSetting - } - - private fun populateV2ISMTemplateMap(policyID: String, indexPatterns: List, priority: Int) { - var v1Increment = 0 - val v1MaxOrder = v1orderToBucketIncrement.keys.maxOrNull() - if (v1MaxOrder != null) { - v1Increment = v1MaxOrder + v1orderToBucketIncrement.values.sum() - } - - saveISMTemplateToMap(policyID, ISMTemplate(indexPatterns, normalizePriority(priority) + v1Increment, lastUpdatedTime)) - } - - private suspend fun getISMPolicies() { - if (ismTemplateMap.isEmpty()) return - - val mReq = MultiGetRequest() - ismTemplateMap.keys.forEach { mReq.add(IndexManagementPlugin.INDEX_MANAGEMENT_INDEX, it) } - try { - val mRes: MultiGetResponse = 
client.suspendUntil { multiGet(mReq, it) } - policiesToUpdate.clear() - mRes.forEach { - if (it.response != null && !it.response.isSourceEmpty && !it.isFailed) { - val response = it.response - var policy: Policy? = null - try { - policy = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, - XContentType.JSON - ).use { xcp -> - xcp.parseWithType(response.id, response.seqNo, response.primaryTerm, Policy.Companion::parse) - } - } catch (e: Exception) { - logger.error("Failed to parse policy [${response.id}] when migrating templates", e) - } - - if (policy?.ismTemplate == null) { - policiesToUpdate[it.id] = Pair(response.seqNo, response.primaryTerm) - } - } - } - } catch (e: ActionRequestValidationException) { - logger.warn("ISM config index not exists when migrating templates.") - } - } - - private suspend fun updateISMPolicies() { - if (policiesToUpdate.isEmpty()) return - - if (!imIndices.attemptUpdateConfigIndexMapping()) { - logger.error("Failed to update config index mapping.") - return - } - - var requests = mutableListOf() - policiesToUpdate.forEach { policyID, (seqNo, priTerm) -> - val ismTemplates = ismTemplateMap[policyID] - if (ismTemplates != null) - requests.add(updateISMTemplateRequest(policyID, ismTemplates, seqNo, priTerm)) - } - - retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val failedRequests = mutableListOf() - var retryCause: Exception? = null - requests.forEach { req -> - var res: UpdateResponse? 
= null - try { - res = client.suspendUntil { update(req, it) } - logger.info("update policy for ${req.id()}") - if (res?.result == DocWriteResponse.Result.UPDATED) { - policiesToUpdate.remove(req.id()) - } - } catch (e: Exception) { - logger.info("failed to update policy for ${req.id()}") - if (res?.status() == RestStatus.TOO_MANY_REQUESTS) { - failedRequests.add(req) - retryCause = e - } else { - logger.error("Failed to update policy ${req.id()} with ISM template", e) - } - } - } - - if (failedRequests.isNotEmpty()) { - requests = failedRequests - throw ExceptionsHelper.convertToOpenSearchException(retryCause) - } - } - } - - private fun cleanCache() { - ismTemplateMap.clear() - v1TemplatesWithPolicyID.clear() - v1orderToTemplatesName.clear() - v1orderToBucketIncrement.clear() - negOrderToPositive.clear() - policiesToUpdate.clear() - } -} - -data class V1TemplateCache( - val patterns: List, - val order: Int, - val policyID: String -) - -typealias policyID = String -typealias templateName = String -typealias seqNoPrimaryTerm = Pair diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Chime.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Chime.kt index 55d91fe88..064a9957b 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Chime.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Chime.kt @@ -5,7 +5,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.model.destination -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.io.stream.Writeable diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/CustomWebhook.kt 
b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/CustomWebhook.kt index ff508f82b..4220af107 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/CustomWebhook.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/CustomWebhook.kt @@ -5,7 +5,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.model.destination -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.io.stream.Writeable diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Slack.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Slack.kt index bfed1ead9..f79c97026 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Slack.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/model/destination/Slack.kt @@ -5,7 +5,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.model.destination -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.io.stream.Writeable diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/opensearchapi/OpenSearchExtensions.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/opensearchapi/OpenSearchExtensions.kt index 157676f00..cb7e4c6ad 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/opensearchapi/OpenSearchExtensions.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/opensearchapi/OpenSearchExtensions.kt @@ -199,8 +199,6 @@ fun XContentBuilder.buildMetadata(name: 
String, metadata: ToXContentFragment, pa // Get the oldest rollover time or null if index was never rolled over fun IndexMetadata.getOldestRolloverTime(): Instant? { - return this.rolloverInfos.values() - .map { it.value.time } - .minOrNull() // oldest should be min as its epoch time + return this.rolloverInfos.values.minOfOrNull { it.time } // oldest should be min as its epoch time ?.let { Instant.ofEpochMilli(it) } } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestAddPolicyAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestAddPolicyAction.kt index 190ffa775..6e509b4f6 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestAddPolicyAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestAddPolicyAction.kt @@ -6,7 +6,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.resthandler import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.core.xcontent.MediaType import org.opensearch.common.xcontent.XContentHelper import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.ISM_BASE_URI diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestChangePolicyAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestChangePolicyAction.kt index 0c3552d13..be123df24 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestChangePolicyAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestChangePolicyAction.kt @@ -6,7 +6,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.resthandler import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import 
org.opensearch.core.xcontent.XContentParser.Token import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.ISM_BASE_URI diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestExplainAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestExplainAction.kt index 5852e3302..c9bde8bbf 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestExplainAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestExplainAction.kt @@ -7,7 +7,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.resthandler import org.apache.logging.log4j.LogManager import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.common.logging.DeprecationLogger import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.ISM_BASE_URI import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.LEGACY_ISM_BASE_URI diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRemovePolicyAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRemovePolicyAction.kt index 7e7d79307..33720ae70 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRemovePolicyAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRemovePolicyAction.kt @@ -6,7 +6,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.resthandler import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.ISM_BASE_URI import 
org.opensearch.indexmanagement.IndexManagementPlugin.Companion.LEGACY_ISM_BASE_URI import org.opensearch.indexmanagement.indexstatemanagement.transport.action.removepolicy.RemovePolicyAction diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRetryFailedManagedIndexAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRetryFailedManagedIndexAction.kt index 479e3f071..ee07ddeaf 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRetryFailedManagedIndexAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/resthandler/RestRetryFailedManagedIndexAction.kt @@ -6,7 +6,7 @@ package org.opensearch.indexmanagement.indexstatemanagement.resthandler import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.common.logging.DeprecationLogger import org.opensearch.core.xcontent.MediaType import org.opensearch.common.xcontent.XContentHelper diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptMoveShardsStep.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptMoveShardsStep.kt index 6d1764e4c..9754f5d8a 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptMoveShardsStep.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptMoveShardsStep.kt @@ -28,9 +28,9 @@ import org.opensearch.indexmanagement.indexstatemanagement.action.ShrinkAction import org.opensearch.indexmanagement.indexstatemanagement.model.ManagedIndexConfig import org.opensearch.indexmanagement.indexstatemanagement.util.getIntervalFromManagedIndexConfig import org.opensearch.indexmanagement.indexstatemanagement.util.getManagedIndexConfig -import 
org.opensearch.indexmanagement.indexstatemanagement.util.getNodeFreeMemoryAfterShrink +import org.opensearch.indexmanagement.indexstatemanagement.util.getNodeFreeDiskSpaceAfterShrink import org.opensearch.indexmanagement.indexstatemanagement.util.getShardIdToNodeNameSet -import org.opensearch.indexmanagement.indexstatemanagement.util.getShrinkLockID +import org.opensearch.indexmanagement.indexstatemanagement.util.getShrinkJobID import org.opensearch.indexmanagement.indexstatemanagement.util.isIndexGreen import org.opensearch.indexmanagement.indexstatemanagement.util.issueUpdateSettingsRequest import org.opensearch.indexmanagement.opensearchapi.convertToMap @@ -60,43 +60,67 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, override suspend fun wrappedExecute(context: StepContext): AttemptMoveShardsStep { val client = context.client val indexName = context.metadata.index - val shrinkTargetIndexName = - compileTemplate(action.targetIndexTemplate, context.metadata, indexName + DEFAULT_TARGET_SUFFIX, context.scriptService) - - if (targetIndexNameIsInvalid(context.clusterService, shrinkTargetIndexName)) return this + // Only index at green status can be shrunk if (!isIndexGreen(client, indexName)) { info = mapOf("message" to INDEX_NOT_GREEN_MESSAGE) stepStatus = StepStatus.CONDITION_NOT_MET return this } - if (shouldFailUnsafe(context.clusterService, indexName)) return this + // Check if target index name is valid + val shrinkTargetIndexName = + compileTemplate( + action.targetIndexTemplate, + context.metadata, + indexName + DEFAULT_TARGET_SUFFIX, + context.scriptService + ) + if (targetIndexNameIsInvalid(context.clusterService, shrinkTargetIndexName)) return this - // If there is only one primary shard we complete the step and in getUpdatedManagedIndexMetadata will start a no-op - val numOriginalShards = context.clusterService.state().metadata.indices[indexName].numberOfShards - if (numOriginalShards == 1) { - info = mapOf("message" to 
ONE_PRIMARY_SHARD_MESSAGE) - stepStatus = StepStatus.COMPLETED - return this - } + if (shouldFailUnsafe(context.clusterService, indexName)) return this // Get stats on index size and docs val (statsStore, statsDocs, shardStats) = getIndexStats(indexName, client) ?: return this val indexSize = statsStore.sizeInBytes + // Get stats of current and target shards + val numOriginalShards = context.clusterService.state().metadata.indices[indexName]?.numberOfShards + ?: error("numOriginalShards should not be null") val numTargetShards = getNumTargetShards(numOriginalShards, indexSize) if (shouldFailTooManyDocuments(statsDocs, numTargetShards)) return this - val originalIndexSettings = getOriginalSettings(indexName, context.clusterService) + // If there is only one primary shard we complete the step and in getUpdatedManagedIndexMetadata will start a no-op + if (numOriginalShards == 1) { + info = mapOf("message" to ONE_PRIMARY_SHARD_MESSAGE) + stepStatus = StepStatus.COMPLETED + return this + } + // If source index already has the shard count equal to target number of shard, we complete the step + if (numOriginalShards == numTargetShards) { + info = mapOf("message" to NO_SHARD_COUNT_CHANGE_MESSAGE) + stepStatus = StepStatus.COMPLETED + return this + } - // get the nodes with enough memory in increasing order of free space - val suitableNodes = findSuitableNodes(context, shardStats, indexSize) + // Get original index settings; WaitForShrinkStep.resetReadOnlyAndRouting method resets write block and routing node after shrink completed. 
+ val originalIndexSettings = getOriginalSettings(indexName, context.clusterService) // Get the job interval to use in determining the lock length val interval = getJobIntervalSeconds(context.metadata.indexUuid, client) - // iterate through the nodes and try to acquire a lock on one - val (lock, nodeName) = acquireLockFromNodeList(context.lockService, suitableNodes, interval, indexName) ?: return this + + // Get candidate nodes for shrink + val suitableNodes = findSuitableNodes(context, shardStats, indexSize) + + if (suitableNodes.isEmpty()) { + info = mapOf("message" to NO_AVAILABLE_NODES_MESSAGE) + stepStatus = StepStatus.CONDITION_NOT_MET + return this + } + + // Iterate through the suitable nodes and try to acquire a lock on one + val (lock, nodeName) = acquireLockFromNodeList(context.lockService, suitableNodes, interval, indexName) + ?: return this shrinkActionProperties = ShrinkActionProperties( nodeName, shrinkTargetIndexName, @@ -116,7 +140,10 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, override fun getGenericFailureMessage(): String = FAILURE_MESSAGE - private suspend fun getIndexStats(indexName: String, client: Client): Triple>? { + private suspend fun getIndexStats( + indexName: String, + client: Client + ): Triple>? { val statsRequest = IndicesStatsRequest().indices(indexName) val statsResponse: IndicesStatsResponse = client.admin().indices().suspendUntil { stats(statsRequest, it) @@ -125,7 +152,10 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, val statsDocs = statsResponse.total.docs val statsShards = statsResponse.shards if (statsStore == null || statsDocs == null || statsShards == null) { - fail(FAILURE_MESSAGE, "Failed to move shards in shrink action as IndicesStatsResponse was missing some stats.") + setStepFailed( + FAILURE_MESSAGE, + "Failed to move shards in shrink action as IndicesStatsResponse was missing some stats." 
+ ) return null } return Triple(statsStore, statsDocs, statsShards) @@ -172,7 +202,7 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, val totalDocs: Long = docsStats.count val docsPerTargetShard: Long = totalDocs / numTargetShards if (docsPerTargetShard > MAXIMUM_DOCS_PER_SHARD) { - fail(TOO_MANY_DOCS_FAILURE_MESSAGE, TOO_MANY_DOCS_FAILURE_MESSAGE) + setStepFailed(TOO_MANY_DOCS_FAILURE_MESSAGE, TOO_MANY_DOCS_FAILURE_MESSAGE) return true } return false @@ -186,11 +216,11 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, private fun shouldFailUnsafe(clusterService: ClusterService, indexName: String): Boolean { // If forceUnsafe is set and is true, then we don't even need to check the number of replicas if (action.forceUnsafe == true) return false - val numReplicas = clusterService.state().metadata.indices[indexName].numberOfReplicas + val numReplicas = clusterService.state().metadata.indices[indexName]?.numberOfReplicas val shouldFailForceUnsafeCheck = numReplicas == 0 if (shouldFailForceUnsafeCheck) { logger.info(UNSAFE_FAILURE_MESSAGE) - fail(UNSAFE_FAILURE_MESSAGE) + setStepFailed(UNSAFE_FAILURE_MESSAGE) return true } return false @@ -200,17 +230,23 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, val indexExists = clusterService.state().metadata.indices.containsKey(shrinkTargetIndexName) if (indexExists) { val indexExistsMessage = getIndexExistsMessage(shrinkTargetIndexName) - fail(indexExistsMessage, indexExistsMessage) + setStepFailed(indexExistsMessage, indexExistsMessage) return true } - val exceptionGenerator: (String, String) -> RuntimeException = { index_name, reason -> InvalidIndexNameException(index_name, reason) } + val exceptionGenerator: (String, String) -> RuntimeException = { index_name, reason -> + InvalidIndexNameException(index_name, reason) + } // If the index name is invalid for any reason, this will throw an exception giving the reason why 
in the message. // That will be displayed to the user as the cause. validateIndexOrAliasName(shrinkTargetIndexName, exceptionGenerator) return false } - private suspend fun setToReadOnlyAndMoveIndexToNode(stepContext: StepContext, node: String, lock: LockModel): Boolean { + private suspend fun setToReadOnlyAndMoveIndexToNode( + stepContext: StepContext, + node: String, + lock: LockModel + ): Boolean { val updateSettings = Settings.builder() .put(SETTING_BLOCKS_WRITE, true) .put(ROUTING_SETTING, node) @@ -223,10 +259,10 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, } finally { isUpdateAcknowledged = response != null && response.isAcknowledged if (!isUpdateAcknowledged) { - fail(UPDATE_FAILED_MESSAGE, UPDATE_FAILED_MESSAGE) + setStepFailed(UPDATE_FAILED_MESSAGE, UPDATE_FAILED_MESSAGE) val released: Boolean = lockService.suspendUntil { release(lock, it) } if (!released) { - logger.error("Failed to release Shrink action lock on node [$node]") + logger.error("Failed to release Shrink action lock on node: [$node]") } } } @@ -244,45 +280,63 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, indexName: String ): Pair? { for (nodeName in suitableNodes) { - val lockID = getShrinkLockID(nodeName) + val lockID = getShrinkJobID(nodeName) val lock: LockModel? 
= lockService.suspendUntil { acquireLockWithId(INDEX_MANAGEMENT_INDEX, getShrinkLockDuration(jobIntervalSeconds), lockID, it) } if (lock != null) { return lock to nodeName + } else { + logger.info("Shrink action could not acquire lock of node [$nodeName] for [$indexName].") + } } - logger.info("Shrink action could not find available node to shrink onto for index [$indexName].") - info = mapOf("message" to NO_AVAILABLE_NODES_MESSAGE) + info = mapOf("message" to NO_UNLOCKED_NODES_MESSAGE) stepStatus = StepStatus.CONDITION_NOT_MET return null } /* - * Returns the list of node names for nodes with enough space to shrink to, in increasing order of space available + * Returns the list of available nodes for shrink, in increasing order of space available */ - @SuppressWarnings("NestedBlockDepth", "ComplexMethod") + @SuppressWarnings("NestedBlockDepth", "ComplexMethod", "LongMethod") private suspend fun findSuitableNodes( stepContext: StepContext, shardStats: Array, indexSizeInBytes: Long ): List { val nodesStatsReq = NodesStatsRequest().addMetric(FS_METRIC) - val nodeStatsResponse: NodesStatsResponse = stepContext.client.admin().cluster().suspendUntil { nodesStats(nodesStatsReq, it) } + val nodeStatsResponse: NodesStatsResponse = stepContext.client.admin().cluster().suspendUntil { + nodesStats(nodesStatsReq, it) + } val nodesList = nodeStatsResponse.nodes.filter { it.node.isDataNode } + val suitableNodes: ArrayList = ArrayList() + // Sort in increasing order of keys, in our case this is memory remaining - val comparator = kotlin.Comparator { o1: Tuple, o2: Tuple -> o1.v1().compareTo(o2.v1()) } + val comparator = kotlin.Comparator { o1: Tuple, o2: Tuple -> + o1.v1().compareTo(o2.v1()) + } val nodesWithSpace = PriorityQueue(comparator) for (node in nodesList) { - // Gets the amount of memory in the node which will be free below the high watermark level after adding 2*indexSizeInBytes, + // Gets the amount of disk space in the node which will be free below the high watermark
level after adding 2*indexSizeInBytes, // as the source index is duplicated during the shrink - val remainingMem = getNodeFreeMemoryAfterShrink(node, indexSizeInBytes, stepContext.clusterService.clusterSettings) - if (remainingMem > 0L) { - nodesWithSpace.add(Tuple(remainingMem, node.node.name)) + val remainingDiskSpace = getNodeFreeDiskSpaceAfterShrink( + node, + indexSizeInBytes, + stepContext.clusterService.clusterSettings + ) + if (remainingDiskSpace > 0L) { + nodesWithSpace.add(Tuple(remainingDiskSpace, node.node.name)) } } - val shardIdToNodeList: Map> = getShardIdToNodeNameSet(shardStats, stepContext.clusterService.state().nodes) - val suitableNodes: ArrayList = ArrayList() + // If no node has enough disk space, skip next step + if (nodesWithSpace.size < 1) { + logger.info("No node has enough disk space for shrink action.") + return suitableNodes + } + val shardIdToNodeList: Map> = getShardIdToNodeNameSet( + shardStats, + stepContext.clusterService.state().nodes + ) // For each node, do a dry run of moving all shards to the node to make sure that there aren't any other blockers // to the allocation. 
for (sizeNodeTuple in nodesWithSpace) { @@ -294,18 +348,38 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, val shardId = shard.shardRouting.shardId() val currentShardNode = stepContext.clusterService.state().nodes[shard.shardRouting.currentNodeId()] // Don't attempt a dry run for shards which have a copy already on that node - if (shardIdToNodeList[shardId.id]?.contains(targetNodeName) == true || requestedShardIds.contains(shardId.id)) continue - clusterRerouteRequest.add(MoveAllocationCommand(indexName, shardId.id, currentShardNode.name, targetNodeName)) + if (shardIdToNodeList[shardId.id]?.contains(targetNodeName) == true || requestedShardIds.contains( + shardId.id + ) + ) continue + clusterRerouteRequest.add( + MoveAllocationCommand(indexName, shardId.id, currentShardNode.name, targetNodeName) + ) requestedShardIds.add(shardId.id) } val clusterRerouteResponse: ClusterRerouteResponse = stepContext.client.admin().cluster().suspendUntil { reroute(clusterRerouteRequest, it) } - val numYesDecisions = clusterRerouteResponse.explanations.explanations().count { it.decisions().type().equals((Decision.Type.YES)) } - // Should be the same number of yes decisions as the number of requests - if (numYesDecisions == requestedShardIds.size) { + val numOfDecisions = clusterRerouteResponse.explanations.explanations().size + val numNoDecisions = clusterRerouteResponse.explanations.explanations().count { + it.decisions().type().equals((Decision.Type.NO)) + } + val numYesDecisions = clusterRerouteResponse.explanations.explanations().count { + it.decisions().type().equals((Decision.Type.YES)) + } + val numThrottleDecisions = clusterRerouteResponse.explanations.explanations().count { + it.decisions().type().equals((Decision.Type.THROTTLE)) + } + logger.debug( + getShardMovingDecisionInfo(numNoDecisions, numYesDecisions, numThrottleDecisions, targetNodeName) + ) + // NO decision type is not counted; YES and THROTTLE decision type are available for 
shrink. + if (numOfDecisions - numNoDecisions >= requestedShardIds.size) { suitableNodes.add(sizeNodeTuple.v2()) } } + if (suitableNodes.size < 1) { + logger.info("No node has shard moving permission for shrink action.") + } return suitableNodes } @@ -397,27 +471,40 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, const val ROUTING_SETTING = "index.routing.allocation.require._name" const val DEFAULT_TARGET_SUFFIX = "_shrunken" const val name = "attempt_move_shards_step" - const val UPDATE_FAILED_MESSAGE = "Shrink failed because shard settings could not be updated." + const val UPDATE_FAILED_MESSAGE = "Shrink action failed because shard settings could not be updated." const val NO_AVAILABLE_NODES_MESSAGE = - "There are no available nodes to move to to execute a shrink. Delaying until node becomes available." + "There is no node with enough disk space or shard moving permission for a shrink action." + const val NO_UNLOCKED_NODES_MESSAGE = + "Candidate node for shrink action is locked for other actions. Delaying until it gets unlocked." const val UNSAFE_FAILURE_MESSAGE = "Shrink failed because index has no replicas and force_unsafe is not set to true." - const val ONE_PRIMARY_SHARD_MESSAGE = "Shrink action did not do anything because source index only has one primary shard." - const val TOO_MANY_DOCS_FAILURE_MESSAGE = "Shrink failed because there would be too many documents on each target shard following the shrink." - const val INDEX_NOT_GREEN_MESSAGE = "Shrink action cannot start moving shards as the index is not green." - const val FAILURE_MESSAGE = "Shrink failed to start moving shards." + const val ONE_PRIMARY_SHARD_MESSAGE = "Source index only has one primary shard. Skip this shrink execution." + const val NO_SHARD_COUNT_CHANGE_MESSAGE = "Source index already has target number of shards. Skip this shrink execution." 
+ const val TOO_MANY_DOCS_FAILURE_MESSAGE = "Shrink action failed due to too many documents on each target shard after shrink." + const val INDEX_NOT_GREEN_MESSAGE = "Shrink action cannot continue as the index is not green." + const val FAILURE_MESSAGE = "Shrink action failed due to initial moving shards failure." private const val DEFAULT_LOCK_INTERVAL = 3L * 60L * 60L // Default lock interval is 3 hours in seconds private const val MILLISECONDS_IN_SECOND = 1000L const val THIRTY_SECONDS_IN_MILLIS = 30L * MILLISECONDS_IN_SECOND private const val JOB_INTERVAL_LOCK_MULTIPLIER = 3 private const val LOCK_BUFFER_SECONDS = 1800 private const val MAXIMUM_DOCS_PER_SHARD = 0x80000000 // The maximum number of documents per shard is 2^31 - fun getSuccessMessage(node: String) = "Successfully started moving the shards to $node." + fun getSuccessMessage(node: String) = "Successfully initialized moving the shards to $node for a shrink action." fun getIndexExistsMessage(newIndex: String) = "Shrink failed because $newIndex already exists." + fun getShardMovingDecisionInfo( + noCount: Int, + yesCount: Int, + throttleCount: Int, + node: String + ) = "Shard moving decisions on node $node, NO: $noCount, YES: $yesCount, THROTTLE: $throttleCount." + // If we couldn't get the job interval for the lock, use the default of 12 hours. // Lock is 3x + 30 minutes the job interval to allow the next step's execution to extend the lock without losing it. // If user sets maximum jitter, it could be 2x the job interval before the next step is executed. - private fun getShrinkLockDuration(jobInterval: Long?) = jobInterval?.let { (it * JOB_INTERVAL_LOCK_MULTIPLIER) + LOCK_BUFFER_SECONDS } + private fun getShrinkLockDuration( + jobInterval: Long? 
+ ) = jobInterval?.let { (it * JOB_INTERVAL_LOCK_MULTIPLIER) + LOCK_BUFFER_SECONDS } ?: DEFAULT_LOCK_INTERVAL + private val ALLOWED_TEMPLATE_FIELDS = setOf("index", "indexUuid") } } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptShrinkStep.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptShrinkStep.kt index 023aec502..87dba6361 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptShrinkStep.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/AttemptShrinkStep.kt @@ -11,11 +11,14 @@ import org.opensearch.action.admin.indices.shrink.ResizeRequest import org.opensearch.action.admin.indices.shrink.ResizeResponse import org.opensearch.action.admin.indices.stats.IndicesStatsRequest import org.opensearch.action.admin.indices.stats.IndicesStatsResponse +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.common.settings.Settings import org.opensearch.indexmanagement.indexstatemanagement.action.ShrinkAction import org.opensearch.indexmanagement.indexstatemanagement.util.INDEX_NUMBER_OF_SHARDS -import org.opensearch.indexmanagement.indexstatemanagement.util.getNodeFreeMemoryAfterShrink +import org.opensearch.indexmanagement.indexstatemanagement.util.getNodeFreeDiskSpaceAfterShrink import org.opensearch.indexmanagement.indexstatemanagement.util.isIndexGreen +import org.opensearch.indexmanagement.indexstatemanagement.util.issueUpdateSettingsRequest import org.opensearch.indexmanagement.opensearchapi.suspendUntil import org.opensearch.indexmanagement.spi.indexstatemanagement.model.ActionProperties import org.opensearch.indexmanagement.spi.indexstatemanagement.model.ManagedIndexMetaData @@ -29,15 +32,18 @@ class AttemptShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru override suspend fun 
wrappedExecute(context: StepContext): AttemptShrinkStep { val indexName = context.metadata.index // If the returned shrinkActionProperties are null, then the status has been set to failed, just return - val localShrinkActionProperties = updateAndGetShrinkActionProperties(context) ?: return this + val localShrinkActionProperties = checkShrinkActionPropertiesAndRenewLock(context) ?: return this if (!isIndexGreen(context.client, indexName)) { stepStatus = StepStatus.CONDITION_NOT_MET info = mapOf("message" to INDEX_HEALTH_NOT_GREEN_MESSAGE) return this } + if (!isNodeStillSuitable(localShrinkActionProperties.nodeName, indexName, context)) return this + if (!confirmIndexWriteBlock(context, indexName)) return this + // If the resize index api fails, the step will be set to failed and resizeIndex will return false if (!resizeIndex(indexName, localShrinkActionProperties, context)) return this info = mapOf("message" to getSuccessMessage(localShrinkActionProperties.targetIndexName)) @@ -69,7 +75,7 @@ class AttemptShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru cleanupAndFail(FAILURE_MESSAGE, "Shrink action failed as node stats were missing the previously selected node.") return false } - val remainingMem = getNodeFreeMemoryAfterShrink(node, indexSizeInBytes, context.clusterService.clusterSettings) + val remainingMem = getNodeFreeDiskSpaceAfterShrink(node, indexSizeInBytes, context.clusterService.clusterSettings) if (remainingMem < 1L) { cleanupAndFail(NOT_ENOUGH_SPACE_FAILURE_MESSAGE, NOT_ENOUGH_SPACE_FAILURE_MESSAGE) return false @@ -77,6 +83,26 @@ class AttemptShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru return true } + // Set index write block again before sending shrink request, in case of write block flipped by other processes in previous steps. 
+ private suspend fun confirmIndexWriteBlock(stepContext: StepContext, indexName: String): Boolean { + val updateSettings = Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + .build() + var response: AcknowledgedResponse? = null + val isUpdateAcknowledged: Boolean + + try { + response = issueUpdateSettingsRequest(stepContext.client, stepContext.metadata.index, updateSettings) + } finally { + isUpdateAcknowledged = response != null && response.isAcknowledged + } + + if (!isUpdateAcknowledged) { + cleanupAndFail(WRITE_BLOCK_FAILED_MESSAGE, "Failed to confirm write block for index: [$indexName] before sending shrink request.") + } + return isUpdateAcknowledged + } + private suspend fun resizeIndex(sourceIndex: String, shrinkActionProperties: ShrinkActionProperties, context: StepContext): Boolean { val targetIndex = shrinkActionProperties.targetIndexName val req = ResizeRequest(targetIndex, sourceIndex) @@ -113,6 +139,7 @@ class AttemptShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru companion object { const val name = "attempt_shrink_step" const val FAILURE_MESSAGE = "Shrink failed when sending shrink request." + const val WRITE_BLOCK_FAILED_MESSAGE = "Failed to set write block before sending shrink request." const val NOT_ENOUGH_SPACE_FAILURE_MESSAGE = "Shrink failed as the selected node no longer had enough free space to shrink to." const val INDEX_HEALTH_NOT_GREEN_MESSAGE = "Shrink delayed because index health is not green." fun getSuccessMessage(newIndex: String) = "Shrink started. $newIndex currently being populated." 
diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/ShrinkStep.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/ShrinkStep.kt index fe29857c0..d45845cb1 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/ShrinkStep.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/ShrinkStep.kt @@ -55,7 +55,7 @@ abstract class ShrinkStep( protected suspend fun cleanupAndFail(infoMessage: String, logMessage: String? = null, cause: String? = null, e: Exception? = null) { cleanupResources(cleanupSettings, cleanupLock, cleanupTargetIndex) - fail(infoMessage, logMessage, cause, e) + setStepFailed(infoMessage, logMessage, cause, e) } abstract fun getGenericFailureMessage(): String @@ -63,12 +63,11 @@ abstract class ShrinkStep( abstract suspend fun wrappedExecute(context: StepContext): Step @Suppress("ReturnCount") - protected suspend fun updateAndGetShrinkActionProperties(context: StepContext): ShrinkActionProperties? { + protected suspend fun checkShrinkActionPropertiesAndRenewLock(context: StepContext): ShrinkActionProperties? { val actionMetadata = context.metadata.actionMetaData var localShrinkActionProperties = actionMetadata?.actionProperties?.shrinkActionProperties - shrinkActionProperties = localShrinkActionProperties if (localShrinkActionProperties == null) { - cleanupAndFail(METADATA_FAILURE_MESSAGE, METADATA_FAILURE_MESSAGE) + setStepFailed(METADATA_FAILURE_MESSAGE, METADATA_FAILURE_MESSAGE) return null } val lock = renewShrinkLock(localShrinkActionProperties, context.lockService, logger) @@ -85,7 +84,7 @@ abstract class ShrinkStep( return localShrinkActionProperties } - protected fun fail(infoMessage: String, logMessage: String? = null, cause: String? = null, e: Exception? = null) { + protected fun setStepFailed(infoMessage: String, logMessage: String? = null, cause: String? = null, e: Exception? 
= null) { if (logMessage != null) { if (e != null) { logger.error(logMessage, e) @@ -95,7 +94,6 @@ abstract class ShrinkStep( } info = if (cause == null) mapOf("message" to infoMessage) else mapOf("message" to infoMessage, "cause" to cause) stepStatus = StepStatus.FAILED - shrinkActionProperties = null } protected suspend fun cleanupResources(resetSettings: Boolean, releaseLock: Boolean, deleteTargetIndex: Boolean) { @@ -104,6 +102,7 @@ abstract class ShrinkStep( if (resetSettings) resetIndexSettings(localShrinkActionProperties) if (deleteTargetIndex) deleteTargetIndex(localShrinkActionProperties) if (releaseLock) releaseLock(localShrinkActionProperties) + shrinkActionProperties = null } else { logger.error("Shrink action failed to clean up resources due to null shrink action properties.") } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForMoveShardsStep.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForMoveShardsStep.kt index a8dcb6fcd..943f59279 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForMoveShardsStep.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForMoveShardsStep.kt @@ -26,14 +26,15 @@ class WaitForMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, override suspend fun wrappedExecute(context: StepContext): WaitForMoveShardsStep { val indexName = context.metadata.index // If the returned shrinkActionProperties are null, then the status has been set to failed, just return - val localShrinkActionProperties = updateAndGetShrinkActionProperties(context) ?: return this + val localShrinkActionProperties = checkShrinkActionPropertiesAndRenewLock(context) ?: return this val shardStats = getShardStats(indexName, context.client) ?: return this val numShardsInSync = getNumShardsInSync(shardStats, context.clusterService.state(), indexName) val nodeToMoveOnto = 
localShrinkActionProperties.nodeName val numShardsOnNode = getNumShardsWithCopyOnNode(shardStats, context.clusterService.state(), nodeToMoveOnto) - val numPrimaryShards = context.clusterService.state().metadata.indices[indexName].numberOfShards + val numPrimaryShards = context.clusterService.state().metadata.indices[indexName]?.numberOfShards + ?: error("numberOfShards should not be null") // If a copy of each shard is on the node, and all shards are in sync, move on if (numShardsOnNode >= numPrimaryShards && numShardsInSync >= numPrimaryShards) { @@ -49,8 +50,8 @@ class WaitForMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, // Returns the number of shard IDs where all primary and replicas are in sync private fun getNumShardsInSync(shardStats: Array, state: ClusterState, indexName: String): Int { - val numReplicas = state.metadata.indices[indexName].numberOfReplicas - val inSyncAllocations = state.metadata.indices[indexName].inSyncAllocationIds + val numReplicas = state.metadata.indices[indexName]?.numberOfReplicas ?: error("numberOfReplicas should not be null") + val inSyncAllocations = state.metadata.indices[indexName]?.inSyncAllocationIds var numShardsInSync = 0 for (shard: ShardStats in shardStats) { val routingInfo = shard.shardRouting @@ -58,7 +59,7 @@ class WaitForMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, if (routingInfo.primary()) { // All shards must be in sync as it isn't known which shard (replica or primary) will be // moved to the target node and used in the shrink. 
- if (inSyncAllocations[routingInfo.id].size == (numReplicas + 1)) { + if (inSyncAllocations?.get(routingInfo.id)?.size == (numReplicas + 1)) { numShardsInSync++ } } @@ -87,7 +88,7 @@ class WaitForMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name, val response: IndicesStatsResponse = client.admin().indices().suspendUntil { stats(indexStatsRequests, it) } val shardStats = response.shards if (shardStats == null) { - fail(AttemptMoveShardsStep.FAILURE_MESSAGE, "Failed to move shards in shrink action as shard stats were null.") + cleanupAndFail(AttemptMoveShardsStep.FAILURE_MESSAGE, "Failed to move shards in shrink action as shard stats were null.") return null } return shardStats diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForShrinkStep.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForShrinkStep.kt index 319f6d2ea..906360039 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForShrinkStep.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/shrink/WaitForShrinkStep.kt @@ -30,7 +30,7 @@ class WaitForShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru override suspend fun wrappedExecute(context: StepContext): WaitForShrinkStep { val indexName = context.metadata.index // If the returned shrinkActionProperties are null, then the status has been set to failed, just return - val localShrinkActionProperties = updateAndGetShrinkActionProperties(context) ?: return this + val localShrinkActionProperties = checkShrinkActionPropertiesAndRenewLock(context) ?: return this val targetIndex = localShrinkActionProperties.targetIndexName if (shrinkNotDone(targetIndex, localShrinkActionProperties.targetNumShards, context.client, context.clusterService)) { @@ -42,7 +42,7 @@ class WaitForShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru if 
(!clearAllocationSettings(context, targetIndex)) return this if (!resetReadOnlyAndRouting(indexName, context.client, localShrinkActionProperties.originalIndexSettings)) return this - if (!deleteShrinkLock(localShrinkActionProperties, context.lockService)) { + if (!deleteShrinkLock(localShrinkActionProperties, context.lockService, logger)) { logger.error("Failed to delete Shrink action lock on node [${localShrinkActionProperties.nodeName}]") } stepStatus = StepStatus.COMPLETED @@ -54,7 +54,7 @@ class WaitForShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru private suspend fun shrinkNotDone(targetIndex: String, targetNumShards: Int, client: Client, clusterService: ClusterService): Boolean { val numPrimaryShardsStarted = getNumPrimaryShardsStarted(client, targetIndex) - val numPrimaryShards = clusterService.state().metadata.indices[targetIndex].numberOfShards + val numPrimaryShards = clusterService.state().metadata.indices[targetIndex]?.numberOfShards return numPrimaryShards != targetNumShards || numPrimaryShardsStarted != targetNumShards } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/addpolicy/TransportAddPolicyAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/addpolicy/TransportAddPolicyAction.kt index 707845dc9..f16d8a92e 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/addpolicy/TransportAddPolicyAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/addpolicy/TransportAddPolicyAction.kt @@ -32,9 +32,9 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue -import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.commons.ConfigConstants import org.opensearch.commons.authuser.User +import 
org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.index.Index import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.INDEX_MANAGEMENT_INDEX import org.opensearch.indexmanagement.indexstatemanagement.DefaultIndexMetadataService @@ -50,7 +50,10 @@ import org.opensearch.indexmanagement.indexstatemanagement.util.DEFAULT_INDEX_TY import org.opensearch.indexmanagement.indexstatemanagement.util.FailedIndex import org.opensearch.indexmanagement.indexstatemanagement.util.managedIndexConfigIndexRequest import org.opensearch.indexmanagement.indexstatemanagement.util.removeClusterStateMetadatas +import org.opensearch.indexmanagement.opensearchapi.IndexManagementSecurityContext import org.opensearch.indexmanagement.opensearchapi.parseFromGetResponse +import org.opensearch.indexmanagement.opensearchapi.suspendUntil +import org.opensearch.indexmanagement.opensearchapi.withClosableContext import org.opensearch.indexmanagement.settings.IndexManagementSettings import org.opensearch.indexmanagement.spi.indexstatemanagement.model.ISMIndexMetadata import org.opensearch.indexmanagement.util.IndexUtils @@ -60,8 +63,6 @@ import org.opensearch.indexmanagement.util.SecurityUtils.Companion.validateUserC import org.opensearch.rest.RestStatus import org.opensearch.tasks.Task import org.opensearch.transport.TransportService -import java.lang.Exception -import java.lang.IllegalArgumentException import java.time.Duration import java.time.Instant @@ -105,11 +106,10 @@ class TransportAddPolicyAction @Inject constructor( private val client: NodeClient, private val actionListener: ActionListener, private val request: AddPolicyRequest, - private val user: User? = buildUser(client.threadPool().threadContext) + private val user: User? 
= buildUser(client.threadPool().threadContext), ) { private lateinit var startTime: Instant private lateinit var policy: Policy - private val permittedIndices = mutableListOf() private val indicesToAdd = mutableMapOf() // uuid: name private val failedIndices: MutableList = mutableListOf() @@ -145,55 +145,35 @@ class TransportAddPolicyAction @Inject constructor( return@launch } if (user != null) { - validateIndexPermissions(0, indicesToAdd.values.toList()) - } else { - removeClosedIndices() + withClosableContext(IndexManagementSecurityContext("AddPolicyHandler", settings, client.threadPool().threadContext, user)) { + validateIndexPermissions(indicesToAdd.values.toList()) + } } + removeClosedIndices() } } /** * We filter the requested indices to the indices user has permission to manage and apply policies only on top of those */ - private fun validateIndexPermissions(current: Int, indices: List) { - val request = ManagedIndexRequest().indices(indices[current]) - client.execute( - ManagedIndexAction.INSTANCE, - request, - object : ActionListener { - override fun onResponse(response: AcknowledgedResponse) { - permittedIndices.add(indices[current]) - proceed(current, indices) - } - - override fun onFailure(e: Exception) { - when (e is OpenSearchSecurityException) { - true -> { - proceed(current, indices) - } - false -> { - // failing the request for any other exception - actionListener.onFailure(e) - } - } - } + private suspend fun validateIndexPermissions(indices: List) { + val permittedIndices = mutableListOf() + indices.forEach { index -> + try { + client.suspendUntil { execute(ManagedIndexAction.INSTANCE, ManagedIndexRequest().indices(index), it) } + permittedIndices.add(index) + } catch (e: OpenSearchSecurityException) { + log.debug("No permissions for index [$index]") } - ) - } + } - private fun proceed(current: Int, indices: List) { - if (current < indices.count() - 1) { - validateIndexPermissions(current + 1, indices) - } else { - // sanity check that there are 
indices - if none then return - if (permittedIndices.isEmpty()) { - actionListener.onResponse(ISMStatusResponse(0, failedIndices)) - return - } - // Filter out the indices that the user does not have permissions for - indicesToAdd.values.removeIf { !permittedIndices.contains(it) } - removeClosedIndices() + // sanity check that there are indices - if none then return + if (permittedIndices.isEmpty()) { + actionListener.onResponse(ISMStatusResponse(0, failedIndices)) + return } + // Filter out the indices that the user does not have permissions for + indicesToAdd.values.removeIf { !permittedIndices.contains(it) } } private fun removeClosedIndices() { diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/explain/TransportExplainAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/explain/TransportExplainAction.kt index a007e2868..3ea39159c 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/explain/TransportExplainAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/explain/TransportExplainAction.kt @@ -269,7 +269,7 @@ class TransportExplainAction @Inject constructor( clusterStateRequest, object : ActionListener { override fun onResponse(response: ClusterStateResponse) { - val clusterStateIndexMetadatas = response.state.metadata.indices.associate { it.key to it.value } + val clusterStateIndexMetadatas = response.state.metadata.indices getMetadataMap(clusterStateIndexMetadatas, threadContext) } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/updateindexmetadata/TransportUpdateManagedIndexMetaDataAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/updateindexmetadata/TransportUpdateManagedIndexMetaDataAction.kt index f5f5a9ae7..e3fb93dc0 100644 --- 
a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/updateindexmetadata/TransportUpdateManagedIndexMetaDataAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/transport/action/updateindexmetadata/TransportUpdateManagedIndexMetaDataAction.kt @@ -76,7 +76,7 @@ class TransportUpdateManagedIndexMetaDataAction @Inject constructor( .toTypedArray() } - override fun masterOperation( + override fun clusterManagerOperation( request: UpdateManagedIndexMetaDataRequest, state: ClusterState, listener: ActionListener diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtils.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtils.kt index 512129f88..121fae408 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtils.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtils.kt @@ -25,8 +25,7 @@ import org.opensearch.common.settings.ClusterSettings import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.INDEX_MANAGEMENT_INDEX -import org.opensearch.indexmanagement.indexstatemanagement.action.ShrinkAction.Companion.LOCK_RESOURCE_NAME -import org.opensearch.indexmanagement.indexstatemanagement.action.ShrinkAction.Companion.LOCK_RESOURCE_TYPE +import org.opensearch.indexmanagement.indexstatemanagement.action.ShrinkAction.Companion.LOCK_SOURCE_JOB_ID import org.opensearch.indexmanagement.indexstatemanagement.step.shrink.AttemptMoveShardsStep import org.opensearch.indexmanagement.opensearchapi.suspendUntil import org.opensearch.indexmanagement.spi.indexstatemanagement.model.ManagedIndexMetaData @@ -52,9 +51,11 @@ suspend fun releaseShrinkLock( suspend fun deleteShrinkLock( shrinkActionProperties: ShrinkActionProperties, - lockService: LockService + lockService: LockService, + 
logger: Logger ): Boolean { val lockID = getShrinkLockID(shrinkActionProperties.nodeName) + logger.info("Deleting lock: $lockID") return lockService.suspendUntil { deleteLock(lockID, it) } } @@ -94,11 +95,11 @@ fun getShrinkLockModel( lockSeqNo: Long, lockDurationSecond: Long ): LockModel { - val lockID = getShrinkLockID(nodeName) + val jobID = getShrinkJobID(nodeName) val lockCreationInstant: Instant = Instant.ofEpochSecond(lockEpochSecond) return LockModel( jobIndexName, - lockID, + jobID, lockCreationInstant, lockDurationSecond, false, @@ -163,7 +164,7 @@ fun getDiskSettings(clusterSettings: ClusterSettings): Settings { * Returns the amount of memory in the node which will be free below the high watermark level after adding 2*indexSizeInBytes, or -1 * if adding 2*indexSizeInBytes goes over the high watermark threshold, or if nodeStats does not contain OsStats. */ -fun getNodeFreeMemoryAfterShrink(node: NodeStats, indexSizeInBytes: Long, clusterSettings: ClusterSettings): Long { +fun getNodeFreeDiskSpaceAfterShrink(node: NodeStats, indexSizeInBytes: Long, clusterSettings: ClusterSettings): Long { val fsStats = node.fs if (fsStats != null) { val diskSpaceLeftInNode = fsStats.total.free.bytes @@ -202,7 +203,14 @@ suspend fun resetReadOnlyAndRouting(index: String, client: Client, originalSetti } fun getShrinkLockID(nodeName: String): String { - return "$LOCK_RESOURCE_TYPE-$LOCK_RESOURCE_NAME-$nodeName" + return LockModel.generateLockId( + INDEX_MANAGEMENT_INDEX, + getShrinkJobID(nodeName) + ) +} + +fun getShrinkJobID(nodeName: String): String { + return "$LOCK_SOURCE_JOB_ID-$nodeName" } // Creates a map of shardIds to the set of node names which the shard copies reside on. 
For example, with 2 replicas diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ActionValidation.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ActionValidation.kt index dcd0410ab..3486c5956 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ActionValidation.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ActionValidation.kt @@ -18,6 +18,7 @@ class ActionValidation( val jvmService: JvmService ) { + @Suppress("ComplexMethod") fun validate(actionName: String, indexName: String): ValidationResult { // map action to validation class val validation = when (actionName) { @@ -28,6 +29,16 @@ class ActionValidation( "read_only" -> ValidateReadOnly(settings, clusterService, jvmService).execute(indexName) "read_write" -> ValidateReadWrite(settings, clusterService, jvmService).execute(indexName) "replica_count" -> ValidateReplicaCount(settings, clusterService, jvmService).execute(indexName) + "snapshot" -> ValidateSnapshot(settings, clusterService, jvmService).execute(indexName) + "transition" -> ValidateTransition(settings, clusterService, jvmService).execute(indexName) + "close" -> ValidateClose(settings, clusterService, jvmService).execute(indexName) + "index_priority" -> ValidateIndexPriority(settings, clusterService, jvmService).execute(indexName) + // No validations for these actions at current stage. 
+ // Reason: https://github.com/opensearch-project/index-management/issues/587 + "notification" -> ValidateNothing(settings, clusterService, jvmService).execute(indexName) + "shrink" -> ValidateNothing(settings, clusterService, jvmService).execute(indexName) + "allocation" -> ValidateNothing(settings, clusterService, jvmService).execute(indexName) + "rollup" -> ValidateNothing(settings, clusterService, jvmService).execute(indexName) else -> { // temporary call until all actions are mapped ValidateNothing(settings, clusterService, jvmService).execute(indexName) diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateClose.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateClose.kt new file mode 100644 index 000000000..aa65fe967 --- /dev/null +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateClose.kt @@ -0,0 +1,70 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.apache.logging.log4j.LogManager +import org.opensearch.cluster.metadata.MetadataCreateIndexService +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.indexmanagement.util.OpenForTesting +import org.opensearch.indices.InvalidIndexNameException +import org.opensearch.monitor.jvm.JvmService + +@OpenForTesting +class ValidateClose( + settings: Settings, + clusterService: ClusterService, + jvmService: JvmService +) : Validate(settings, clusterService, jvmService) { + + private val logger = LogManager.getLogger(javaClass) + + @Suppress("ReturnSuppressCount", "ReturnCount") + override fun execute(indexName: String): Validate { + // if these conditions are false, fail validation and do not execute close action + if 
(!indexExists(indexName) || !validIndex(indexName)) { + validationStatus = ValidationStatus.FAILED + return this + } + validationMessage = getValidationPassedMessage(indexName) + return this + } + + private fun indexExists(indexName: String): Boolean { + val isIndexExists = clusterService.state().metadata.indices.containsKey(indexName) + if (!isIndexExists) { + val message = getNoIndexMessage(indexName) + logger.warn(message) + validationMessage = message + return false + } + return true + } + + private fun validIndex(indexName: String): Boolean { + val exceptionGenerator: (String, String) -> RuntimeException = { index_name, reason -> InvalidIndexNameException(index_name, reason) } + // If the index name is invalid for any reason, this will throw an exception giving the reason why in the message. + // That will be displayed to the user as the cause. + try { + MetadataCreateIndexService.validateIndexOrAliasName(indexName, exceptionGenerator) + } catch (e: Exception) { + val message = getIndexNotValidMessage(indexName) + logger.warn(message) + validationMessage = message + return false + } + return true + } + + @Suppress("TooManyFunctions") + companion object { + const val name = "validate_close" + fun getNoIndexMessage(index: String) = "No such index [index=$index] for close action." + fun getIndexNotValidMessage(index: String) = "Index [index=$index] is not valid. Abort close action on it." 
+ fun getValidationPassedMessage(index: String) = "Close action validation passed for [index=$index]" + } +} diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateIndexPriority.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateIndexPriority.kt new file mode 100644 index 000000000..1ecde743e --- /dev/null +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateIndexPriority.kt @@ -0,0 +1,57 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.apache.logging.log4j.LogManager +import org.opensearch.cluster.metadata.IndexMetadata +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.indexmanagement.util.OpenForTesting +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.monitor.jvm.JvmService + +@OpenForTesting +class ValidateIndexPriority( + settings: Settings, + clusterService: ClusterService, + jvmService: JvmService +) : Validate(settings, clusterService, jvmService) { + + private val logger = LogManager.getLogger(javaClass) + + @Suppress("ReturnSuppressCount", "ReturnCount") + override fun execute(indexName: String): Validate { + // if these conditions are false, fail validation and do not execute index_priority action + if (hasReadOnlyAllowDeleteBlock(indexName)) { + return this + } + validationMessage = getValidationPassedMessage(indexName) + return this + } + + fun hasReadOnlyAllowDeleteBlock(indexName: String): Boolean { + val readOnlyAllowDeleteBlock = settings.get(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE) + if (!readOnlyAllowDeleteBlock.isNullOrEmpty() && readOnlyAllowDeleteBlock.toBoolean()) { + val message = 
getReadOnlyAllowDeleteBlockMessage(indexName) + logger.warn(message) + validationStatus = ValidationStatus.FAILED + return true + } + return false + } + + @Suppress("TooManyFunctions") + companion object { + const val name = "validate_index_priority" + fun getReadOnlyAllowDeleteBlockMessage(index: String) = "read_only_allow_delete block is not null for index [index=$index]" + fun getValidationPassedMessage(index: String) = "Index Priority action validation passed for [index=$index]" + } +} diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateSnapshot.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateSnapshot.kt new file mode 100644 index 000000000..91febd570 --- /dev/null +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateSnapshot.kt @@ -0,0 +1,74 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.apache.logging.log4j.LogManager +import org.opensearch.cluster.metadata.MetadataCreateIndexService +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.indexmanagement.util.OpenForTesting +import org.opensearch.indices.InvalidIndexNameException +import org.opensearch.monitor.jvm.JvmService + +@OpenForTesting +class ValidateSnapshot( + settings: Settings, + clusterService: ClusterService, + jvmService: JvmService +) : Validate(settings, clusterService, jvmService) { + + private val logger = LogManager.getLogger(javaClass) + + @Suppress("ReturnSuppressCount", "ReturnCount") + override fun execute(indexName: String): Validate { + // if these conditions are false, fail validation and do not 
execute snapshot action + if (!indexExists(indexName) || !validIndex(indexName)) { + validationStatus = ValidationStatus.FAILED + return this + } + validationMessage = getValidationPassedMessage(indexName) + return this + } + + private fun indexExists(indexName: String): Boolean { + val indexExists = clusterService.state().metadata.indices.containsKey(indexName) + if (!indexExists) { + val message = getNoIndexMessage(indexName) + logger.warn(message) + validationMessage = message + return false + } + return true + } + + // checks if index is valid + private fun validIndex(indexName: String): Boolean { + val exceptionGenerator: (String, String) -> RuntimeException = { index_name, reason -> InvalidIndexNameException(index_name, reason) } + try { + MetadataCreateIndexService.validateIndexOrAliasName(indexName, exceptionGenerator) + } catch (e: Exception) { + val message = getIndexNotValidMessage(indexName) + logger.warn(message) + validationMessage = message + return false + } + return true + } + + @Suppress("TooManyFunctions") + companion object { + const val name = "validate_snapshot" + fun getNoIndexMessage(index: String) = "Index [index=$index] does not exist for snapshot action." + fun getIndexNotValidMessage(index: String) = "Index [index=$index] is not valid for snapshot action." 
+ fun getValidationPassedMessage(index: String) = "Snapshot action validation passed for [index=$index]" + } +} diff --git a/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateTransition.kt b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateTransition.kt new file mode 100644 index 000000000..9faeff20f --- /dev/null +++ b/src/main/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateTransition.kt @@ -0,0 +1,71 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.apache.logging.log4j.LogManager +import org.opensearch.cluster.metadata.MetadataCreateIndexService +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.indexmanagement.util.OpenForTesting +import org.opensearch.indices.InvalidIndexNameException +import org.opensearch.monitor.jvm.JvmService + +@OpenForTesting +class ValidateTransition( + settings: Settings, + clusterService: ClusterService, + jvmService: JvmService +) : Validate(settings, clusterService, jvmService) { + + private val logger = LogManager.getLogger(javaClass) + + @Suppress("ReturnSuppressCount", "ReturnCount") + override fun execute(indexName: String): Validate { + // if these conditions are false, fail validation and do not execute transition action + if (!indexExists(indexName) || !validIndex(indexName)) { + return this + } + validationMessage = getValidationPassedMessage(indexName) + return this + } + + private fun indexExists(indexName: String): Boolean { + val isIndexExists = clusterService.state().metadata.indices.containsKey(indexName) + if (!isIndexExists) { + val message = getNoIndexMessage(indexName) + logger.warn(message) + validationStatus = 
ValidationStatus.RE_VALIDATING + validationMessage = message + return false + } + return true + } + + private fun validIndex(indexName: String): Boolean { + val exceptionGenerator: (String, String) -> RuntimeException = { index_name, reason -> InvalidIndexNameException(index_name, reason) } + // If the index name is invalid for any reason, this will throw an exception giving the reason why in the message. + // That will be displayed to the user as the cause. + try { + MetadataCreateIndexService.validateIndexOrAliasName(indexName, exceptionGenerator) + } catch (e: Exception) { + val message = getIndexNotValidMessage(indexName) + logger.warn(message) + validationStatus = ValidationStatus.RE_VALIDATING + validationMessage = message + return false + } + return true + } + + @Suppress("TooManyFunctions") + companion object { + const val name = "validate_transition" + fun getNoIndexMessage(index: String) = "Index [index=$index] does not exist for transition" + fun getIndexNotValidMessage(index: String) = "Index [index=$index] is not valid for transition" + fun getValidationPassedMessage(index: String) = "Transition action validation passed for [index=$index]" + } +} diff --git a/src/main/kotlin/org/opensearch/indexmanagement/opensearchapi/OpenSearchExtensions.kt b/src/main/kotlin/org/opensearch/indexmanagement/opensearchapi/OpenSearchExtensions.kt index 54c77a8bd..7d21aabea 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/opensearchapi/OpenSearchExtensions.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/opensearchapi/OpenSearchExtensions.kt @@ -30,20 +30,20 @@ import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.common.util.concurrent.ThreadContext import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.core.xcontent.MediaType -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.ToXContent -import 
org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParser.Token import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.InjectSecurity import org.opensearch.commons.authuser.User import org.opensearch.commons.notifications.NotificationsPluginInterface +import org.opensearch.core.xcontent.MediaType +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParser.Token import org.opensearch.index.seqno.SequenceNumbers import org.opensearch.indexmanagement.indexstatemanagement.action.ShrinkAction import org.opensearch.indexmanagement.indexstatemanagement.model.ISMTemplate diff --git a/src/main/kotlin/org/opensearch/indexmanagement/refreshanalyzer/RestRefreshSearchAnalyzerAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/refreshanalyzer/RestRefreshSearchAnalyzerAction.kt index 80790622d..3f1e83f0c 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/refreshanalyzer/RestRefreshSearchAnalyzerAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/refreshanalyzer/RestRefreshSearchAnalyzerAction.kt @@ -6,7 +6,7 @@ package org.opensearch.indexmanagement.refreshanalyzer import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.OPEN_DISTRO_BASE_URI import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.PLUGINS_BASE_URI import 
org.opensearch.rest.BaseRestHandler diff --git a/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperService.kt b/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperService.kt index 2f44f4389..8ac3b365e 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperService.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperService.kt @@ -343,9 +343,9 @@ class RollupMapperService( return RollupJobValidationResult.Failure(getMappingsResult.message, getMappingsResult.cause) } - val indexMapping: MappingMetadata = res.mappings[targetIndexResolvedName] + val indexMapping: MappingMetadata? = res.mappings[targetIndexResolvedName] - return if (((indexMapping.sourceAsMap?.get(_META) as Map<*, *>?)?.get(ROLLUPS) as Map<*, *>?)?.containsKey(rollup.id) == true) { + return if (((indexMapping?.sourceAsMap?.get(_META) as Map<*, *>?)?.get(ROLLUPS) as Map<*, *>?)?.containsKey(rollup.id) == true) { RollupJobValidationResult.Valid } else { RollupJobValidationResult.Invalid("Rollup job [${rollup.id}] does not exist in rollup index [$targetIndexResolvedName]") diff --git a/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataService.kt b/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataService.kt index 34112c6e5..f6d3e8f50 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataService.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataService.kt @@ -19,12 +19,14 @@ import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.client.Client import org.opensearch.common.Rounding +import org.opensearch.common.time.DateFormatter +import org.opensearch.common.time.DateFormatters import org.opensearch.common.unit.TimeValue import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.core.xcontent.NamedXContentRegistry import 
org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.index.query.MatchAllQueryBuilder import org.opensearch.indexmanagement.IndexManagementPlugin import org.opensearch.indexmanagement.common.model.dimension.DateHistogram @@ -34,7 +36,7 @@ import org.opensearch.indexmanagement.rollup.model.ContinuousMetadata import org.opensearch.indexmanagement.rollup.model.Rollup import org.opensearch.indexmanagement.rollup.model.RollupMetadata import org.opensearch.indexmanagement.rollup.model.RollupStats -import org.opensearch.indexmanagement.rollup.util.DATE_FIELD_EPOCH_MILLIS_FORMAT +import org.opensearch.indexmanagement.rollup.util.DATE_FIELD_STRICT_DATE_OPTIONAL_TIME_FORMAT import org.opensearch.indexmanagement.util.NO_ID import org.opensearch.search.aggregations.bucket.composite.InternalComposite import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder @@ -181,7 +183,7 @@ class RollupMetadataService(val client: Client, val xContentRegistry: NamedXCont .sort(dateHistogram.sourceField, SortOrder.ASC) // TODO: figure out where nulls are sorted .trackTotalHits(false) .fetchSource(false) - .docValueField(dateHistogram.sourceField, DATE_FIELD_EPOCH_MILLIS_FORMAT) + .docValueField(dateHistogram.sourceField, DATE_FIELD_STRICT_DATE_OPTIONAL_TIME_FORMAT) val searchRequest = SearchRequest(rollup.sourceIndex) .source(searchSourceBuilder) .allowPartialSearchResults(false) @@ -194,10 +196,12 @@ class RollupMetadataService(val client: Client, val xContentRegistry: NamedXCont // Get the doc value field of the dateHistogram.sourceField for the first search hit converted to epoch millis // If the doc value is null or empty it will be treated the same as empty doc hits - val firstHitTimestamp = response.hits.hits.first().field(dateHistogram.sourceField).getValue()?.toLong() + val 
firstHitTimestampAsString: String? = response.hits.hits.first().field(dateHistogram.sourceField).getValue() ?: return StartingTimeResult.NoDocumentsFound - - return StartingTimeResult.Success(getRoundedTime(firstHitTimestamp, dateHistogram)) + // Parse date and extract epochMillis + val formatter = DateFormatter.forPattern(DATE_FIELD_STRICT_DATE_OPTIONAL_TIME_FORMAT) + val epochMillis = DateFormatters.from(formatter.parse(firstHitTimestampAsString), formatter.locale()).toInstant().toEpochMilli() + return StartingTimeResult.Success(getRoundedTime(epochMillis, dateHistogram)) } catch (e: RemoteTransportException) { val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception logger.debug("Error when getting initial start time for rollup [${rollup.id}]: $unwrappedException") diff --git a/src/main/kotlin/org/opensearch/indexmanagement/rollup/action/mapping/TransportUpdateRollupMappingAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/rollup/action/mapping/TransportUpdateRollupMappingAction.kt index 38664e41c..2f0caf351 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/rollup/action/mapping/TransportUpdateRollupMappingAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/rollup/action/mapping/TransportUpdateRollupMappingAction.kt @@ -57,7 +57,7 @@ class TransportUpdateRollupMappingAction @Inject constructor( } @Suppress("ReturnCount", "LongMethod") - override fun masterOperation( + override fun clusterManagerOperation( request: UpdateRollupMappingRequest, state: ClusterState, listener: ActionListener diff --git a/src/main/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptor.kt b/src/main/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptor.kt index 316451ac7..ffd1e4bd7 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptor.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptor.kt @@ -5,6 +5,9 @@ package 
org.opensearch.indexmanagement.rollup.interceptor +import org.apache.logging.log4j.LogManager +import org.opensearch.action.support.IndicesOptions +import org.opensearch.cluster.ClusterState import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.settings.Settings @@ -55,6 +58,8 @@ class RollupInterceptor( val indexNameExpressionResolver: IndexNameExpressionResolver ) : TransportInterceptor { + private val logger = LogManager.getLogger(javaClass) + @Volatile private var searchEnabled = RollupSettings.ROLLUP_SEARCH_ENABLED.get(settings) @Volatile private var searchAllJobs = RollupSettings.ROLLUP_SEARCH_ALL_JOBS.get(settings) @@ -92,7 +97,7 @@ class RollupInterceptor( ?: throw IllegalArgumentException("No rollup job associated with target_index") val queryFieldMappings = getQueryMetadata( request.source().query(), - IndexUtils.getConcreteIndex(rollupJob.sourceIndex, concreteIndices, clusterService.state()) + getConcreteSourceIndex(rollupJob.sourceIndex, indexNameExpressionResolver, clusterService.state()) ) val aggregationFieldMappings = getAggregationMetadata(request.source().aggregations()?.aggregatorFactories) val fieldMappings = queryFieldMappings + aggregationFieldMappings @@ -109,6 +114,26 @@ class RollupInterceptor( } } } + + fun getConcreteSourceIndex(sourceIndex: String, resolver: IndexNameExpressionResolver, clusterState: ClusterState): String { + val concreteIndexNames = resolver.concreteIndexNames(clusterState, IndicesOptions.LENIENT_EXPAND_OPEN, sourceIndex) + if (concreteIndexNames.isEmpty()) { + logger.warn("Cannot resolve rollup sourceIndex [$sourceIndex]") + return "" + } + + var concreteIndexName: String = "" + if (concreteIndexNames.size == 1 && IndexUtils.isConcreteIndex(concreteIndexNames[0], clusterState)) { + concreteIndexName = concreteIndexNames[0] + } else if (concreteIndexNames.size > 1) { + concreteIndexName = 
IndexUtils.getNewestIndexByCreationDate(concreteIndexNames, clusterState) + } else if (IndexUtils.isAlias(sourceIndex, clusterState) || IndexUtils.isDataStream(sourceIndex, clusterState)) { + concreteIndexName = IndexUtils.getWriteIndex(sourceIndex, clusterState) + ?: IndexUtils.getNewestIndexByCreationDate(concreteIndexNames, clusterState) // + } + return concreteIndexName + } + /* * Validate that all indices have rollup job which matches field mappings from request * TODO return compiled list of issues here instead of just throwing exception @@ -168,16 +193,15 @@ class RollupInterceptor( return fieldMappings } - @Suppress("ComplexMethod", "ThrowsCount") + @Suppress("ComplexMethod", "ThrowsCount", "LongMethod") private fun getQueryMetadata( query: QueryBuilder?, - concreteSourceIndexName: String, + concreteSourceIndexName: String?, fieldMappings: MutableSet = mutableSetOf() ): Set { if (query == null) { return fieldMappings } - when (query) { is TermQueryBuilder -> { fieldMappings.add(RollupFieldMapping(RollupFieldMapping.Companion.FieldType.DIMENSION, query.fieldName(), Dimension.Type.TERMS.type)) @@ -218,6 +242,9 @@ class RollupInterceptor( fieldMappings.add(RollupFieldMapping(RollupFieldMapping.Companion.FieldType.DIMENSION, query.fieldName(), Dimension.Type.TERMS.type)) } is QueryStringQueryBuilder -> { + if (concreteSourceIndexName.isNullOrEmpty()) { + throw IllegalArgumentException("Can't parse query_string query without sourceIndex mappings!") + } // Throws IllegalArgumentException if unable to parse query val (queryFields, otherFields) = QueryStringQueryUtil.extractFieldsFromQueryString(query, concreteSourceIndexName) for (field in queryFields) { @@ -231,7 +258,6 @@ class RollupInterceptor( throw IllegalArgumentException("The ${query.name} query is currently not supported in rollups") } } - return fieldMappings } @@ -302,10 +328,11 @@ class RollupInterceptor( private fun rewriteShardSearchForRollupJobs(request: ShardSearchRequest, matchingRollupJobs: Map>) 
{ val matchedRollup = pickRollupJob(matchingRollupJobs.keys) val fieldNameMappingTypeMap = matchingRollupJobs.getValue(matchedRollup).associateBy({ it.fieldName }, { it.mappingType }) + val concreteSourceIndex = getConcreteSourceIndex(matchedRollup.sourceIndex, indexNameExpressionResolver, clusterService.state()) if (searchAllJobs) { - request.source(request.source().rewriteSearchSourceBuilder(matchingRollupJobs.keys, fieldNameMappingTypeMap, matchedRollup.sourceIndex)) + request.source(request.source().rewriteSearchSourceBuilder(matchingRollupJobs.keys, fieldNameMappingTypeMap, concreteSourceIndex)) } else { - request.source(request.source().rewriteSearchSourceBuilder(matchedRollup, fieldNameMappingTypeMap, matchedRollup.sourceIndex)) + request.source(request.source().rewriteSearchSourceBuilder(matchedRollup, fieldNameMappingTypeMap, concreteSourceIndex)) } } } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/rollup/resthandler/RestExplainRollupAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/rollup/resthandler/RestExplainRollupAction.kt index 91df0b5e6..bce23b1ce 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/rollup/resthandler/RestExplainRollupAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/rollup/resthandler/RestExplainRollupAction.kt @@ -6,7 +6,7 @@ package org.opensearch.indexmanagement.rollup.resthandler import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.LEGACY_ROLLUP_JOBS_BASE_URI import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.ROLLUP_JOBS_BASE_URI import org.opensearch.indexmanagement.rollup.action.explain.ExplainRollupAction diff --git a/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupFieldValueExpressionResolver.kt b/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupFieldValueExpressionResolver.kt index 
fe2c38801..f800b54cd 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupFieldValueExpressionResolver.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupFieldValueExpressionResolver.kt @@ -57,9 +57,9 @@ object RollupFieldValueExpressionResolver { open class IndexAliasUtils(val clusterService: ClusterService) { open fun hasAlias(index: String): Boolean { - val aliases = this.clusterService.state().metadata().indices.get(index)?.aliases + val aliases = this.clusterService.state().metadata().indices[index]?.aliases if (aliases != null) { - return aliases.size() > 0 + return aliases.isNotEmpty() } return false } diff --git a/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupUtils.kt b/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupUtils.kt index ef5c05868..f6d749fbb 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupUtils.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/rollup/util/RollupUtils.kt @@ -12,11 +12,11 @@ import org.opensearch.action.search.SearchRequest import org.opensearch.cluster.ClusterState import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.core.xcontent.XContentParser.Token import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser.Token import org.opensearch.index.query.BoolQueryBuilder import org.opensearch.index.query.BoostingQueryBuilder import org.opensearch.index.query.ConstantScoreQueryBuilder @@ -63,6 +63,7 @@ import org.opensearch.search.aggregations.metrics.SumAggregationBuilder import 
org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder import org.opensearch.search.builder.SearchSourceBuilder +const val DATE_FIELD_STRICT_DATE_OPTIONAL_TIME_FORMAT = "strict_date_optional_time" const val DATE_FIELD_EPOCH_MILLIS_FORMAT = "epoch_millis" @Suppress("ReturnCount") diff --git a/src/main/kotlin/org/opensearch/indexmanagement/snapshotmanagement/api/resthandler/RestExplainSMPolicyHandler.kt b/src/main/kotlin/org/opensearch/indexmanagement/snapshotmanagement/api/resthandler/RestExplainSMPolicyHandler.kt index d936f021a..deff23cd7 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/snapshotmanagement/api/resthandler/RestExplainSMPolicyHandler.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/snapshotmanagement/api/resthandler/RestExplainSMPolicyHandler.kt @@ -7,7 +7,7 @@ package org.opensearch.indexmanagement.snapshotmanagement.api.resthandler import org.apache.logging.log4j.LogManager import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.SM_POLICIES_URI import org.opensearch.indexmanagement.snapshotmanagement.api.transport.SMActions import org.opensearch.indexmanagement.snapshotmanagement.api.transport.explain.ExplainSMPolicyRequest diff --git a/src/main/kotlin/org/opensearch/indexmanagement/transform/resthandler/RestExplainTransformAction.kt b/src/main/kotlin/org/opensearch/indexmanagement/transform/resthandler/RestExplainTransformAction.kt index f7e4612ca..bc3653235 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/transform/resthandler/RestExplainTransformAction.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/transform/resthandler/RestExplainTransformAction.kt @@ -6,7 +6,7 @@ package org.opensearch.indexmanagement.transform.resthandler import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings 
import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.TRANSFORM_BASE_URI import org.opensearch.indexmanagement.transform.action.explain.ExplainTransformAction import org.opensearch.indexmanagement.transform.action.explain.ExplainTransformRequest diff --git a/src/main/kotlin/org/opensearch/indexmanagement/util/IndexManagementException.kt b/src/main/kotlin/org/opensearch/indexmanagement/util/IndexManagementException.kt index 64a434a14..c6b6b08a2 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/util/IndexManagementException.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/util/IndexManagementException.kt @@ -7,7 +7,7 @@ package org.opensearch.indexmanagement.util import org.opensearch.OpenSearchException import org.opensearch.OpenSearchStatusException -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.common.ValidationException import org.opensearch.index.IndexNotFoundException import org.opensearch.rest.RestStatus diff --git a/src/main/kotlin/org/opensearch/indexmanagement/util/IndexUtils.kt b/src/main/kotlin/org/opensearch/indexmanagement/util/IndexUtils.kt index 5c5af20f3..c3c923bec 100644 --- a/src/main/kotlin/org/opensearch/indexmanagement/util/IndexUtils.kt +++ b/src/main/kotlin/org/opensearch/indexmanagement/util/IndexUtils.kt @@ -15,9 +15,9 @@ import org.opensearch.cluster.metadata.IndexAbstraction import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.common.hash.MurmurHash3 import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.core.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentType import org.opensearch.indexmanagement.IndexManagementIndices import org.opensearch.indexmanagement.IndexManagementPlugin import java.nio.ByteBuffer @@ -80,10 +80,10 @@ class IndexUtils { return 
DEFAULT_SCHEMA_VERSION } - fun shouldUpdateIndex(index: IndexMetadata, newVersion: Long): Boolean { + fun shouldUpdateIndex(index: IndexMetadata?, newVersion: Long): Boolean { var oldVersion = DEFAULT_SCHEMA_VERSION - val indexMapping = index.mapping()?.sourceAsMap() + val indexMapping = index?.mapping()?.sourceAsMap() if (indexMapping != null && indexMapping.containsKey(_META) && indexMapping[_META] is HashMap<*, *>) { val metaData = indexMapping[_META] as HashMap<*, *> if (metaData.containsKey(SCHEMA_VERSION)) { @@ -234,24 +234,5 @@ class IndexUtils { return clusterState.metadata .indicesLookup[indexName]!!.type == IndexAbstraction.Type.CONCRETE_INDEX } - - fun getConcreteIndex(indexName: String, concreteIndices: Array, clusterState: ClusterState): String { - - if (concreteIndices.isEmpty()) { - throw IllegalArgumentException("ConcreteIndices list can't be empty!") - } - - var concreteIndexName: String - if (concreteIndices.size == 1 && isConcreteIndex(indexName, clusterState)) { - concreteIndexName = indexName - } else if (isAlias(indexName, clusterState) || isDataStream(indexName, clusterState)) { - concreteIndexName = getWriteIndex(indexName, clusterState) - ?: getNewestIndexByCreationDate(concreteIndices, clusterState) // - } else { - concreteIndexName = getNewestIndexByCreationDate(concreteIndices, clusterState) - } - - return concreteIndexName - } } } diff --git a/src/test/kotlin/org/opensearch/indexmanagement/IndexStateManagementSecurityBehaviorIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/IndexStateManagementSecurityBehaviorIT.kt index 18fbd72ba..7966222c6 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/IndexStateManagementSecurityBehaviorIT.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/IndexStateManagementSecurityBehaviorIT.kt @@ -43,7 +43,7 @@ import java.util.Locale class IndexStateManagementSecurityBehaviorIT : SecurityRestTestCase() { private val testIndexName = javaClass.simpleName.lowercase(Locale.ROOT) - private val 
password = "Test123!" + private val password = "Test123sdfsdfds435346FDGDFGDFG2342&^%#$@#35!" private val superIsmUser = "john" private var superUserClient: RestClient? = null diff --git a/src/test/kotlin/org/opensearch/indexmanagement/PolicySecurityBehaviorIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/PolicySecurityBehaviorIT.kt new file mode 100644 index 000000000..5af30a616 --- /dev/null +++ b/src/test/kotlin/org/opensearch/indexmanagement/PolicySecurityBehaviorIT.kt @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.indexmanagement + +import org.junit.After +import org.junit.Before +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest +import org.opensearch.client.ResponseException +import org.opensearch.client.RestClient +import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.INDEX_MANAGEMENT_INDEX +import org.opensearch.indexmanagement.indexstatemanagement.action.AliasAction +import org.opensearch.indexmanagement.indexstatemanagement.model.Policy +import org.opensearch.indexmanagement.indexstatemanagement.model.State +import org.opensearch.indexmanagement.indexstatemanagement.randomErrorNotification +import org.opensearch.indexmanagement.indexstatemanagement.transport.action.addpolicy.AddPolicyAction +import org.opensearch.rest.RestStatus +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.test.junit.annotations.TestLogging +import java.time.Instant +import java.time.temporal.ChronoUnit +import java.util.Locale + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +class PolicySecurityBehaviorIT : SecurityRestTestCase() { + private val password 
= "TestpgfhertergGd435AASA123!" + + private val ismUser = "john" + private var ismUserClient: RestClient? = null + + private val permittedIndicesPrefix = "permitted-index" + private val permittedIndicesPattern = "permitted-index*" + @Before + fun setupUsersAndRoles() { +// updateClusterSetting(ManagedIndexSettings.JITTER.key, "0.0", false) + + val custerPermissions = listOf( + AddPolicyAction.NAME + ) + + val indexPermissions = listOf( + MANAGED_INDEX, + CREATE_INDEX, + WRITE_INDEX, + BULK_WRITE_INDEX, + GET_INDEX_MAPPING, + SEARCH_INDEX, + PUT_INDEX_MAPPING + ) + createUser(ismUser, password, listOf(HELPDESK)) + createRole(HELPDESK_ROLE, custerPermissions, indexPermissions, listOf(permittedIndicesPattern)) + assignRoleToUsers(HELPDESK_ROLE, listOf(ismUser)) + + ismUserClient = + SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), ismUser, password).setSocketTimeout(60000) + .build() + } + + @After + fun cleanup() { + // Remove user + ismUserClient?.close() + deleteUser(ismUser) + deleteRole(HELPDESK_ROLE) + + deleteIndexByName("$INDEX_MANAGEMENT_INDEX") + } + + fun `test add policy`() { + + val notPermittedIndexPrefix = OpenSearchTestCase.randomAlphaOfLength(10).lowercase(Locale.getDefault()) + val policyId = OpenSearchTestCase.randomAlphaOfLength(10) + + val permittedindices = mutableListOf() + val notPermittedindices = mutableListOf() + for (i in 1..5) { + createIndex("$notPermittedIndexPrefix-$i", """ "properties": { "field_a": { "type": "long" } }""", client()) + createIndex("$permittedIndicesPrefix-$i", """ "properties": { "field_a": { "type": "long" } }""", client()) + notPermittedindices += "$notPermittedIndexPrefix-$i" + permittedindices += "$permittedIndicesPrefix-$i" + } + + val allIndicesJoined = (notPermittedindices + permittedindices).joinToString(separator = ",") + try { + val actions = listOf(IndicesAliasesRequest.AliasActions.add().alias("aaa")) + val actionConfig = AliasAction(actions = actions, index = 0) + val states = 
listOf(State("alias", listOf(actionConfig), listOf())) + val policy = Policy( + id = policyId, + description = "description", + schemaVersion = 1L, + lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), + errorNotification = randomErrorNotification(), + defaultState = "alias", + states = states + ) + createPolicy(policy, policy.id, true, client()) + // Call AddPolicyAction as user + addPolicyToIndex(index = allIndicesJoined, policyId = policy.id, expectedStatus = RestStatus.OK, client = ismUserClient!!) + + refreshAllIndices() + + val explainResponseAsMap = managedIndexExplainAllAsMap(client()) + assertEquals(5, explainResponseAsMap["total_managed_indices"] as Int) + } catch (e: ResponseException) { + logger.error(e.message, e) + } finally { + deleteIndexByName("$permittedIndicesPrefix*") + deleteIndexByName("$notPermittedIndexPrefix*") + } + } +} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/RollupSecurityBehaviorIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/RollupSecurityBehaviorIT.kt index b0c963e50..a34930c90 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/RollupSecurityBehaviorIT.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/RollupSecurityBehaviorIT.kt @@ -34,7 +34,7 @@ import java.time.temporal.ChronoUnit @TestLogging("level:DEBUG", reason = "Debug for tests.") class RollupSecurityBehaviorIT : SecurityRestTestCase() { - private val password = "Test123!" + private val password = "TestpgfhertergGd435AASA123!" private val superRollupUser = "john" private var superUserClient: RestClient? 
= null diff --git a/src/test/kotlin/org/opensearch/indexmanagement/SecurityBehaviorIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/SecurityBehaviorIT.kt index 90dfc398b..e7678745c 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/SecurityBehaviorIT.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/SecurityBehaviorIT.kt @@ -21,7 +21,7 @@ import org.opensearch.test.junit.annotations.TestLogging @TestLogging("level:DEBUG", reason = "Debug for tests.") class SecurityBehaviorIT : SecurityRestTestCase() { - private val password = "Test123!" + private val password = "TestpgfhertergGd435AASA123!" private val john = "john" private var johnClient: RestClient? = null diff --git a/src/test/kotlin/org/opensearch/indexmanagement/SecurityRestTestCase.kt b/src/test/kotlin/org/opensearch/indexmanagement/SecurityRestTestCase.kt index c9d480d96..4f6b67d74 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/SecurityRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/SecurityRestTestCase.kt @@ -11,8 +11,8 @@ package org.opensearch.indexmanagement -import org.apache.hc.core5.http.HttpHeaders import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.HttpHeaders import org.apache.hc.core5.http.io.entity.StringEntity import org.apache.hc.core5.http.message.BasicHeader import org.opensearch.client.Request @@ -25,6 +25,7 @@ import org.opensearch.common.xcontent.XContentType import org.opensearch.indexmanagement.indexstatemanagement.IndexStateManagementRestTestCase import org.opensearch.indexmanagement.indexstatemanagement.model.ManagedIndexConfig import org.opensearch.indexmanagement.indexstatemanagement.model.Policy +import org.opensearch.indexmanagement.indexstatemanagement.resthandler.RestExplainAction import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings import org.opensearch.indexmanagement.indexstatemanagement.toJsonString import 
org.opensearch.indexmanagement.indexstatemanagement.util.INDEX_NUMBER_OF_REPLICAS @@ -213,6 +214,13 @@ abstract class SecurityRestTestCase : IndexManagementRestTestCase() { return IndexStateManagementRestTestCaseExt.createPolicyExt(policy, policyId, refresh, client) } + protected fun managedIndexExplainAllAsMap( + client: RestClient?, + ): Map<*, *> { + val request = Request("GET", "${RestExplainAction.EXPLAIN_BASE_URI}") + return entityAsMap(executeRequest(request, RestStatus.OK, client!!)) + } + protected fun getExistingManagedIndexConfig(index: String) = IndexStateManagementRestTestCaseExt.getExistingManagedIndexConfigExt(index) protected fun createPolicyJson( @@ -400,7 +408,7 @@ abstract class SecurityRestTestCase : IndexManagementRestTestCase() { val request = Request(RestRequest.Method.PUT.name, "_plugins/_security/api/internalusers/$name") request.setJsonEntity(json) - executeRequest(request, null, client()) + executeRequest(request, RestStatus.CREATED, client()) } protected fun createUserWithCustomRole( @@ -453,7 +461,7 @@ abstract class SecurityRestTestCase : IndexManagementRestTestCase() { """.trimIndent() request.setJsonEntity(entity) - client().performRequest(request) + executeRequest(request, RestStatus.CREATED, client()) } protected fun assignRoleToUsers(role: String, users: List) { @@ -538,7 +546,7 @@ abstract class SecurityRestTestCase : IndexManagementRestTestCase() { const val AVAILABILITY_INDEX = "availability-1" const val PHONE_OPERATOR = "phone_operator" - const val HELPDESK = "helpdesk_stuff" + const val HELPDESK = "helpdesk_staff" const val HELPDESK_ROLE = "helpdesk_role" const val PHONE_OPERATOR_ROLE = "phone_operator_role" diff --git a/src/test/kotlin/org/opensearch/indexmanagement/TransformSecurityBehaviorIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/TransformSecurityBehaviorIT.kt index 371866e29..071ece9c9 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/TransformSecurityBehaviorIT.kt +++ 
b/src/test/kotlin/org/opensearch/indexmanagement/TransformSecurityBehaviorIT.kt @@ -28,7 +28,7 @@ import java.time.temporal.ChronoUnit @TestLogging("level:DEBUG", reason = "Debug for tests.") class TransformSecurityBehaviorIT : SecurityRestTestCase() { - private val password = "Test123!" + private val password = "TestpgfhertergGd435AASA123!" private val superTransformUser = "john" private var superUserClient: RestClient? = null diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/IndexStateManagementIntegTestCase.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/IndexStateManagementIntegTestCase.kt deleted file mode 100644 index 8d37f4805..000000000 --- a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/IndexStateManagementIntegTestCase.kt +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.indexmanagement.indexstatemanagement - -import org.apache.hc.core5.http.ContentType -import org.apache.hc.core5.http.io.entity.StringEntity -import org.junit.After -import org.junit.Before -import org.opensearch.OpenSearchParseException -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse -import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.client.Request -import org.opensearch.client.Response -import org.opensearch.cluster.metadata.IndexMetadata -import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand -import org.opensearch.common.Strings -import org.opensearch.common.settings.Settings -import org.opensearch.core.xcontent.DeprecationHandler -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.core.xcontent.XContentParser 
-import org.opensearch.common.xcontent.XContentParserUtils -import org.opensearch.common.xcontent.XContentType -import org.opensearch.common.xcontent.json.JsonXContent -import org.opensearch.indexmanagement.IndexManagementPlugin -import org.opensearch.indexmanagement.IndexManagementRestTestCase.Companion.wipeAllIndices -import org.opensearch.indexmanagement.indexstatemanagement.model.ManagedIndexConfig -import org.opensearch.indexmanagement.indexstatemanagement.model.Policy -import org.opensearch.indexmanagement.indexstatemanagement.resthandler.RestExplainAction -import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.explain.ExplainAction -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.explain.TransportExplainAction -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.updateindexmetadata.TransportUpdateManagedIndexMetaDataAction -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.updateindexmetadata.UpdateManagedIndexMetaDataAction -import org.opensearch.indexmanagement.indexstatemanagement.util.TOTAL_MANAGED_INDICES -import org.opensearch.indexmanagement.makeRequest -import org.opensearch.indexmanagement.opensearchapi.parseWithType -import org.opensearch.indexmanagement.spi.indexstatemanagement.model.ManagedIndexMetaData -import org.opensearch.indexmanagement.spi.indexstatemanagement.model.PolicyRetryInfoMetaData -import org.opensearch.indexmanagement.spi.indexstatemanagement.model.StateMetaData -import org.opensearch.indexmanagement.waitFor -import org.opensearch.jobscheduler.spi.schedule.IntervalSchedule -import org.opensearch.plugins.ActionPlugin -import org.opensearch.plugins.Plugin -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestStatus -import org.opensearch.test.OpenSearchIntegTestCase -import 
org.opensearch.test.rest.OpenSearchRestTestCase.entityAsMap -import java.io.IOException -import java.time.Duration -import java.time.Instant - -abstract class IndexStateManagementIntegTestCase : OpenSearchIntegTestCase() { - - @After - fun clearIndicesAfterEachTest() { - wipeAllIndices(getRestClient()) - } - - @Before - fun disableIndexStateManagementJitter() { - // jitter would add a test-breaking delay to the integration tests - updateIndexStateManagementJitterSetting(0.0) - } - - protected val isMixedNodeRegressionTest = System.getProperty("cluster.mixed", "false")!!.toBoolean() - - var metadataToClusterState = ManagedIndexMetaData( - index = "", - indexUuid = "", - policyID = "", - policySeqNo = 0, - policyPrimaryTerm = 1, - policyCompleted = false, - rolledOver = false, - indexCreationDate = null, - transitionTo = null, - stateMetaData = StateMetaData("ReplicaCountState", 1234), - actionMetaData = null, - stepMetaData = null, - policyRetryInfo = PolicyRetryInfoMetaData(false, 0), - info = mapOf("message" to "Happy moving") - ) - - override fun nodePlugins(): Collection> { - return listOf(IndexManagementPlugin::class.java) - } - - class TestPlugin : ActionPlugin, Plugin() { - override fun getActions(): List> { - return listOf( - ActionPlugin.ActionHandler( - UpdateManagedIndexMetaDataAction.INSTANCE, - TransportUpdateManagedIndexMetaDataAction::class.java - ), - ActionPlugin.ActionHandler(ExplainAction.INSTANCE, TransportExplainAction::class.java) - ) - } - } - - // TODO: ...convert into a test REST plugin that allows us to execute the transport action? 
-// override fun transportClientPlugins(): Collection> { -// return listOf(TestPlugin::class.java) -// } - - protected fun getIndexMetadata(indexName: String): IndexMetadata { - return client().admin().cluster().prepareState() - .setIndices(indexName) - .setMetadata(true).get() - .state.metadata.indices[indexName] - } - - // reuse utility fun from RestTestCase - fun createPolicy( - policy: Policy, - policyId: String = randomAlphaOfLength(10), - refresh: Boolean = true - ): Policy { - val response = createPolicyJson(policy.toJsonString(), policyId, refresh) - - val policyJson = JsonXContent.jsonXContent - .createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - val createdId = policyJson["_id"] as String - assertEquals("policy ids are not the same", policyId, createdId) - return policy.copy( - id = createdId, - seqNo = (policyJson["_seq_no"] as Int).toLong(), - primaryTerm = (policyJson["_primary_term"] as Int).toLong() - ) - } - - protected fun createPolicyJson( - policyString: String, - policyId: String, - refresh: Boolean = true - ): Response { - val response = getRestClient() - .makeRequest( - "PUT", - "${IndexManagementPlugin.POLICY_BASE_URI}/$policyId?refresh=$refresh", - emptyMap(), - StringEntity(policyString, ContentType.APPLICATION_JSON) - ) - assertEquals("Unable to create a new policy", RestStatus.CREATED, response.restStatus()) - return response - } - - protected fun Response.restStatus(): RestStatus = RestStatus.fromCode(this.statusLine.statusCode) - - protected fun addPolicyToIndex( - index: String, - policyID: String - ) { - val body = """ - { - "policy_id": "$policyID" - } - """.trimIndent() - val response = getRestClient() - .makeRequest("POST", "/_opendistro/_ism/add/$index", StringEntity(body, ContentType.APPLICATION_JSON)) - assertEquals("Unexpected RestStatus", RestStatus.OK, response.restStatus()) - } - - protected fun getExistingManagedIndexConfig(index: String): 
ManagedIndexConfig { - return waitFor { - val config = getManagedIndexConfig(index) - assertNotNull("ManagedIndexConfig is null", config) - config!! - } - } - - protected fun getManagedIndexConfig(index: String): ManagedIndexConfig? { - val request = """ - { - "seq_no_primary_term": true, - "query": { - "term": { - "${ManagedIndexConfig.MANAGED_INDEX_TYPE}.${ManagedIndexConfig.INDEX_FIELD}": "$index" - } - } - } - """.trimIndent() - val response = getRestClient().makeRequest( - "POST", "${IndexManagementPlugin.INDEX_MANAGEMENT_INDEX}/_search", emptyMap(), - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Request failed", RestStatus.OK, response.restStatus()) - val searchResponse = - SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, response.entity.content)) - assertTrue("Found more than one managed index config", searchResponse.hits.hits.size < 2) - val hit = searchResponse.hits.hits.firstOrNull() - return hit?.run { - val xcp = createParser(JsonXContent.jsonXContent, this.sourceRef) - xcp.parseWithType(id, seqNo, primaryTerm, ManagedIndexConfig.Companion::parse) - } - } - - protected fun updateManagedIndexConfigStartTime(update: ManagedIndexConfig, desiredStartTimeMillis: Long? 
= null) { - val intervalSchedule = (update.jobSchedule as IntervalSchedule) - val millis = Duration.of(intervalSchedule.interval.toLong(), intervalSchedule.unit).minusSeconds(2).toMillis() - val startTimeMillis = desiredStartTimeMillis ?: Instant.now().toEpochMilli() - millis - val response = getRestClient().makeRequest( - "POST", "${IndexManagementPlugin.INDEX_MANAGEMENT_INDEX}/_update/${update.id}", - StringEntity( - "{\"doc\":{\"managed_index\":{\"schedule\":{\"interval\":{\"start_time\":" + - "\"$startTimeMillis\"}}}}}", - ContentType.APPLICATION_JSON - ) - ) - - assertEquals("Request failed", RestStatus.OK, response.restStatus()) - } - - protected fun updateManagedIndexConfigPolicy(update: ManagedIndexConfig, policy: Policy) { - val policyJsonString = policy.toJsonString() - logger.info("policy string: $policyJsonString") - var response = getRestClient().makeRequest( - "POST", "${IndexManagementPlugin.INDEX_MANAGEMENT_INDEX}/_update/${update.id}", - StringEntity( - "{\"doc\":{\"managed_index\": $policyJsonString }}", - ContentType.APPLICATION_JSON - ) - ) - - assertEquals("Request failed", RestStatus.OK, response.restStatus()) - - response = getRestClient().makeRequest( - "POST", "${IndexManagementPlugin.INDEX_MANAGEMENT_INDEX}/_update/${update.id}", - StringEntity( - "{\"doc\":{\"managed_index\": {\"policy_seq_no\": \"0\", \"policy_primary_term\": \"1\"} }}", - ContentType.APPLICATION_JSON - ) - ) - - assertEquals("Request failed", RestStatus.OK, response.restStatus()) - } - - @Suppress("UNCHECKED_CAST") - protected fun getNumberOfReplicasSetting(indexName: String): Int { - val indexSettings = getIndexSettings(indexName) as Map>> - return (indexSettings[indexName]!!["settings"]!!["index.number_of_replicas"] as String).toInt() - } - - @Throws(IOException::class) - protected open fun getIndexSettings(index: String): Map? 
{ - val request = Request("GET", "/$index/_settings") - request.addParameter("flat_settings", "true") - val response = getRestClient().performRequest(request) - response.entity.content.use { `is` -> - return XContentHelper.convertToMap( - XContentType.JSON.xContent(), - `is`, - true - ) - } - } - - protected fun getExplainManagedIndexMetaData(indexName: String): ManagedIndexMetaData { - if (indexName.contains("*") || indexName.contains(",")) { - throw IllegalArgumentException("This method is only for a single concrete index") - } - - val response = getRestClient().makeRequest( - RestRequest.Method.GET.toString(), - "${RestExplainAction.EXPLAIN_BASE_URI}/$indexName" - ) - assertEquals("Unexpected RestStatus", RestStatus.OK, response.restStatus()) - - lateinit var metadata: ManagedIndexMetaData - val xcp = createParser(XContentType.JSON.xContent(), response.entity.content) - XContentParserUtils.ensureExpectedToken( - XContentParser.Token.START_OBJECT, - xcp.nextToken(), - xcp - ) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - xcp.currentName() - xcp.nextToken() - if (xcp.currentName() == TOTAL_MANAGED_INDICES) xcp.intValue() - else metadata = ManagedIndexMetaData.parse(xcp) - } - return metadata - } - - protected fun assertIndexExists(index: String) { - val response = getRestClient().makeRequest("HEAD", index) - assertEquals("Index $index does not exist.", RestStatus.OK, response.restStatus()) - } - - fun getShardSegmentStats(index: String): Map { - val response = getRestClient().makeRequest("GET", "/$index/_stats/segments?level=shards") - - assertEquals("Stats request failed", RestStatus.OK, response.restStatus()) - - return response.asMap() - } - - fun catIndexShard(index: String): List { - val response = getRestClient().makeRequest("GET", "_cat/shards/$index?format=json") - - assertEquals("Stats request failed", RestStatus.OK, response.restStatus()) - - try { - return JsonXContent.jsonXContent - .createParser( - NamedXContentRegistry.EMPTY, - 
DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - response.entity.content - ) - .use { parser -> parser.list() } - } catch (e: IOException) { - throw OpenSearchParseException("Failed to parse content to list", e) - } - } - - fun Response.asMap(): Map = entityAsMap(this) - - fun rerouteShard(configIndexName: String, fromNode: String, toNode: String) { - logger.info("Reallocating Shard. From Node: $fromNode To Node: $toNode ") - val moveCommand = MoveAllocationCommand(configIndexName, 0, fromNode, toNode) - val rerouteResponse = client().admin().cluster() - .reroute(ClusterRerouteRequest().add(moveCommand)).actionGet() - logger.info("reroute success? ${rerouteResponse.isAcknowledged}") - } - - fun updateIndexSettings(index: String, settings: Settings) { - val request = Request("PUT", "/$index/_settings") - request.setJsonEntity(Strings.toString(XContentType.JSON, settings)) - getRestClient().performRequest(request) - } - - fun updateClusterSetting(key: String, value: String?, escapeValue: Boolean = true) { - val formattedValue = if (escapeValue) "\"$value\"" else value - val request = """ - { - "persistent": { - "$key": $formattedValue - } - } - """.trimIndent() - val res = getRestClient().makeRequest( - "PUT", "_cluster/settings", emptyMap(), - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Request failed", RestStatus.OK, res.restStatus()) - } - - protected fun updateIndexStateManagementJitterSetting(value: Double?) 
{ - updateClusterSetting(ManagedIndexSettings.JITTER.key, value.toString(), false) - } -} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataRegressionIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataRegressionIT.kt deleted file mode 100644 index e18aa6c33..000000000 --- a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataRegressionIT.kt +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.indexmanagement.indexstatemanagement - -import com.carrotsearch.randomizedtesting.RandomizedTest.sleep -import org.junit.After -import org.junit.Assume -import org.junit.Before -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.cluster.metadata.IndexMetadata -import org.opensearch.common.settings.Settings -import org.opensearch.index.Index -import org.opensearch.indexmanagement.IndexManagementPlugin.Companion.INDEX_MANAGEMENT_INDEX -import org.opensearch.indexmanagement.indexstatemanagement.action.ReplicaCountAction -import org.opensearch.indexmanagement.indexstatemanagement.model.Policy -import org.opensearch.indexmanagement.indexstatemanagement.model.State -import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.explain.TransportExplainAction.Companion.METADATA_CORRUPT_WARNING -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.explain.TransportExplainAction.Companion.METADATA_MOVING_WARNING -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.updateindexmetadata.UpdateManagedIndexMetaDataAction -import org.opensearch.indexmanagement.indexstatemanagement.transport.action.updateindexmetadata.UpdateManagedIndexMetaDataRequest -import org.opensearch.indexmanagement.waitFor -import java.time.Instant 
-import java.time.temporal.ChronoUnit -import java.util.Locale -import kotlin.collections.HashMap - -class MetadataRegressionIT : IndexStateManagementIntegTestCase() { - - private val testIndexName = javaClass.simpleName.lowercase(Locale.ROOT) - - @Before - fun startMetadataService() { - // metadata service could be stopped before following tests start run - // this will enable metadata service again - updateClusterSetting(ManagedIndexSettings.METADATA_SERVICE_STATUS.key, "-1") - updateClusterSetting(ManagedIndexSettings.METADATA_SERVICE_STATUS.key, "0") - } - - @After - fun cleanClusterSetting() { - // need to clean up otherwise will throw error - updateClusterSetting(ManagedIndexSettings.METADATA_SERVICE_STATUS.key, null, false) - updateClusterSetting(ManagedIndexSettings.TEMPLATE_MIGRATION_CONTROL.key, null, false) - updateIndexStateManagementJitterSetting(null) - } - - fun `test move metadata service`() { - val indexName = "${testIndexName}_index_1" - val policyID = "${testIndexName}_testPolicyName_1" - val actionConfig = ReplicaCountAction(10, 0) - val states = listOf(State(name = "ReplicaCountState", actions = listOf(actionConfig), transitions = listOf())) - val policy = Policy( - id = policyID, - description = "$testIndexName description", - schemaVersion = 1L, - lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), - errorNotification = randomErrorNotification(), - defaultState = states[0].name, - states = states - ) - - createPolicy(policy, policyID) - createIndex(indexName) - - // create a job - addPolicyToIndex(indexName, policyID) - - // put some metadata into cluster state - var indexMetadata = getIndexMetadata(indexName) - metadataToClusterState = metadataToClusterState.copy( - index = indexName, - indexUuid = indexMetadata.indexUUID, - policyID = policyID - ) - val request = UpdateManagedIndexMetaDataRequest( - indicesToAddManagedIndexMetaDataTo = listOf( - Pair(Index(metadataToClusterState.index, metadataToClusterState.indexUuid), 
metadataToClusterState) - ) - ) - val response: AcknowledgedResponse = client().execute( - UpdateManagedIndexMetaDataAction.INSTANCE, request - ).get() - logger.info(response.isAcknowledged) - indexMetadata = getIndexMetadata(indexName) - logger.info("check if metadata is saved in cluster state: ${indexMetadata.getCustomData("managed_index_metadata")}") - - // TODO increase wait time since flaky seeing here. After looking through the log - // it's more likely a test framework execution lag. - waitFor(Instant.ofEpochSecond(60)) { - assertEquals( - METADATA_MOVING_WARNING, - getExplainManagedIndexMetaData(indexName).info?.get("message") - ) - } - - waitFor(Instant.ofEpochSecond(120)) { - assertEquals( - "Happy moving", - getExplainManagedIndexMetaData(indexName).info?.get("message") - ) - assertEquals(null, getIndexMetadata(indexName).getCustomData("managed_index_metadata")) - } - - logger.info("metadata has moved") - - val managedIndexConfig = getExistingManagedIndexConfig(indexName) - // Change the start time so the job will trigger in 2 seconds, since there is metadata and policy with the index there is no initialization - updateManagedIndexConfigStartTime(managedIndexConfig) - - waitFor { assertEquals(policyID, getExplainManagedIndexMetaData(indexName).policyID) } - waitFor { - assertEquals( - "Index did not set number_of_replicas to ${actionConfig.numOfReplicas}", - actionConfig.numOfReplicas, - getNumberOfReplicasSetting(indexName) - ) - } - } - - fun `test job can continue run from cluster state metadata`() { - /** - * new version of ISM plugin can handle metadata in cluster state - * when job already started - * - * create index, add policy to it - * manually add policy field to managed-index so runner won't do initialisation itself - * add metadata into cluster state - * then check if we can continue run from this added metadata - */ - - val indexName = "${testIndexName}_index_2" - val policyID = "${testIndexName}_testPolicyName_2" - val actionConfig = 
ReplicaCountAction(10, 0) - val states = listOf(State(name = "ReplicaCountState", actions = listOf(actionConfig), transitions = listOf())) - val policy = Policy( - id = policyID, - description = "$testIndexName description", - schemaVersion = 1L, - lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), - errorNotification = randomErrorNotification(), - defaultState = states[0].name, - states = states - ) - - createPolicy(policy, policyID) - createIndex(indexName) - addPolicyToIndex(indexName, policyID) - - val managedIndexConfig = getExistingManagedIndexConfig(indexName) - // manually add policy field into managed-index - updateManagedIndexConfigPolicy(managedIndexConfig, policy) - logger.info("managed-index: ${getExistingManagedIndexConfig(indexName)}") - - // manually save metadata into cluster state - var indexMetadata = getIndexMetadata(indexName) - metadataToClusterState = metadataToClusterState.copy( - index = indexName, - indexUuid = indexMetadata.indexUUID, - policyID = policyID - ) - val request = UpdateManagedIndexMetaDataRequest( - indicesToAddManagedIndexMetaDataTo = listOf( - Pair(Index(metadataToClusterState.index, metadataToClusterState.indexUuid), metadataToClusterState) - ) - ) - val response: AcknowledgedResponse = client().execute( - UpdateManagedIndexMetaDataAction.INSTANCE, request - ).get() - - logger.info(response.isAcknowledged) - indexMetadata = getIndexMetadata(indexName) - logger.info("check if metadata is saved in cluster state: ${indexMetadata.getCustomData("managed_index_metadata")}") - - waitFor { - assertEquals( - METADATA_MOVING_WARNING, - getExplainManagedIndexMetaData(indexName).info?.get("message") - ) - } - - waitFor(Instant.ofEpochSecond(120)) { - assertEquals( - "Happy moving", - getExplainManagedIndexMetaData(indexName).info?.get("message") - ) - assertEquals(null, getIndexMetadata(indexName).getCustomData("managed_index_metadata")) - } - - logger.info("metadata has moved") - - // start the job run - 
updateManagedIndexConfigStartTime(managedIndexConfig) - waitFor { - assertEquals( - "Index did not set number_of_replicas to ${actionConfig.numOfReplicas}", - actionConfig.numOfReplicas, - getNumberOfReplicasSetting(indexName) - ) - } - } - - fun `test clean corrupt metadata`() { - val indexName = "${testIndexName}_index_3" - val policyID = "${testIndexName}_testPolicyName_3" - val action = ReplicaCountAction(10, 0) - val states = listOf(State(name = "ReplicaCountState", actions = listOf(action), transitions = listOf())) - val policy = Policy( - id = policyID, - description = "$testIndexName description", - schemaVersion = 1L, - lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), - errorNotification = randomErrorNotification(), - defaultState = states[0].name, - states = states - ) - - createPolicy(policy, policyID) - createIndex(indexName) - - // create a job - addPolicyToIndex(indexName, policyID) - - // put some metadata into cluster state - val indexMetadata = getIndexMetadata(indexName) - metadataToClusterState = metadataToClusterState.copy( - index = indexName, - indexUuid = "randomindexuuid", - policyID = policyID - ) - val request = UpdateManagedIndexMetaDataRequest( - indicesToAddManagedIndexMetaDataTo = listOf( - Pair(Index(indexName, indexMetadata.indexUUID), metadataToClusterState) - ) - ) - client().execute(UpdateManagedIndexMetaDataAction.INSTANCE, request).get() - logger.info("check if metadata is saved in cluster state: ${getIndexMetadata(indexName).getCustomData("managed_index_metadata")}") - - waitFor { - assertEquals( - METADATA_CORRUPT_WARNING, - getExplainManagedIndexMetaData(indexName).info?.get("message") - ) - } - - waitFor(Instant.ofEpochSecond(120)) { - assertEquals(null, getExplainManagedIndexMetaData(indexName).info?.get("message")) - assertEquals(null, getIndexMetadata(indexName).getCustomData("managed_index_metadata")) - } - - logger.info("corrupt metadata has been cleaned") - } - - fun `test new node skip execution when 
old node exist in cluster`() { - Assume.assumeTrue(isMixedNodeRegressionTest) - - /** - * mixedCluster-0 is new node, mixedCluster-1 is old node - * - * set config index to only have one shard on new node - * so old node cannot run job because it has no shard - * new node also cannot run job because there is an old node - * here we check no job can be run - * - * then reroute shard to old node and this old node can run job - */ - - val indexName = "${testIndexName}_index_1" - val policyID = "${testIndexName}_testPolicyName_1" - val actionConfig = ReplicaCountAction(10, 0) - val states = listOf(State(name = "ReplicaCountState", actions = listOf(actionConfig), transitions = listOf())) - val policy = Policy( - id = policyID, - description = "$testIndexName description", - schemaVersion = 1L, - lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), - errorNotification = randomErrorNotification(), - defaultState = states[0].name, - states = states - ) - - createPolicy(policy, policyID) - createIndex(indexName) - - val settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .build() - updateIndexSettings(INDEX_MANAGEMENT_INDEX, settings) - - // check config index shard position - val shardsResponse = catIndexShard(INDEX_MANAGEMENT_INDEX) - logger.info("check config index shard: $shardsResponse") - val shardNode = (shardsResponse[0] as HashMap<*, *>)["node"] - - sleep(3000) // wait some time for cluster to be stable - - // move shard on node1 to node0 if exist - if (shardNode == "mixedCluster-1") rerouteShard(INDEX_MANAGEMENT_INDEX, "mixedCluster-1", "mixedCluster-0") - - addPolicyToIndex(indexName, policyID) - - val managedIndexConfig = getExistingManagedIndexConfig(indexName) - updateManagedIndexConfigStartTime(managedIndexConfig) - - // check no job has been run - wait { assertEquals(null, getExistingManagedIndexConfig(indexName).policy) } - - // reroute shard to node1 - rerouteShard(INDEX_MANAGEMENT_INDEX, "mixedCluster-0", 
"mixedCluster-1") - - val shardsResponse2 = catIndexShard(INDEX_MANAGEMENT_INDEX) - logger.info("check config index shard: $shardsResponse2") - - // job can be ran now - updateManagedIndexConfigStartTime(managedIndexConfig) - waitFor { assertEquals(policyID, getExplainManagedIndexMetaData(indexName).policyID) } - } -} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataServiceTests.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataServiceTests.kt deleted file mode 100644 index ecbfda5f4..000000000 --- a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/MetadataServiceTests.kt +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.indexmanagement.indexstatemanagement - -import com.nhaarman.mockitokotlin2.any -import com.nhaarman.mockitokotlin2.doAnswer -import com.nhaarman.mockitokotlin2.doReturn -import com.nhaarman.mockitokotlin2.mock -import com.nhaarman.mockitokotlin2.never -import com.nhaarman.mockitokotlin2.verify -import com.nhaarman.mockitokotlin2.whenever -import kotlinx.coroutines.runBlocking -import org.junit.Before -import org.opensearch.action.ActionListener -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse -import org.opensearch.client.AdminClient -import org.opensearch.client.Client -import org.opensearch.client.ClusterAdminClient -import org.opensearch.cluster.ClusterState -import org.opensearch.cluster.metadata.Metadata -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.collect.ImmutableOpenMap -import org.opensearch.indexmanagement.IndexManagementIndices -import org.opensearch.test.OpenSearchTestCase -import kotlin.test.assertFailsWith - -class MetadataServiceTests : OpenSearchTestCase() { - - private val clusterService: ClusterService = mock() - private val clusterState: ClusterState = mock() - private val 
metadata: Metadata = mock() - private val imIndices: IndexManagementIndices = mock() - - private val ex = Exception() - - @Before - fun setup() { - whenever(clusterService.state()).doReturn(clusterState) - whenever(clusterState.metadata).doReturn(metadata) - whenever(metadata.indices).doReturn(ImmutableOpenMap.of()) - } - - fun `test config index not exists`() = runBlocking { - whenever(imIndices.indexManagementIndexExists()).doReturn(false) - - val client = getClient( - getAdminClient( - getClusterAdminClient( - updateSettingResponse = null, - updateSettingException = ex - ) - ) - ) - val skipFlag = SkipExecution(client) - val metadataService = MetadataService(client, clusterService, skipFlag, imIndices) - metadataService.moveMetadata() - - verify(client.admin().cluster(), never()).updateSettings(any(), any()) - assertEquals(metadataService.finishFlag, true) - } - - // If update setting to 1 failed with some exception, runTimeCounter shouldn't be increased - fun `test failed to update setting to 1`() = runBlocking { - whenever(imIndices.indexManagementIndexExists()).doReturn(true) - - val client = getClient( - getAdminClient( - getClusterAdminClient( - updateSettingResponse = null, - updateSettingException = ex - ) - ) - ) - - val skipFlag = SkipExecution(client) - val metadataService = MetadataService(client, clusterService, skipFlag, imIndices) - metadataService.moveMetadata() - assertEquals(metadataService.runTimeCounter, 2) - metadataService.moveMetadata() - assertEquals(metadataService.runTimeCounter, 3) - metadataService.moveMetadata() - assertEquals(metadataService.runTimeCounter, 4) - assertFailsWith(Exception::class) { - runBlocking { - metadataService.moveMetadata() - } - } - assertEquals(metadataService.runTimeCounter, 4) - assertEquals(metadataService.finishFlag, false) - } - - private fun getClient(adminClient: AdminClient): Client = mock { on { admin() } doReturn adminClient } - - private fun getAdminClient(clusterAdminClient: ClusterAdminClient): 
AdminClient = mock { on { cluster() } doReturn clusterAdminClient } - - private fun getClusterAdminClient( - updateSettingResponse: ClusterUpdateSettingsResponse?, - updateSettingException: Exception? - ): ClusterAdminClient { - assertTrue( - "Must provide either a getMappingsResponse or getMappingsException", - (updateSettingResponse != null).xor(updateSettingException != null) - ) - - return mock { - doAnswer { invocationOnMock -> - val listener = invocationOnMock.getArgument>(1) - if (updateSettingResponse != null) listener.onResponse(updateSettingResponse) - else listener.onFailure(updateSettingException) - }.whenever(this.mock).updateSettings(any(), any()) - } - } -} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/IndexStateManagementHistoryIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/IndexStateManagementHistoryIT.kt index b94eedeae..d46d5318a 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/IndexStateManagementHistoryIT.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/action/IndexStateManagementHistoryIT.kt @@ -395,6 +395,7 @@ class IndexStateManagementHistoryIT : IndexStateManagementRestTestCase() { } fun `test history shard settings`() { + deleteIndex(IndexManagementIndices.HISTORY_ALL) val indexName = "${testIndexName}_shard_settings" val policyID = "${testIndexName}_shard_settings_1" val actionConfig = ReadOnlyAction(0) @@ -429,7 +430,8 @@ class IndexStateManagementHistoryIT : IndexStateManagementRestTestCase() { waitFor { assertIndexExists(IndexManagementIndices.HISTORY_WRITE_INDEX_ALIAS) val indexSettings = getIndexSettings(IndexManagementIndices.HISTORY_WRITE_INDEX_ALIAS) - val historyIndexName = indexSettings.keys.filter { it.startsWith(IndexManagementIndices.HISTORY_INDEX_BASE) }.firstOrNull() + val historyIndexName = + indexSettings.keys.firstOrNull { it.startsWith(IndexManagementIndices.HISTORY_INDEX_BASE) 
} assertNotNull("Could not find a concrete history index", historyIndexName) assertEquals("Wrong number of shards", 2, getNumberOfShardsSetting(historyIndexName!!)) assertEquals("Wrong number of replicas", 3, getNumberOfReplicasSetting(historyIndexName)) diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/coordinator/ManagedIndexCoordinatorTests.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/coordinator/ManagedIndexCoordinatorTests.kt index f455fca15..59464d152 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/coordinator/ManagedIndexCoordinatorTests.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/coordinator/ManagedIndexCoordinatorTests.kt @@ -19,9 +19,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.indexmanagement.IndexManagementIndices import org.opensearch.indexmanagement.indexstatemanagement.IndexMetadataProvider import org.opensearch.indexmanagement.indexstatemanagement.ManagedIndexCoordinator -import org.opensearch.indexmanagement.indexstatemanagement.MetadataService import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings -import org.opensearch.indexmanagement.indexstatemanagement.migration.ISMTemplateService import org.opensearch.test.ClusterServiceUtils import org.opensearch.test.OpenSearchTestCase import org.opensearch.threadpool.Scheduler @@ -36,8 +34,6 @@ class ManagedIndexCoordinatorTests : OpenSearchAllocationTestCase() { private lateinit var settings: Settings private lateinit var indexManagementIndices: IndexManagementIndices - private lateinit var metadataService: MetadataService - private lateinit var templateService: ISMTemplateService private lateinit var coordinator: ManagedIndexCoordinator private lateinit var indexMetadataProvider: IndexMetadataProvider @@ -49,8 +45,6 @@ class ManagedIndexCoordinatorTests : OpenSearchAllocationTestCase() { client = 
Mockito.mock(Client::class.java) threadPool = Mockito.mock(ThreadPool::class.java) indexManagementIndices = Mockito.mock(IndexManagementIndices::class.java) - metadataService = Mockito.mock(MetadataService::class.java) - templateService = Mockito.mock(ISMTemplateService::class.java) val namedXContentRegistryEntries = arrayListOf() xContentRegistry = NamedXContentRegistry(namedXContentRegistryEntries) @@ -76,14 +70,13 @@ class ManagedIndexCoordinatorTests : OpenSearchAllocationTestCase() { clusterService = Mockito.spy(originClusterService) indexMetadataProvider = IndexMetadataProvider(settings, client, clusterService, mutableMapOf()) coordinator = ManagedIndexCoordinator( - settings, client, clusterService, threadPool, indexManagementIndices, metadataService, - templateService, indexMetadataProvider + settings, client, clusterService, threadPool, indexManagementIndices, indexMetadataProvider ) } fun `test after start`() { coordinator.afterStart() - Mockito.verify(threadPool, Mockito.times(2)).scheduleWithFixedDelay(Mockito.any(), Mockito.any(), Mockito.anyString()) + Mockito.verify(threadPool, Mockito.times(1)).scheduleWithFixedDelay(Mockito.any(), Mockito.any(), Mockito.anyString()) } fun `test before stop`() { @@ -100,7 +93,7 @@ class ManagedIndexCoordinatorTests : OpenSearchAllocationTestCase() { fun `test on cluster manager`() { coordinator.onClusterManager() - Mockito.verify(threadPool, Mockito.times(3)).scheduleWithFixedDelay(Mockito.any(), Mockito.any(), Mockito.anyString()) + Mockito.verify(threadPool, Mockito.times(1)).scheduleWithFixedDelay(Mockito.any(), Mockito.any(), Mockito.anyString()) } fun `test off cluster manager`() { diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/migration/MigrationServicesIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/migration/MigrationServicesIT.kt deleted file mode 100644 index b082352fe..000000000 --- 
a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/migration/MigrationServicesIT.kt +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.indexmanagement.indexstatemanagement.migration - -import org.junit.Assume -import org.opensearch.indexmanagement.indexstatemanagement.IndexStateManagementRestTestCase -import org.opensearch.indexmanagement.indexstatemanagement.randomPolicy -import org.opensearch.indexmanagement.indexstatemanagement.settings.LegacyOpenDistroManagedIndexSettings -import org.opensearch.indexmanagement.indexstatemanagement.settings.ManagedIndexSettings -import org.opensearch.indexmanagement.waitFor -import java.time.Instant - -class MigrationServicesIT : IndexStateManagementRestTestCase() { - fun `test v1 index templates different order migration`() { - val policyID1 = "p1" - val policyID2 = "p2" - createPolicy(randomPolicy(), policyID1) - createPolicy(randomPolicy(), policyID2) - createV1Template("t1", "a*", policyID1, order = -1) - createV1Template("t2", "ab*", policyID1) - createV1Template("t3", "ab*", policyID2, order = 1) - createV1Template2("t4", "ab*", order = 0) - enableISMTemplateMigration() - - waitFor(Instant.ofEpochSecond(80)) { - assertEquals(getPolicy(policyID2).ismTemplate?.first()?.indexPatterns.toString(), "[ab*]") - assertEquals(getPolicy(policyID2).ismTemplate?.first()?.priority, 2) - } - - // 1s interval to let the ism_template becomes searchable so that coordinator - // can pick it up - Thread.sleep(1_000) - // need to delete overlapping template, otherwise warning will fail the test - deleteV1Template("t1") - deleteV1Template("t2") - deleteV1Template("t3") - deleteV1Template("t4") - - val indexName = "ab_index" - createIndex(indexName, policyID = null) - waitFor { - assertPredicatesOnMetaData( - listOf( - indexName to listOf( - ManagedIndexSettings.POLICY_ID.key to policyID2::equals, - 
LegacyOpenDistroManagedIndexSettings.POLICY_ID.key to policyID2::equals - ) - ), - getExplainMap(indexName), false - ) - } - } - - fun `test v1 index templates migration`() { - // cat/templates API could return template info in different order in multi-node test - // so skip for multi-node test - Assume.assumeFalse(isMultiNode) - - val policyID1 = "p1" - val policyID2 = "p2" - createPolicy(randomPolicy(), policyID1) - createPolicy(randomPolicy(), policyID2) - createV1Template("t1", "a*", policyID1) - createV1Template("t2", "ab*", policyID2) - enableISMTemplateMigration() - - // cat templates, check t1 t2 order - val order = getTemplatesOrder() - - // t1, t2 - if (order == listOf("t1", "t2")) { - waitFor(Instant.ofEpochSecond(80)) { - assertEquals(getPolicy(policyID1).ismTemplate?.first()?.indexPatterns.toString(), "[a*]") - assertEquals(getPolicy(policyID1).ismTemplate?.first()?.priority, 1) - assertEquals(getPolicy(policyID2).ismTemplate?.first()?.indexPatterns.toString(), "[ab*]") - assertEquals(getPolicy(policyID2).ismTemplate?.first()?.priority, 0) - } - } - - // t2, t1 - if (order == listOf("t2", "t1")) { - waitFor(Instant.ofEpochSecond(80)) { - waitFor(Instant.ofEpochSecond(80)) { - assertEquals(getPolicy(policyID1).ismTemplate?.first()?.indexPatterns.toString(), "[a*]") - assertEquals(getPolicy(policyID1).ismTemplate?.first()?.priority, 0) - assertEquals(getPolicy(policyID2).ismTemplate?.first()?.indexPatterns.toString(), "[ab*]") - assertEquals(getPolicy(policyID2).ismTemplate?.first()?.priority, 1) - } - } - } - - // 1s interval to let the ism_template becomes searchable so that coordinator - // can pick it up - Thread.sleep(1_000) - deleteV1Template("t1") - deleteV1Template("t2") - - if (order == listOf("t1", "t2")) { - val indexName = "ab_index" - createIndex(indexName, policyID = null) - waitFor { - assertPredicatesOnMetaData( - listOf( - indexName to listOf( - ManagedIndexSettings.POLICY_ID.key to policyID1::equals, - 
LegacyOpenDistroManagedIndexSettings.POLICY_ID.key to policyID1::equals - ) - ), - getExplainMap(indexName), false - ) - } - } - - if (order == listOf("t2", "t1")) { - val indexName = "ab_index" - createIndex(indexName, policyID = null) - waitFor { - assertPredicatesOnMetaData( - listOf( - indexName to listOf( - ManagedIndexSettings.POLICY_ID.key to policyID2::equals, - LegacyOpenDistroManagedIndexSettings.POLICY_ID.key to policyID2::equals - ) - ), - getExplainMap(indexName), false - ) - } - } - } - - @Suppress("UNCHECKED_CAST") - private fun getTemplatesOrder(): List { - val order = catIndexTemplates().map { - val row = it as Map - row["name"] - } - return order - } - - fun `test v2 index templates migration`() { - val policyID1 = "p1" - createPolicy(randomPolicy(), policyID1) - createV2Template("t1", "a*", policyID1) - enableISMTemplateMigration() - - waitFor(Instant.ofEpochSecond(80)) { - assertEquals(getPolicy(policyID1).ismTemplate?.first()?.indexPatterns.toString(), "[a*]") - } - - // 1s interval to let the ism_template becomes searchable so that coordinator - // can pick it up - Thread.sleep(1_000) - deleteV2Template("t1") - - val indexName = "ab_index" - createIndex(indexName, policyID = null) - waitFor { - assertPredicatesOnMetaData( - listOf( - indexName to listOf( - ManagedIndexSettings.POLICY_ID.key to policyID1::equals, - LegacyOpenDistroManagedIndexSettings.POLICY_ID.key to policyID1::equals - ) - ), - getExplainMap(indexName), false - ) - } - } - - private fun enableISMTemplateMigration() { - updateClusterSetting(ManagedIndexSettings.TEMPLATE_MIGRATION_CONTROL.key, "-1") - updateClusterSetting(ManagedIndexSettings.TEMPLATE_MIGRATION_CONTROL.key, "0") - } -} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/AttemptTransitionStepTests.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/AttemptTransitionStepTests.kt index aeeedd7c9..3f3114f10 100644 --- 
a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/AttemptTransitionStepTests.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/step/AttemptTransitionStepTests.kt @@ -23,7 +23,6 @@ import org.opensearch.cluster.ClusterState import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.metadata.Metadata import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.collect.ImmutableOpenMap import org.opensearch.common.settings.ClusterSettings import org.opensearch.common.settings.Settings import org.opensearch.index.shard.DocsStats @@ -50,7 +49,7 @@ class AttemptTransitionStepTests : OpenSearchTestCase() { private val indexUUID: String = "indexUuid" @Suppress("UNCHECKED_CAST") private val indexMetadata: IndexMetadata = mock { - on { rolloverInfos } doReturn ImmutableOpenMap.builder().build() + on { rolloverInfos } doReturn mapOf() on { indexUUID } doReturn indexUUID } private val metadata: Metadata = mock { diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtilsTests.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtilsTests.kt index 5419cf2c5..a9a671e72 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtilsTests.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/util/StepUtilsTests.kt @@ -37,7 +37,7 @@ class StepUtilsTests : OpenSearchTestCase() { ) val lockModel = getShrinkLockModel(shrinkActionProperties) assertEquals("Incorrect lock model job index name", INDEX_MANAGEMENT_INDEX, lockModel.jobIndexName) - assertEquals("Incorrect lock model jobID", getShrinkLockID(shrinkActionProperties.nodeName), lockModel.jobId) + assertEquals("Incorrect lock model jobID", getShrinkJobID(shrinkActionProperties.nodeName), lockModel.jobId) assertEquals("Incorrect lock model duration", shrinkActionProperties.lockDurationSecond, 
lockModel.lockDurationSeconds) assertEquals("Incorrect lock model lockID", "${lockModel.jobIndexName}-${lockModel.jobId}", lockModel.lockId) assertEquals("Incorrect lock model sequence number", shrinkActionProperties.lockSeqNo, lockModel.seqNo) @@ -129,9 +129,9 @@ class StepUtilsTests : OpenSearchTestCase() { val clusterSettings = ClusterSettings(settings.build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) val remainingSpace = freeBytes - ((2 * indexSize) + threshold) if (remainingSpace > 0) { - assertEquals(remainingSpace, getNodeFreeMemoryAfterShrink(nodeStats, indexSize, clusterSettings)) + assertEquals(remainingSpace, getNodeFreeDiskSpaceAfterShrink(nodeStats, indexSize, clusterSettings)) } else { - assertEquals(-1L, getNodeFreeMemoryAfterShrink(nodeStats, indexSize, clusterSettings)) + assertEquals(-1L, getNodeFreeDiskSpaceAfterShrink(nodeStats, indexSize, clusterSettings)) } } } diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateCloseIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateCloseIT.kt new file mode 100644 index 000000000..091d948de --- /dev/null +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateCloseIT.kt @@ -0,0 +1,66 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.opensearch.indexmanagement.indexstatemanagement.IndexStateManagementRestTestCase +import org.opensearch.indexmanagement.indexstatemanagement.action.CloseAction +import org.opensearch.indexmanagement.indexstatemanagement.model.Policy +import org.opensearch.indexmanagement.indexstatemanagement.model.State +import org.opensearch.indexmanagement.indexstatemanagement.randomErrorNotification +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.indexmanagement.waitFor +import 
java.time.Instant +import java.time.temporal.ChronoUnit +import java.util.Locale + +class ValidateCloseIT : IndexStateManagementRestTestCase() { + private val testIndexName = javaClass.simpleName.lowercase(Locale.ROOT) + + fun `test basic close action validation`() { + enableValidationService() + val indexName = "${testIndexName}_index_1" + val policyID = "${testIndexName}_testPolicyName_1" + val actionConfig = CloseAction(0) + val states = listOf( + State("CloseState", listOf(actionConfig), listOf()) + ) + + val policy = Policy( + id = policyID, + description = "$testIndexName description", + schemaVersion = 1L, + lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), + errorNotification = randomErrorNotification(), + defaultState = states[0].name, + states = states + ) + createPolicy(policy, policyID) + createIndex(indexName, policyID) + + assertEquals("open", getIndexState(indexName)) + + val managedIndexConfig = getExistingManagedIndexConfig(indexName) + // Change the start time so the job will trigger in 2 seconds. + updateManagedIndexConfigStartTime(managedIndexConfig) + + waitFor { assertEquals(policyID, getExplainManagedIndexMetaData(indexName).policyID) } + + // Need to wait two cycles. + // Change the start time so the job will trigger in 2 seconds. 
+ updateManagedIndexConfigStartTime(managedIndexConfig) + + waitFor { assertEquals("close", getIndexState(indexName)) } + + waitFor { + val data = getExplainValidationResult(indexName) + assertEquals( + "Index close action validation status is PASSED.", + Validate.ValidationStatus.PASSED, + data.validationStatus + ) + } + } +} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateIndexPriorityIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateIndexPriorityIT.kt new file mode 100644 index 000000000..c3a8ce71b --- /dev/null +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateIndexPriorityIT.kt @@ -0,0 +1,66 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.opensearch.indexmanagement.indexstatemanagement.IndexStateManagementRestTestCase +import org.opensearch.indexmanagement.indexstatemanagement.action.IndexPriorityAction +import org.opensearch.indexmanagement.indexstatemanagement.model.Policy +import org.opensearch.indexmanagement.indexstatemanagement.model.State +import org.opensearch.indexmanagement.indexstatemanagement.randomErrorNotification +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.indexmanagement.waitFor +import java.time.Instant +import java.time.temporal.ChronoUnit +import java.util.Locale + +class ValidateIndexPriorityIT : IndexStateManagementRestTestCase() { + private val testIndexName = javaClass.simpleName.lowercase(Locale.ROOT) + + fun `test basic index priority`() { + val indexName = "${testIndexName}_index_1" + val policyID = "${testIndexName}_testPolicyName_1" + val actionConfig = IndexPriorityAction(50, 0) + val states = listOf(State(name = 
"SetPriorityState", actions = listOf(actionConfig), transitions = listOf())) + val policy = Policy( + id = policyID, + description = "$testIndexName description", + schemaVersion = 1L, + lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), + errorNotification = randomErrorNotification(), + defaultState = states[0].name, + states = states + ) + + createPolicy(policy, policyID) + createIndex(indexName, policyID) + + val managedIndexConfig = getExistingManagedIndexConfig(indexName) + // Change the runJob start time so the job will trigger in 2 seconds + updateManagedIndexConfigStartTime(managedIndexConfig) + + // ism policy initialized + waitFor { assertEquals(policyID, getExplainManagedIndexMetaData(indexName).policyID) } + + // change the runJob start time to change index priority + updateManagedIndexConfigStartTime(managedIndexConfig) + + waitFor { assertEquals("Index did not set index_priority to ${actionConfig.indexPriority}", actionConfig.indexPriority, getIndexPrioritySetting(indexName)) } + + waitFor { + val data = getExplainValidationResult(indexName) + assertEquals( + "Index Priority action validation status is PASSED.", + Validate.ValidationStatus.PASSED, + data.validationStatus + ) + } + } +} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateSnapshotIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateSnapshotIT.kt new file mode 100644 index 000000000..3f6dfd42c --- /dev/null +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateSnapshotIT.kt @@ -0,0 +1,73 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.opensearch.indexmanagement.indexstatemanagement.IndexStateManagementRestTestCase +import 
org.opensearch.indexmanagement.indexstatemanagement.action.SnapshotAction +import org.opensearch.indexmanagement.indexstatemanagement.model.Policy +import org.opensearch.indexmanagement.indexstatemanagement.model.State +import org.opensearch.indexmanagement.indexstatemanagement.randomErrorNotification +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.indexmanagement.waitFor +import java.time.Instant +import java.time.temporal.ChronoUnit +import java.util.Locale + +class ValidateSnapshotIT : IndexStateManagementRestTestCase() { + private val testIndexName = javaClass.simpleName.lowercase(Locale.ROOT) + + fun `test basic snapshot validation`() { + val indexName = "${testIndexName}_index_basic" + val policyID = "${testIndexName}_policy_basic" + val repository = "repository" + val snapshot = "snapshot" + val actionConfig = SnapshotAction(repository, snapshot, 0) + val states = listOf( + State("Snapshot", listOf(actionConfig), listOf()) + ) + + createRepository(repository) + + val policy = Policy( + id = policyID, + description = "$testIndexName description", + schemaVersion = 1L, + lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), + errorNotification = randomErrorNotification(), + defaultState = states[0].name, + states = states + ) + createPolicy(policy, policyID) + createIndex(indexName, policyID) + + val managedIndexConfig = getExistingManagedIndexConfig(indexName) + + // Change the start time so the job will trigger in 2 seconds. 
+ updateManagedIndexConfigStartTime(managedIndexConfig) + + waitFor { assertEquals(policyID, getExplainManagedIndexMetaData(indexName).policyID) } + + // Need to wait two cycles for wait for snapshot step + updateManagedIndexConfigStartTime(managedIndexConfig) + + waitFor { + val data = getExplainValidationResult(indexName) + assertEquals( + "Index snapshot action validation status is PASSED.", + Validate.ValidationStatus.PASSED, + data.validationStatus + ) + } + + waitFor { assertSnapshotExists(repository, "snapshot") } + waitFor { assertSnapshotFinishedWithSuccess(repository, "snapshot") } + } +} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateTransitionIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateTransitionIT.kt new file mode 100644 index 000000000..a2383b7b6 --- /dev/null +++ b/src/test/kotlin/org/opensearch/indexmanagement/indexstatemanagement/validation/ValidateTransitionIT.kt @@ -0,0 +1,69 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.indexmanagement.indexstatemanagement.validation + +import org.opensearch.indexmanagement.indexstatemanagement.IndexStateManagementRestTestCase +import org.opensearch.indexmanagement.indexstatemanagement.model.Conditions +import org.opensearch.indexmanagement.indexstatemanagement.model.Policy +import org.opensearch.indexmanagement.indexstatemanagement.model.State +import org.opensearch.indexmanagement.indexstatemanagement.model.Transition +import org.opensearch.indexmanagement.indexstatemanagement.randomErrorNotification +import org.opensearch.indexmanagement.spi.indexstatemanagement.Validate +import org.opensearch.indexmanagement.waitFor +import java.time.Instant +import java.time.temporal.ChronoUnit +import java.util.Locale + +class ValidateTransitionIT : IndexStateManagementRestTestCase() { + + private val testIndexName = 
javaClass.simpleName.lowercase(Locale.ROOT) + + fun `test transition validation with doc count condition`() { + enableValidationService() + val indexName = "${testIndexName}_index_1" + val policyID = "${testIndexName}_testPolicyName_1" + val secondStateName = "second" + val states = listOf( + State("first", listOf(), listOf(Transition(secondStateName, Conditions(docCount = 5L)))), + State(secondStateName, listOf(), listOf()) + ) + + val policy = Policy( + id = policyID, + description = "$testIndexName description", + schemaVersion = 1L, + lastUpdatedTime = Instant.now().truncatedTo(ChronoUnit.MILLIS), + errorNotification = randomErrorNotification(), + defaultState = states[0].name, + states = states + ) + + createPolicy(policy, policyID) + createIndex(indexName, policyID) + + val managedIndexConfig = getExistingManagedIndexConfig(indexName) + + // Initializing the policy/metadata + updateManagedIndexConfigStartTime(managedIndexConfig) + + waitFor { assertEquals(policyID, getExplainManagedIndexMetaData(indexName).policyID) } + + // Add 6 documents (>5) + insertSampleData(indexName, 6) + + // Evaluating transition conditions for second time + updateManagedIndexConfigStartTime(managedIndexConfig) + + waitFor { + val data = getExplainValidationResult(indexName) + assertEquals( + "Index transition validation status is PASSED.", + Validate.ValidationStatus.PASSED, + data.validationStatus + ) + } + } +} diff --git a/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperServiceTests.kt b/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperServiceTests.kt index f606e207b..5d8004107 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperServiceTests.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMapperServiceTests.kt @@ -21,7 +21,6 @@ import org.opensearch.client.IndicesAdminClient import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.metadata.MappingMetadata 
import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.collect.ImmutableOpenMap import org.opensearch.common.xcontent.XContentType import org.opensearch.indexmanagement.rollup.model.RollupJobValidationResult import org.opensearch.test.OpenSearchTestCase @@ -292,16 +291,14 @@ class RollupMapperServiceTests : OpenSearchTestCase() { private fun getMappingResponse(indexName: String, emptyMapping: Boolean = false): GetMappingsResponse { val mappings = if (emptyMapping) { - ImmutableOpenMap.Builder().build() + mapOf() } else { val mappingSourceMap = createParser( XContentType.JSON.xContent(), javaClass.classLoader.getResource("mappings/kibana-sample-data.json").readText() ).map() val mappingMetadata = MappingMetadata("_doc", mappingSourceMap) // it seems it still expects a type, i.e. _doc now - ImmutableOpenMap.Builder() - .fPut(indexName, mappingMetadata) - .build() + mapOf(indexName to mappingMetadata) } return GetMappingsResponse(mappings) diff --git a/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataServiceTests.kt b/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataServiceTests.kt index d31ff9edb..46e62057d 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataServiceTests.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/rollup/RollupMetadataServiceTests.kt @@ -735,7 +735,7 @@ class RollupMetadataServiceTests : OpenSearchTestCase() { // TODO: Mockito 2 supposedly should be able to mock final classes but there were errors when trying to do so // Will need to check if there is a workaround or a better way to mock getting hits.hits since this current approach is verbose - val docField = DocumentField(dateHistogram.sourceField, listOf(getInstant(timestamp).toEpochMilli().toString())) + val docField = DocumentField(dateHistogram.sourceField, listOf(timestamp)) val searchHit = SearchHit(0) searchHit.setDocumentField(dateHistogram.sourceField, docField) val searchHits = 
SearchHits(arrayOf(searchHit), null, 0.0F) diff --git a/src/test/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptorIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptorIT.kt index 78cd27062..8af7a035b 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptorIT.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/rollup/interceptor/RollupInterceptorIT.kt @@ -7,6 +7,7 @@ package org.opensearch.indexmanagement.rollup.interceptor import org.apache.hc.core5.http.ContentType import org.apache.hc.core5.http.io.entity.StringEntity +import org.junit.Assert import org.opensearch.client.ResponseException import org.opensearch.indexmanagement.common.model.dimension.DateHistogram import org.opensearch.indexmanagement.common.model.dimension.Terms @@ -1710,4 +1711,169 @@ class RollupInterceptorIT : RollupRestTestCase() { assertTrue("The query_string query field check failed!", e.message!!.contains("Could not find a rollup job that can answer this query because [missing field unknown_field]")) } } + + fun `test roll up search query_string query with index pattern as source`() { + val sourceIndex = "source_111_rollup_search_qsq_98243" + val targetIndex = "target_rollup_qsq_search_98243" + + createSampleIndexForQSQTest(sourceIndex) + + val rollup = Rollup( + id = "basic_query_string_query_rollup_search98243", + enabled = true, + schemaVersion = 1L, + jobSchedule = IntervalSchedule(Instant.now(), 1, ChronoUnit.MINUTES), + jobLastUpdatedTime = Instant.now(), + jobEnabledTime = Instant.now(), + description = "basic search test", + sourceIndex = "source_111*", + targetIndex = targetIndex, + metadataID = null, + roles = emptyList(), + pageSize = 10, + delay = 0, + continuous = false, + dimensions = listOf( + DateHistogram(sourceField = "event_ts", fixedInterval = "1h"), + Terms("state", "state"), + Terms("state_ext", "state_ext"), + Terms("state_ext2", "state_ext2"), + 
Terms("state_ordinal", "state_ordinal"), + Terms("abc test", "abc test"), + ), + metrics = listOf( + RollupMetrics( + sourceField = "earnings", targetField = "earnings", + metrics = listOf( + Sum(), Min(), Max(), + ValueCount(), Average() + ) + ) + ) + ).let { createRollup(it, it.id) } + + updateRollupStartTime(rollup) + + waitFor { + val rollupJob = getRollup(rollupId = rollup.id) + assertNotNull("Rollup job doesn't have metadata set", rollupJob.metadataID) + val rollupMetadata = getRollupMetadata(rollupJob.metadataID!!) + assertEquals("Rollup is not finished", RollupMetadata.Status.FINISHED, rollupMetadata.status) + } + + refreshAllIndices() + + // Term query + var req = """ + { + "size": 0, + "query": { + "query_string": { + "query": "state:TX AND state_ext:CA AND 0", + "default_field": "state_ordinal" + } + + }, + "aggs": { + "earnings_total": { + "sum": { + "field": "earnings" + } + } + } + } + """.trimIndent() + var rawRes = client().makeRequest("POST", "/$sourceIndex/_search", emptyMap(), StringEntity(req, ContentType.APPLICATION_JSON)) + assertTrue(rawRes.restStatus() == RestStatus.OK) + var rollupRes = client().makeRequest("POST", "/$targetIndex/_search", emptyMap(), StringEntity(req, ContentType.APPLICATION_JSON)) + assertTrue(rollupRes.restStatus() == RestStatus.OK) + var rawAggRes = rawRes.asMap()["aggregations"] as Map> + var rollupAggRes = rollupRes.asMap()["aggregations"] as Map> + assertEquals( + "Source and rollup index did not return same min results", + rawAggRes.getValue("earnings_total")["value"], + rollupAggRes.getValue("earnings_total")["value"] + ) + } + + fun `test roll up search query_string query with index pattern as source deleted`() { + val sourceIndex = "source_999_rollup_search_qsq_982439" + val targetIndex = "target_rollup_qsq_search_982439" + + createSampleIndexForQSQTest(sourceIndex) + + val rollup = Rollup( + id = "basic_query_string_query_rollup_search982499", + enabled = true, + schemaVersion = 1L, + jobSchedule = 
IntervalSchedule(Instant.now(), 1, ChronoUnit.MINUTES), + jobLastUpdatedTime = Instant.now(), + jobEnabledTime = Instant.now(), + description = "basic search test", + sourceIndex = "source_999*", + targetIndex = targetIndex, + metadataID = null, + roles = emptyList(), + pageSize = 10, + delay = 0, + continuous = false, + dimensions = listOf( + DateHistogram(sourceField = "event_ts", fixedInterval = "1h"), + Terms("state", "state"), + Terms("state_ext", "state_ext"), + Terms("state_ext2", "state_ext2"), + Terms("state_ordinal", "state_ordinal"), + Terms("abc test", "abc test"), + ), + metrics = listOf( + RollupMetrics( + sourceField = "earnings", targetField = "earnings", + metrics = listOf( + Sum(), Min(), Max(), + ValueCount(), Average() + ) + ) + ) + ).let { createRollup(it, it.id) } + + updateRollupStartTime(rollup) + + waitFor { + val rollupJob = getRollup(rollupId = rollup.id) + assertNotNull("Rollup job doesn't have metadata set", rollupJob.metadataID) + val rollupMetadata = getRollupMetadata(rollupJob.metadataID!!) 
+ assertEquals("Rollup is not finished", RollupMetadata.Status.FINISHED, rollupMetadata.status) + } + + refreshAllIndices() + + deleteIndex(sourceIndex) + + // Term query + var req = """ + { + "size": 0, + "query": { + "query_string": { + "query": "state:TX AND state_ext:CA AND 0", + "default_field": "state_ordinal" + } + + }, + "aggs": { + "earnings_total": { + "sum": { + "field": "earnings" + } + } + } + } + """.trimIndent() + try { + client().makeRequest("POST", "/$targetIndex/_search", emptyMap(), StringEntity(req, ContentType.APPLICATION_JSON)) + fail("Failure was expected when searching rollup index using qsq query when sourceIndex does not exist!") + } catch (e: ResponseException) { + Assert.assertTrue(e.message!!.contains("Can't parse query_string query without sourceIndex mappings!")) + } + } } diff --git a/src/test/kotlin/org/opensearch/indexmanagement/rollup/runner/RollupRunnerIT.kt b/src/test/kotlin/org/opensearch/indexmanagement/rollup/runner/RollupRunnerIT.kt index 3c2f10067..168ea3a64 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/rollup/runner/RollupRunnerIT.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/rollup/runner/RollupRunnerIT.kt @@ -33,6 +33,7 @@ import org.opensearch.rest.RestRequest import org.opensearch.rest.RestStatus import java.time.Instant import java.time.temporal.ChronoUnit +import java.util.Collections.emptyMap class RollupRunnerIT : RollupRestTestCase() { @@ -1253,6 +1254,71 @@ class RollupRunnerIT : RollupRestTestCase() { assertEquals("Backing index [$backingIndex2] has to have owner rollup job with id:[${startedRollup1.id}]", rollupMetadata.failureReason) } + fun `test rollup with date_nanos as date_histogram field`() { + val index = "date-nanos-index" + val rollupIndex = "date-nanos-index-rollup" + createIndex( + index, + Settings.EMPTY, + """"properties": { + "purchaseDate": { + "type": "date_nanos" + }, + "itemName": { + "type": "keyword" + }, + "itemPrice": { + "type": "float" + } + }""" + ) + + 
indexDoc(index, "1", """{"purchaseDate": 1683149130000.6497, "itemName": "shoes", "itemPrice": 100.5}""".trimIndent()) + indexDoc(index, "2", """{"purchaseDate": 1683494790000, "itemName": "shoes", "itemPrice": 30.0}""".trimIndent()) + indexDoc(index, "3", """{"purchaseDate": "2023-05-08T18:57:33.743656789Z", "itemName": "shoes", "itemPrice": 60.592}""".trimIndent()) + + refreshAllIndices() + + val job = Rollup( + id = "rollup_with_alias_992434131", + schemaVersion = 1L, + enabled = true, + jobSchedule = IntervalSchedule(Instant.now(), 1, ChronoUnit.DAYS), + jobLastUpdatedTime = Instant.now(), + jobEnabledTime = Instant.now(), + description = "basic change of page size", + sourceIndex = index, + targetIndex = rollupIndex, + metadataID = null, + roles = emptyList(), + pageSize = 1000, + delay = 0, + continuous = true, + dimensions = listOf( + DateHistogram(sourceField = "purchaseDate", fixedInterval = "5d"), + Terms("itemName", "itemName"), + ), + metrics = listOf( + RollupMetrics( + sourceField = "itemPrice", + targetField = "itemPrice", + metrics = listOf(Sum(), Min(), Max(), ValueCount(), Average()) + ) + ) + ).let { createRollup(it, it.id) } + + updateRollupStartTime(job) + + waitFor { assertTrue("Target rollup index was not created", indexExists(rollupIndex)) } + + waitFor { + val rollupJob = getRollup(rollupId = job.id) + assertNotNull("Rollup job doesn't have metadata set", rollupJob.metadataID) + val rollupMetadata = getRollupMetadata(rollupJob.metadataID!!) 
+ assertEquals("Rollup is not started", RollupMetadata.Status.STARTED, rollupMetadata.status) + } + } + // TODO: Test scenarios: // - Source index deleted after first execution // * If this is with a source index pattern and the underlying indices are recreated but with different data diff --git a/src/test/kotlin/org/opensearch/indexmanagement/snapshotmanagement/TestUtils.kt b/src/test/kotlin/org/opensearch/indexmanagement/snapshotmanagement/TestUtils.kt index b4bebcf11..5a3c016d2 100644 --- a/src/test/kotlin/org/opensearch/indexmanagement/snapshotmanagement/TestUtils.kt +++ b/src/test/kotlin/org/opensearch/indexmanagement/snapshotmanagement/TestUtils.kt @@ -14,7 +14,6 @@ import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse import org.opensearch.action.index.IndexResponse import org.opensearch.cluster.SnapshotsInProgress import org.opensearch.common.UUIDs -import org.opensearch.common.collect.ImmutableOpenMap import org.opensearch.common.unit.TimeValue import org.opensearch.common.xcontent.LoggingDeprecationHandler import org.opensearch.core.xcontent.NamedXContentRegistry @@ -242,7 +241,7 @@ fun mockInProgressSnapshotInfo( emptyList(), randomNonNegativeLong(), randomNonNegativeLong(), - ImmutableOpenMap.of(), + mapOf(), "", mapOf("sm_policy" to "daily-snapshot"), Version.CURRENT,